diff --git "a/6453.jsonl" "b/6453.jsonl" new file mode 100644--- /dev/null +++ "b/6453.jsonl" @@ -0,0 +1,615 @@ +{"seq_id":"44912561","text":"class Action:\n \n def __init__(self, unit, target):\n self.unit = unit\n self.target = target\n\n def complete(self):\n self.unit.walked = []\n self.unit.action = None\n self.unit._flee_or_fight_if_enemy()\n\n def update(self):\n pass\n\n\nclass MoveAction(Action):\n \n def update(self):\n if hasattr(self.target, \"other_side\"):\n # move towards the center of the next square\n self.unit.go_to_xy(self.target.other_side.place.x, self.target.other_side.place.y) \n elif getattr(self.target, \"place\", None) is self.unit.place:\n self.unit.action_reach_and_use()\n elif self.unit.airground_type == \"air\":\n self.unit.go_to_xy(self.target.x, self.target.y)\n else:\n self.complete()\n\n\nclass MoveXYAction(Action):\n\n timer = 15 # 5 seconds # XXXXXXXX not beautiful\n\n def update(self):\n if self.timer > 0:\n self.timer -= 1\n x, y = self.target\n if self.unit.go_to_xy(x, y):\n self.complete()\n else:\n self.complete()\n\n\nclass AttackAction(Action):\n\n def update(self): # without moving to another square\n if self.unit.range and self.target in self.unit.place.objects:\n self.unit.action_reach_and_use()\n elif self.unit.can_attack(self.target):\n self.unit.aim(self.target)\n else:\n self.complete()\n","sub_path":"soundrts/worldaction.py","file_name":"worldaction.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"324316221","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# flake8: noqa\n\nimport os\nimport pytest\n\nfrom google.api_core import exceptions\nfrom google.cloud import asset_v1\nfrom google.cloud.asset_v1 import enums\n\nPROJECT_INSIDE = os.environ.get(\"PROJECT_ID\", None)\nPROJECT_OUTSIDE = os.environ.get(\n \"GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT\", None\n)\nIS_INSIDE_VPCSC = os.environ.get(\"GOOGLE_CLOUD_TESTS_IN_VPCSC\", \"true\")\n\n\nclass TestVPCServiceControl(object):\n @staticmethod\n def _is_rejected(call):\n try:\n responses = call()\n except exceptions.PermissionDenied as e:\n return e.message == \"Request is prohibited by organization's policy\"\n except:\n pass\n return False\n\n @staticmethod\n def _do_test(delayed_inside, delayed_outside):\n if IS_INSIDE_VPCSC.lower() == \"true\":\n assert TestVPCServiceControl._is_rejected(delayed_outside)\n assert not (TestVPCServiceControl._is_rejected(delayed_inside))\n else:\n assert not (TestVPCServiceControl._is_rejected(delayed_outside))\n assert TestVPCServiceControl._is_rejected(delayed_inside)\n\n @pytest.mark.skipif(\n PROJECT_INSIDE is None, reason=\"Missing environment variable: PROJECT_ID\"\n )\n @pytest.mark.skipif(\n PROJECT_OUTSIDE is None,\n reason=\"Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT\",\n )\n def test_export_assets(self):\n client = 
asset_v1.AssetServiceClient()\n output_config = {}\n parent_inside = \"projects/\" + PROJECT_INSIDE\n delayed_inside = lambda: client.export_assets(parent_inside, output_config)\n parent_outside = \"projects/\" + PROJECT_OUTSIDE\n delayed_outside = lambda: client.export_assets(parent_outside, output_config)\n TestVPCServiceControl._do_test(delayed_inside, delayed_outside)\n\n @pytest.mark.skipif(\n PROJECT_INSIDE is None, reason=\"Missing environment variable: PROJECT_ID\"\n )\n @pytest.mark.skipif(\n PROJECT_OUTSIDE is None,\n reason=\"Missing environment variable: GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT\",\n )\n def test_batch_get_assets_history(self):\n client = asset_v1.AssetServiceClient()\n content_type = enums.ContentType.CONTENT_TYPE_UNSPECIFIED\n read_time_window = {}\n parent_inside = \"projects/\" + PROJECT_INSIDE\n delayed_inside = lambda: client.batch_get_assets_history(\n parent_inside, content_type, read_time_window\n )\n parent_outside = \"projects/\" + PROJECT_OUTSIDE\n delayed_outside = lambda: client.batch_get_assets_history(\n parent_outside, content_type, read_time_window\n )\n TestVPCServiceControl._do_test(delayed_inside, delayed_outside)\n","sub_path":"asset/tests/system/test_vpcsc.py","file_name":"test_vpcsc.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"578470673","text":"import transformers\nfrom transformers import BertTokenizerFast as BertTokenizer, BertModel, AdamW, get_linear_schedule_with_warmup\n\nMAX_LEN = 512 # max number of tokens in a sentence\nN_EPOCHS = 4 # number of epochs\nBATCH_SIZE = 10 # batch size of train, eval is same\nCHECKPOINT_PATH = '' #save model checkpoints in this folder\nN_CLASSES = 4\nLEARNING_RATE = 1e-6 # learning rate\n\nTOKENIZER = BertTokenizer.from_pretrained('bert-base-uncased') # bert base uncased tokenizer for fine-tuning\n\nTRAINING_FILE = '' # train data path (use the processed folder in data folder of this repository)\nVALIDATION_FILE = '' # validation data path (use the processed folder in data folder of this repository)\nTESTING_FILE = '' # test data path (use the processed folder in data folder of this repository)\n","sub_path":"veracity_prediction_bert-base/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"413519001","text":"## Set up performance data query\nimport jsonpickle\nimport pandas as pd\nimport requests\nimport csv\nimport json\nimport bs4\n# Set up api for MBTA\nMBTA_api = \"wX9NwuHnZU2ToO7GmGR9uw\"\n\n# make sure I'm in the current directory\nimport os\nos.chdir('/Users/meganhess-homeier/github/transit_delays/gtfs_scrape')\n\n\n#read in origin_dest2 csv with cbd lines removed\norigin_dest2 = pd.read_csv('origin_dest2.csv', sep=',')\n\n#check it out\norigin_dest2.head()\n\n\n\n#check out my origin_dest2 df_columns\norigin_dest2.columns\n\n# set up a data frame to store api request addresses, created from the stops in the origin_dest2 file\n#http://realtime.mbta.com/developer/api/v2.1/traveltimes?api_key=wX9NwuHnZU2ToO7GmGR9uw&format=json&from_stop=11384&to_stop=70061&from_datetime=1514419200&to_datetime=1514505600\n# create a DataFrame\n# pass the origin/ destination to the dataframe as unique rows\n# with the url corresponding to that destination\n# add another column(s) and iterate through the urls to query the data and put it in the\n# columns according to the 
origin/ destination\n\nsource_target = pd.read_csv('source_target.csv', sep=',')\n\n#create the URL calling from the origin_dest2 df\ndef url_string(x,y):\n url = 'http://realtime.mbta.com/developer/api/v2.1/traveltimes?api_key=wX9NwuHnZU2ToO7GmGR9uw&format=json&from_stop={}&to_stop={}&from_datetime=1514419200&to_datetime=1514505600'.format(x,y)\n return url\n#create input for url_string\n# origin = [origin_dest2.origin_id]\n# dest = [origin_dest2.dest_id]\n# print(url_string(origin,dest))\n# to_stop,from_stop\n# 0,70057,70060\n#\n# url_string('70060','70057')\n\n## for loop for scraping all data from mbta gtfs performance api\nfrom pandas.io.json import json_normalize\n\nall_perf_df = pd.DataFrame()\nfor index, row in source_target.iterrows():\n api_string = url_string(row[\"from_stop\"], row[\"to_stop\"])\n #or_d = pd.DataFrame(data = [,row[\"origin_id\"], row[\"origin_name\"], row[\"dest_id\"], row[\"dest_name\"]], columns = ['origin_id', 'origin_name', 'dest_id', 'dest_name'])\n resp = requests.get(api_string).json()\n pars_resp2 = json.dumps(resp)\n resp_pars = json.loads(pars_resp2)\n perf_df = json_normalize(resp_pars['travel_times'])\n perf_df = perf_df.assign(origin_id = row[\"from_stop\"], dest_id = row[\"to_stop\"])\n all_perf_df = all_perf_df.append(perf_df)\n\nall_perf_df.to_csv('mbta_perf_v3.csv', encoding='utf-8')\n","sub_path":"gtfs_scrape/scraper2.py","file_name":"scraper2.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"466414591","text":"# https://atcoder.jp/contests/abc143/tasks/abc143_b\n\nn = int(input().strip())\nds = list(map(lambda x: int(x.strip()), input().split()))\n\nans = 0\n\nfor i in range(n):\n for j in range(n):\n if (i != j):\n ans += ds[i] * ds[j]\n\nprint(int(ans / 2))\n","sub_path":"beginner_contests/143/B/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"140799858","text":"__author__ = 'xienanjie'\n\nimport pickle\n\nclass Log(object):\n '''\n @usage: use for handle bh game log file\n '''\n\n def __init__(self, save='savelog.data'):\n '''\n Constructor\n '''\n self._log = []\n self._logfile = []\n self._save = save\n self._group = dict()\n\n def clear(self):\n self._log = []\n self._logfile = []\n self._save = 'savelog.data'\n self._group = dict()\n\n def loadlog(self, filename):\n '''\n @param: filename:\n '''\n if filename in self._logfile:\n print(filename, \"has loaded.\")\n return False\n try:\n with open(filename, 'r') as f:\n linecount = len(f.readlines())\n f.seek(0)\n percent = 0.0\n for i in range(linecount):\n if (i+1)/linecount - percent > 0.01:\n print('[{:3.0f}%]'.format((i+1)/linecount * 100))\n percent = i/linecount\n s = f.readline()\n oneline = s.split(',')\n self._log.append(oneline)\n\n self._logfile.append(filename)\n return True\n except IOError:\n print(\"Error: file'{0}' not Found.\".format(filename))\n return False\n\n # self.groupbyid()\n\n def getfilelist(self):\n return self._logfile\n\n def groupbyid(self):\n '''\n '''\n for line in self._log:\n if line[2] not in self._group:\n self._group[ line[2] ] = []\n self._group[ line[2] ].append(line)\n\n def getgroupbyid(self, groupid):\n if groupid in self._group:\n return self._group[groupid]\n else:\n return []\n\n def listgroupbyid(self, groupid):\n group = self.getgroupbyid(groupid)\n for line in group:\n print(line)\n\n\n def saveinfo(self, 
filename=''):\n if not filename:\n print(\"save info failed.\")\n return False\n\n try:\n with open(filename, 'wb') as f:\n pickle.dump(self._log, f)\n pickle.dump(self._save, f)\n pickle.dump(self._group, f)\n pickle.dump(self._logfile, f)\n print(\"save info success.\")\n return True\n except:\n print(\"save info failed.\")\n return False\n\n def loadinfo(self, filename=''):\n if not filename:\n filename = self._save\n try:\n with open(filename, \"rb\") as f:\n self._log = pickle.load(f)\n self._save = pickle.load(f)\n self._group = pickle.load(f)\n self._logfile = pickle.load(f)\n print(\"load info success.\")\n except IOError:\n if filename != \"savelog.data\":\n print(\"Error: file'{0}' not Found.\".format(filename))\n except:\n print(\"load info failed.\")\n\n\n\n\nclass LogControl(object):\n def __init__(self):\n self._log = Log()\n\n def loadLog(self, filename=''):\n return self._log.loadlog(filename)\n\n def saveLog(self, filename=''):\n return self._log.saveinfo(filename)\n\n def getLog(self):\n return self._log._log\n\n def getHead(self):\n return ['time','level','account_id','option','function','description']","sub_path":"log/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"231532445","text":"import os.path\nimport tempfile\n\nfrom thryft.compiler import Compiler\nfrom thryft_test import _test\n\n\nclass _GeneratorTest(_test._Test):\n def __init__(self, *args, **kwds):\n self.__generator_class = kwds.pop('generator_class', None)\n self.__generator_kwds = kwds.pop('generator_kwds', {})\n self.__repr_method_name = kwds.pop('repr_method_name', None)\n _test._Test.__init__(self, *args, **kwds)\n\n def _runTest(self, thrift_file_path):\n document_root_dir_path = os.path.dirname(thrift_file_path)\n generator = self.__generator_class(**self.__generator_kwds)\n\n try:\n document = \\\n Compiler(\n document_root_dir_path=document_root_dir_path,\n include_dir_paths=(document_root_dir_path,),\n ).compile(\n generator=generator,\n thrift_file_path=thrift_file_path\n )\n self._save(document)\n except Exception as e:\n raise RuntimeError(\"exception compiling and saving \" + thrift_file_path) from e\n return document\n\n def _save(self, document):\n with tempfile.TemporaryDirectory() as tempdir:\n document.save(out_path=tempdir)\n","sub_path":"compiler/test/thryft_test/_generator_test.py","file_name":"_generator_test.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"463332807","text":"# Instantiates DB handler, starts up\n# scapy's packet inspector, loads\n# signatures to match and passes\n# packages to filter class\nimport os\nimport sys\nimport logging\nfrom sniffy.utils.cfg_loader import load_sigs\nfrom sniffy.core.inspector import Parse, Match\nfrom sniffy.core.process import CheckDB\nfrom sniffy.core.dbconnector import DBH\nimport pprint\n\n\"\"\"\nSuppress scapy warning if no default route for IPv6.\nThis needs to be done before the import from scapy.\n\"\"\"\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR)\n\n\"\"\"\nTry to import sniff from scapy.all and show error\n w/ install instructions if it cannot be imported.\n\"\"\"\ntry:\n from scapy.all import sniff, IP\nexcept ImportError:\n sys.stderr.write(\"ERROR: You must have scapy installed.\\n\")\n sys.stderr.write(\"You can install it by running: sudo pip install -U 
'scapy>=2.3,<2.4'\\n\")\n exit(1)\n\n\"\"\"\nTry to import scapy_http.http and show error\n w/ install instructions if it cannot be imported.\n\"\"\"\ntry:\n #import scapy_http.http\n from scapy.layers import http\nexcept ImportError:\n sys.stderr.write(\"ERROR: You must have scapy-http installed.\\n\")\n sys.stderr.write(\"You can install it by running: sudo pip install scapy-http\\n\")\n exit(1)\n\nclass Sniffy():\n \"\"\"Fireup sniffy engine\n\n Runs sniff from scapy to start capturing packets specified\n according to filters. Packets that pass scapy's filter are\n sent to be parsed, when data is exctracted they are sent\n to match filter to see if the data matches any of the\n signatures provided in the signatures file.\n \"\"\"\n\n def __init__(self, args, log):\n self.args = args\n self.log = log\n self.match = None\n self.dbh = DBH(args.database, args.credsfile, log)\n self.checkdb = CheckDB(self.log, self.dbh)\n self.window = int(os.environ['REQUESTS_WINDOW']) if os.environ.get('REQUESTS_WINDOW') else 180\n self.threshold = int(os.environ['REQUESTS_THRESHOLD']) if os.environ.get('REQUESTS_THRESHOLD') else 5\n self.max_pkt_len = int(os.environ['MAX_PACKET_LENGTH']) if os.environ.get('MAX_PACKET_LENGTH') else 1500\n\n def start(self, msg=\"Sniffer starting up...\"): \n # Load signatures into Match class\n self.load_signatures()\n # Instantiate Parse class\n self.parser = Parse(self.log, self.args, IP, http, self.match)\n\n self.log.info(msg)\n self.log.debug(\"Interface: %s, Filter: %s, Count: %s\" \\\n % (self.args.interface, self.args.filter, self.args.count))\n\n \"\"\" Start up scapy's sniffer \"\"\"\n sniff(iface=self.args.interface,\n promisc=False,\n filter=self.args.filter,\n lfilter=lambda x: x[IP].len <= self.max_pkt_len,\n prn=self._parse_output,\n store=0,\n count=self.args.count)\n\n \"\"\" Load pattern signatures to match \"\"\"\n def load_signatures(self): \n signatures = load_sigs(self.args.sigfile, self.log)\n if type(signatures) is dict and signatures.has_key('error'):\n self.log.error(\"Sig errors: %s\" % signatures['error'])\n exit(1)\n\n self.match = Match(signatures, self.log)\n\n \"\"\" Parse packet output \"\"\"\n def _parse_output(self, out):\n \"\"\"Handle packet received by scapy\n\n This method is called by scapy's sniff on packets\n it finds. Packets will first be sent to parser\n to extract data (or try reassembly of multiple\n packet fragemnts). 
If data is returned from parser\n they are forwarded to class of match filter.\n\n - **parameters**, **types**\n\n :param out: Packet received by scapy\n :type out: \n\n - **variables, **types**\n :var data: Data parsed from packet\n :var result: Data returned by match filter\n :type data: dict\n :type result: dict\n\n \"\"\"\n\n #if out.haslayer(http.HTTPRequest):\n # pprint.pprint(out.getlayer(http.HTTPRequest).fields, indent=4, width=1)\n\n #pprint.pprint(out.getlayer(IP).payload)\n\n data = self.parser.parse_output(out)\n if type(data) is dict:\n if data.has_key('request'):\n return_request = data['request']\n elif data.has_key('skip') and data['skip'] == True:\n if data.has_key('msg') and data['msg']:\n self.log.debug(\"Skipped packet, msg=%s\" % data['msg'])\n return\n else:\n return\n\n # Match to loaded signatures\n result = self.match.filter(return_request)\n if result == None:\n return\n\n kwargs = {'result': result,\n 'dbh': self.dbh,\n 'email': self.args.notify,\n 'log': self.log,\n 'window': self.window,\n 'threshold': self.threshold}\n\n # Check DB, write to DB\n self.checkdb.check(**kwargs)\n","sub_path":"lib/sniffy/core/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"258074855","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport matplotlib\nmatplotlib.rcParams['mathtext.fontset'] = 'stix'\nmatplotlib.rcParams['font.family'] = 'STIXGeneral'\n\ndef sig(x):\n return 1/(1 + np.exp(-x))\n\ndef grid_multivariate(grid, prec, m, quad = False):\n\n z = grid - m\n Q = np.sum(z.dot(prec)*z, axis = 2)\n if quad:\n return -1/2*Q\n \n return np.exp(-1/2*Q)\n \n\nnp.random.seed(0)\nno_points = 20\nx = np.load('x.npy')[:no_points, :]\ny = np.load('y.npy')[:no_points, :]\nos.chdir(os.getcwd() + '/rich_images/bayesian_master_pred')\n\nSw = np.array([[10, 0],\n [0, 10]], dtype = 'float64')\nMw = np.array([[0, 0]], dtype = 'float64').T\n\nb_ax = np.linspace(-10, 10, 275*2)\nw_ax = np.linspace(-10, 10, 300*2)\nb_, w_ = np.meshgrid(b_ax, w_ax)\ngrid = np.stack([b_, w_], axis = 2)\ndb = (b_.max() - b_.min())/(b_.shape[0])\ndw = (w_.max() - w_.min())/(w_.shape[0])\ndA = (b_.max() - b_.min())/(b_.shape[0])\ndA = db*dw\n\nfig = plt.figure(figsize=(5, 5))\n\nrate = 0.01\nw = np.array([[0, 0]], dtype = 'float64').T\nfor i in range(1000):\n grad = -np.linalg.inv(Sw).dot(w - Mw)\n sigmas = sig(x.dot(w))\n lik_term = np.sum((y*(1 - sigmas) - (1 - y)*sigmas)*x, axis = 0)\n lik_term = lik_term.reshape((-1, 1))\n grad += lik_term\n w += grad*rate\n\nsigmas = sig(x.dot(w))\nS_g = np.linalg.inv(Sw) + (x.T).dot(x*sigmas*(1-sigmas))\n \nplt.subplot(111)\neps = 10**-12\nsigs = sig((grid).dot(x.T))*(1 - 2*eps) + eps\ny = np.reshape(y, (-1))\nlik = np.prod((sigs**y)*(1-sigs)**(1-y), axis = 2)\nlog_lik = np.sum(y*np.log(sigs) + (1-y)*np.log(1-sigs), axis = 2)\nprior_Q = np.sum(grid.dot(np.linalg.inv(Sw))*grid, axis = 2)\nprior = np.exp(-1/2*prior_Q)\nlog_joint = -1/2*prior_Q + log_lik\n\nprec_b = 10\nprec_w = 10\nvar_b, var_w = 1/prec_b, 1/prec_w\nm_b = 0\nm_w = 0\n\nPrec_w = np.array([[0, 0],\n [0, prec_w]], dtype = 'float64')\nM_w = np.array([[0, m_w]], dtype = 'float64')\n\nPrec_b = np.array([[prec_b, 0],\n [0, 0]], dtype = 'float64')\nM_b = np.array([[m_b, 0]], dtype = 'float64')\n\nfor i in range(1001):\n\n Prec_b[0, 0] = prec_b\n Prec_w[1, 1] = prec_w\n \n M_b[0, 0] = m_b\n M_w[0, 1] = m_w\n\n Q_w = grid_multivariate(grid, Prec_w, M_w)\n Q_w /= 
(Q_w.sum()*dA)\n Q_b = grid_multivariate(grid, Prec_b, M_b)\n Q_b /= (Q_b.sum()*dA)\n \n lnp_w = np.sum(Q_b*log_joint*db, axis = 1)\n lnp_b = np.sum(Q_w*log_joint*dw, axis = 0)\n\n q_w = Q_w[:, 0]\n q_w /= (q_w.sum() * dw)\n q_b = Q_b[0, :]\n q_b /= (q_b.sum() * db)\n\n quad_w = grid_multivariate(grid, Prec_w, M_w, quad = True)[:, 0]\n quad_w += np.log(2*np.pi*prec_w)\n quad_b = grid_multivariate(grid, Prec_b, M_b, quad = True)[0, :]\n quad_b += np.log(2*np.pi*prec_b)\n \n dq_dmb = q_b*(prec_b*(b_ax - m_b))\n dq_dmw = q_w*(prec_w*(w_ax - m_w))\n dq_dsb = q_b*(prec_b**2/2*(b_ax - m_b)**2) - 0.5*q_b/(2*np.pi)**0.5*prec_b**1.5\n dq_dsw = q_w*(prec_w**2/2*(w_ax - m_w)**2) - 0.5*q_w/(2*np.pi)**0.5*prec_w**1.5\n\n grad_mb = np.sum(lnp_b*dq_dmb*db) - np.sum((1 + quad_b)*dq_dmb*db)\n grad_mw = np.sum(lnp_w*dq_dmw*dw) - np.sum((1 + quad_w)*dq_dmw*dw)\n\n grad_sb = np.sum(lnp_b*dq_dsb*db) - np.sum((1 + quad_b)*dq_dsb*db)\n grad_sw = np.sum(lnp_w*dq_dsw*dw) - np.sum((1 + quad_w)*dq_dsw*dw)\n\n \n m_b += grad_mb\n m_w += grad_mw\n \n var_b += 0.1*grad_sb\n var_w += 0.1*grad_sw\n\n prec_b = 1/var_b\n prec_w = 1/var_w\n\n \n if i % 50 == 0:\n L = np.sum(Q_w*Q_b*(log_joint - np.log(Q_w) + np.log(Q_b)))\n print(L)\n print(i)\n print(var_b**0.5, var_w**0.5, m_b, m_w)\n print(grad_mb, grad_mw, grad_sb, grad_sw)\n \n plt.contourf(-b_, w_, lik*prior,\n cmap = 'coolwarm', alpha = 0.5)\n prec = np.array([[prec_b, 0],\n [0, prec_w]])\n mu = np.array([m_b, m_w])\n approx = grid_multivariate(grid, prec, mu)\n plt.contour(-b_, w_, approx, linecolor = 'black')\n plt.scatter(-m_b, m_w, marker = 'x', color = 'black')\n plt.title('Posterior', fontsize = 18)\n plt.xlabel(r'$b$', fontsize = 18)\n plt.ylabel(r'$w$', fontsize = 18)\n plt.xticks(fontsize = 13)\n plt.yticks(fontsize = 13)\n plt.locator_params(axis='y', nbins = 5)\n plt.locator_params(axis='x', nbins = 5)\n plt.tight_layout()\n\n plt.show()\n plt.clf()\n\n\n\n","sub_path":"classification/vi.py","file_name":"vi.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"592316136","text":"from django.db import models\n\n\nclass SavedExercise(models.Model):\n name = models.ForeignKey(\n verbose_name=\"name\",\n to='source.Exercise',\n blank=True,\n null=True,\n on_delete=models.CASCADE\n )\n body_part = models.ForeignKey(\n verbose_name=\"body part\",\n blank=True,\n null=True,\n to='source.Category',\n on_delete=models.CASCADE\n )\n muscle_group = models.ForeignKey(\n verbose_name=\"muscle_group\",\n blank=True,\n null=True,\n to='source.Muscle',\n on_delete=models.CASCADE,\n )\n sets = models.IntegerField(\n verbose_name=\"sets\",\n default=3\n )\n repetitions = models.IntegerField(\n verbose_name=\"repetitions\",\n default=10\n )\n weight = models.IntegerField(\n verbose_name=\"weight\",\n default=0\n )\n saved_workout = models.ForeignKey(\n to='saved_workout.SavedWorkout',\n on_delete=models.CASCADE,\n related_name='exercises'\n )\n","sub_path":"app/project/saved_exercise/models/saved_exercise.py","file_name":"saved_exercise.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"426646909","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os.path\nimport sys\nimport tornado.auth\nimport tornado.escape\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nfrom tornado.options import define, options\nfrom log import 
log_file\nfrom Nginx import Nginx\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\ndefine(\"port\", default=58000, help=\"run on the given port\", type=int)\n\nclass Application(tornado.web.Application):\n def __init__(self):\n handlers = [\n # (r\"/\", testHandler),\n (r\"/\", NginxHandler),\n\n ]\n settings = dict(\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n debug=True,\n )\n # conn = MongoClient(\"192.168.1.203\", 57017)\n # self.db = conn[\"demo\"]\n tornado.web.Application.__init__(self, handlers, **settings)\n\n\nclass NginxHandler(tornado.web.RequestHandler):\n \"tornado_nginx config reset add del\"\n def get(self):\n \"\"\n self.render(\"index.html\")\n def post(self):\n \"\"\n # user = self.get_argument('user')\n # passwd = self.get_argument('passwd')\n domain = self.get_argument('domain').strip().lower()\n ip = self.get_argument('ip','127.0.0.1:80').strip()\n\n act = self.get_argument('act').strip().lower()\n if self.request.remote_ip in ['192.168.1.6','192.168.1.254','127.0.0.1']:\n if act in ['add','del']:\n log_file(\"\"\"\n客户端:\\t %s \\n\n客户端浏览器:\\t %s \\n\n操作域名:\\t %s \\n\n映射IP:\\t %s \\n\n动作:\\t %s \\n\n \"\"\"%(self.request.remote_ip,self.request.headers['user-agent'],domain,ip,act))\n x = Nginx(domain,ip)\n if x.check():\n if act == 'add' :\n port = int(ip.split(':')[-1])\n if port < 65535 and port > 0:\n if x.check() and x.Add():\n log_file(\"状态:\\t OK \\n\")\n self.write(\"OK\")\n else:\n log_file(\"状态:\\t ERROR \\n\")\n self.write(\"ERROR FROM NGINX ADD\")\n else:\n self.write(\"ERROR FROM PORT >0 <65535\")\n else:\n if x.check() and x.Del() and x.reload():\n log_file(\"状态:\\t OK \\n\")\n self.write(\"OK\")\n else:\n log_file(\"状态:\\t ERROR \\n\")\n self.write(\"ERROR FORM NGINX DEL\")\n else:\n self.write(\"ERROR FORM NGINX NOT START\")\n else:\n self.write(\"ERROR FROM ACT\")\n else:\n self.write(\"ERROR FROM ACT ONE\")\n\n\n\n\ndef main():\n tornado.options.parse_command_line()\n http_server = tornado.httpserver.HTTPServer(Application())\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tornado/old_nginx_config/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"142188113","text":"# SOURCES AND OMH IN MAIN CODE \n# import package for pygame, csv, and import the Create class from init_barrier.py in order to access getter and setter functions\nimport pygame\nimport csv\nfrom init_barrier import Create\n\n# Barriers class makes and displays the barriers/walls that are colored purple, green, or black on the background\nclass Barriers():\n\n\t# constructs the barrier/wall arrangement by passing in the game surface, and making a barriers list by taking in values from a csv file called barriers.csv which contains the parameters for the barriers\n\tdef __init__(self, surface):\n\t\tsuper().__init__()\n\t\tself.surface = surface\n\n\t\t# reads in barrier parameters from csv and creates objects\n\t\tself.barriers = []\n\t\twith open('barriers.csv') as csv_file:\n\t\t\tcsv_reader = csv.reader(csv_file, delimiter=',')\n\t\t\tfor row in csv_reader:\n\t\t\t\tself.barriers.append(Create(row[0], row[1], row[2], row[3], row[4]))\n\t\t\t\n\t\n\t# use the getters and setters from init_barrier to, based on the kind of wall (1 = purple, 2 = black, 3 = green safe space) display the 
wall by getting the values of csv file that specifies the dimensions for that specific wall\n\tdef display(self, choice):\n\t\t# choice dictates which barriers we want to display. Since barriers with kind == 1 are the walls that the player can't go past, we want to continously redraw them. So we call display(2).\n\t\tif choice == 2:\n\t\t\tfor location in self.barriers:\n\t\t\t\tif location.getKind() == 1: #purple walls\n\t\t\t\t\tpygame.draw.rect(self.surface, (255, 100, 255), (location.getPosx(), location.getPosy(), location.getDimw(), location.getDimh()))\n\n\t\t# otherwise, we just want to display the black and green spaces once, so they aren't drawn over the balls or player.\n\t\tif choice == 1:\n\t\t\tfor location in self.barriers:\n\t\t\t\tif location.getKind() == 2: #black space\n\t\t\t\t\tpygame.draw.rect(self.surface, (0,0,0), (location.getPosx(), location.getPosy(), location.getDimw(), location.getDimh()))\n\n\t\t\t\telif location.getKind() == 3: #green safe space\n\t\t\t\t\tpygame.draw.rect(self.surface, (171, 254, 171), (location.getPosx(), location.getPosy(), location.getDimw(), location.getDimh()))\n\n","sub_path":"WorldsHardestGame/barriers.py","file_name":"barriers.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"349777333","text":"class Dog():\n\n # Class Attribute\n species = 'mammal'\n\n # Initializer / Instance Attributes\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n\n# Instantiate the Dog object\nphilo = Dog(\"Philo\", 5)\nmikey = Dog(\"Mikey\", 6)\ndoggo = Dog(\"Mikey\", 10)\n\n# Access the instance attributes\nprint(\"The oldest dog is {} years old\".format(\n max(philo.age, mikey.age, doggo.age)))\n","sub_path":"book_1/chp19/oldest_dog-own.py","file_name":"oldest_dog-own.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"574954189","text":"from basketball_reference_scraper.teams import get_roster, get_team_stats, get_opp_stats, get_roster_stats, get_team_misc\nfrom basketball_reference_scraper.seasons import get_schedule\nfrom basketball_reference_scraper.box_scores import get_box_scores\nfrom utils import *\nimport sys\nimport pandas as pd\nimport numpy as np\npd.options.mode.chained_assignment = None\n\n# TODO: \n# - Get injury report\n# - Check if away team has anyone on the report\n# - Check if home team has anyone on the report\n# - If so, get the player and return his win shares\n# - Add up total missing win shares and create a column displaying that\n# - Standardize it\n\ndef convert_team_names(entire_schedule):\n team_abbreviations = {'Atlanta Hawks' : 'ATL', \n 'Brooklyn Nets' : 'BRK', \n 'Boston Celtics' : 'BOS',\n 'Charlotte Hornets' : 'CHO',\n 'Chicago Bulls' : 'CHI',\n 'Cleveland Cavaliers' : 'CLE',\n 'Dallas Mavericks' : 'DAL',\n 'Denver Nuggets' : 'DEN',\n 'Detroit Pistons' : 'DET',\n 'Golden State Warriors' : 'GSW',\n 'Houston Rockets' : 'HOU',\n 'Indiana Pacers' : 'IND',\n 'Los Angeles Clippers' : 'LAC',\n 'Los Angeles Lakers' : 'LAL', \n 'Memphis Grizzlies' : 'MEM', \n 'Miami Heat' : 'MIA', \n 'Milwaukee Bucks' : 'MIL', \n 'Minnesota Timberwolves' : 'MIN', \n 'New Orleans Pelicans' : 'NOP', \n 'New York Knicks' : 'NYK', \n 'Oklahoma City Thunder' : 'OKC', \n 'Orlando Magic' : 'ORL', \n 'Philadelphia 76ers' : 'PHI', \n 'Phoenix Suns' : 'PHO', \n 'Portland Trail Blazers' : 'POR', \n 'Sacramento Kings' : 'SAC', \n 'San Antonio 
Spurs' : 'SAS', \n 'Toronto Raptors' : 'TOR', \n 'Utah Jazz' : 'UTA', \n 'Washington Wizards' : 'WAS'}\n\n team_abbreviations_keys = list(team_abbreviations.keys())\n\n row = 0\n for home_team in entire_schedule['HOME']:\n for i in range(0, 30):\n if team_abbreviations_keys[i] in home_team:\n entire_schedule.at[row, 'HOME'] = team_abbreviations[team_abbreviations_keys[i]]\n row += 1\n row = 0\n for away_team in entire_schedule['VISITOR']:\n for i in range(0, 30):\n if team_abbreviations_keys[i] in away_team:\n entire_schedule.at[row, 'VISITOR'] = team_abbreviations[team_abbreviations_keys[i]]\n row += 1\n \n return entire_schedule\n\ndef add_winner_column(entire_schedule):\n winner = []\n for index, row in entire_schedule.iterrows():\n if row['HOME_PTS'] > row['VISITOR_PTS']:\n winner.insert(index, 1)\n else:\n winner.insert(index, 0)\n entire_schedule[\"WINNER\"] = winner\n return entire_schedule\n\n# def add_injury_columns(entire_schedule):\n\n\ndef add_team_stats(entire_schedule, year):\n team_misc_2020 = getTeamMisc(year)\n team_misc_2019 = getTeamMisc(year - 1)\n last_year_win_percentage_home = []\n last_year_win_percentage_visitor = []\n count = 1\n for index, row in entire_schedule.iterrows():\n home = team_misc_2019.loc[team_misc_2019['TEAM'] == row['HOME'], 'W'].values[0]\n visitor = team_misc_2019.loc[team_misc_2019['TEAM'] == row['VISITOR'], 'W'].values[0]\n\n last_year_win_percentage_home.insert(index, home/82)\n last_year_win_percentage_visitor.insert(index, visitor/82)\n\n home_team_season_stats = team_misc_2020[team_misc_2020['TEAM'] == row['HOME']].add_prefix(\"HOME_\")\n visitor_team_season_stats = team_misc_2020[team_misc_2020['TEAM'] == row['VISITOR']].add_prefix(\"VISITOR_\")\n\n home_team_season_stats = home_team_season_stats[['HOME_NRtg', 'HOME_DRtg', 'HOME_ORtg', 'HOME_DRB%', 'HOME_SRS', 'HOME_MOV', \n 'HOME_PACE']]\n visitor_team_season_stats = visitor_team_season_stats[['VISITOR_NRtg', 'VISITOR_DRtg', 'VISITOR_ORtg', 'VISITOR_DRB%', 'VISITOR_SRS', 'VISITOR_MOV',\n 'VISITOR_PACE']]\n \n if count == 1:\n home_team_stats = home_team_season_stats\n visitor_team_stats = visitor_team_season_stats\n else:\n home_team_stats = pd.concat([home_team_stats, home_team_season_stats])\n visitor_team_stats = pd.concat([visitor_team_stats, visitor_team_season_stats])\n count += 1\n\n entire_schedule = entire_schedule.reset_index(drop=True)\n home_team_stats = home_team_stats.reset_index(drop=True)\n visitor_team_stats = visitor_team_stats.reset_index(drop=True)\n entire_schedule = pd.concat([entire_schedule, home_team_stats, visitor_team_stats], axis=1, sort=True)\n entire_schedule[\"HOME_LAST_SEASON_W%\"] = last_year_win_percentage_home\n entire_schedule[\"VISITOR_LAST_SEASON_W%\"] = last_year_win_percentage_visitor\n entire_schedule[\"HOME_GAME\"] = [1] * len(entire_schedule)\n entire_schedule[\"AWAY GAME\"] = [0] * len(entire_schedule)\n return entire_schedule\n\ndef add_win_percentage(entire_schedule):\n teams = [\n 'ATL','BRK','BOS','CHO','CHI','CLE','DAL','DEN',\n 'DET','GSW','HOU','IND','LAC','LAL','MEM','MIA', \n 'MIL','MIN','NOP','NYK','OKC','ORL','PHI','PHO', \n 'POR','SAC','SAS','TOR','UTA','WAS']\n init = [0] * 30\n data = {'wins' : init, 'losses' : init}\n\n win_loss = pd.DataFrame(data, index=teams)\n home_win_percentage = []\n visitor_win_percentage = []\n for index, row in entire_schedule.iterrows():\n \n if (win_loss.loc[row['HOME'], 'wins'] + win_loss.loc[row['HOME'], 'losses']) == 0:\n if win_loss.loc[row['HOME'], 'wins'] > 0:\n home_win_per = 1\n else:\n 
home_win_per = 0\n else:\n home_wins = win_loss.loc[row['HOME'], 'wins']\n home_win_per = home_wins / (win_loss.loc[row['HOME'], 'wins'] + win_loss.loc[row['HOME'], 'losses'])\n\n if (win_loss.loc[row['VISITOR'], 'wins'] + win_loss.loc[row['VISITOR'], 'losses']) == 0:\n if win_loss.loc[row['VISITOR'], 'wins'] > 0:\n visitor_win_per = 1\n else:\n visitor_win_per = 0\n else:\n visitor_wins = win_loss.loc[row['VISITOR'], 'wins']\n visitor_win_per = visitor_wins / (win_loss.loc[row['VISITOR'], 'wins'] + win_loss.loc[row['VISITOR'], 'losses'])\n\n home_win_percentage.insert(index, home_win_per)\n visitor_win_percentage.insert(index, visitor_win_per)\n\n if row['WINNER'] == 1:\n win_loss.at[row['HOME'],'wins'] = win_loss.loc[row['HOME'],'wins'] + 1\n win_loss.at[row['VISITOR'],'losses'] = win_loss.loc[row['VISITOR'],'losses'] + 1\n elif row['WINNER'] == 0:\n win_loss.at[row['VISITOR'],'wins'] = win_loss.loc[row['VISITOR'],'wins'] + 1\n win_loss.at[row['HOME'],'losses'] = win_loss.loc[row['HOME'],'losses'] + 1\n\n entire_schedule[\"HOME_W%\"] = home_win_percentage\n entire_schedule[\"VISITOR_W%\"] = visitor_win_percentage\n return entire_schedule\n\ndef add_home_away_splits(entire_schedule):\n teams = [\n 'ATL','BRK','BOS','CHO','CHI','CLE','DAL','DEN',\n 'DET','GSW','HOU','IND','LAC','LAL','MEM','MIA', \n 'MIL','MIN','NOP','NYK','OKC','ORL','PHI','PHO', \n 'POR','SAC','SAS','TOR','UTA','WAS']\n init = [0] * 30\n data = {'home_wins' : init, 'home_losses' : init, 'road_wins': init, 'road_losses' : init}\n home_road_split = pd.DataFrame(data, index=teams)\n home_win_at_home = []\n visitor_win_on_road = []\n for index, row in entire_schedule.iterrows():\n if (home_road_split.loc[row['HOME'], 'home_wins'] + home_road_split.loc[row['HOME'], 'home_losses']) == 0:\n if home_road_split.loc[row['HOME'], 'home_wins'] > 0:\n home_win_per = 1\n else:\n home_win_per = 0\n else:\n home_wins = home_road_split.loc[row['HOME'], 'home_wins']\n home_win_per = home_wins / (home_road_split.loc[row['HOME'], 'home_wins'] + home_road_split.loc[row['HOME'], 'home_losses'])\n\n if (home_road_split.loc[row['VISITOR'], 'road_wins'] + home_road_split.loc[row['VISITOR'], 'road_losses']) == 0:\n if home_road_split.loc[row['VISITOR'], 'road_wins'] > 0:\n visitor_win_per = 1\n else:\n visitor_win_per = 0\n else:\n visitor_wins = home_road_split.loc[row['VISITOR'], 'road_wins']\n visitor_win_per = visitor_wins / (home_road_split.loc[row['VISITOR'], 'road_wins'] + home_road_split.loc[row['VISITOR'], 'road_losses'])\n\n home_win_at_home.insert(index, home_win_per)\n visitor_win_on_road.insert(index, visitor_win_per)\n\n if row['WINNER'] == 1:\n home_road_split.at[row['HOME'],'home_wins'] = home_road_split.loc[row['HOME'],'home_wins'] + 1\n home_road_split.at[row['VISITOR'],'road_losses'] = home_road_split.loc[row['VISITOR'],'road_losses'] + 1\n elif row['WINNER'] == 0:\n home_road_split.at[row['VISITOR'],'road_wins'] = home_road_split.loc[row['VISITOR'],'road_wins'] + 1\n home_road_split.at[row['HOME'],'home_losses'] = home_road_split.loc[row['HOME'],'home_losses'] + 1\n\n entire_schedule['HOME_W%_AT_HOME'] = home_win_at_home\n entire_schedule['VISITOR_W%_ON_ROAD'] = visitor_win_on_road\n return entire_schedule\n\ndef add_second_of_b2b(entire_schedule):\n teams_that_played_last_night = []\n teams_that_played_today = []\n home_team_b2b = []\n visitor_team_b2b = []\n last_night_day = \"Monday\"\n\n for index, row in entire_schedule.iterrows():\n day = pd.Timestamp.day_name(row['DATE'])\n \n if get_next_day(last_night_day) != day:\n 
teams_that_played_last_night = teams_that_played_today\n teams_that_played_today = []\n last_night_day = get_day_before(day)\n \n if get_next_day(last_night_day) == day:\n if row['HOME'] in teams_that_played_last_night:\n home_team_b2b.insert(index, 1)\n else:\n home_team_b2b.insert(index, 0)\n\n if row['VISITOR'] in teams_that_played_last_night:\n visitor_team_b2b.insert(index, 1)\n else:\n visitor_team_b2b.insert(index, 0)\n \n teams_that_played_today.append(row['HOME'])\n teams_that_played_today.append(row['VISITOR'])\n\n entire_schedule[\"HOME_B2B\"] = home_team_b2b\n entire_schedule[\"VISITOR_B2B\"] = visitor_team_b2b\n return entire_schedule\n\ndef add_last_twenty(entire_schedule):\n team_last_twenty = get_teams_last_twenty_init()\n home_last_twenty_percent = []\n visitor_last_twenty_percent = []\n for index, row in entire_schedule.iterrows():\n\n home_last_twenty_percent.append(get_win_loss_percentage_last_twenty(row['HOME'], team_last_twenty))\n visitor_last_twenty_percent.append(get_win_loss_percentage_last_twenty(row['VISITOR'], team_last_twenty))\n \n if not np.isnan(row['WINNER']):\n temp_home = team_last_twenty[row['HOME']]\n temp_home.pop(0)\n temp_visitor = team_last_twenty[row['VISITOR']]\n temp_visitor.pop(0)\n\n if row['WINNER'] == 1:\n temp_home.append('W')\n temp_visitor.append('L')\n team_last_twenty.update({row['HOME'] : temp_home})\n team_last_twenty.update({row['VISITOR'] : temp_visitor})\n elif row['WINNER'] == 0:\n temp_home.append('L')\n temp_visitor.append('W')\n team_last_twenty.update({row['HOME'] : temp_home})\n team_last_twenty.update({row['VISITOR'] : temp_visitor})\n\n entire_schedule[\"HOME_LAST_20\"] = home_last_twenty_percent\n entire_schedule[\"VISITOR_LAST_20\"] = visitor_last_twenty_percent\n return entire_schedule\n\ndef finalize_csv(name, year):\n entire_schedule = get_schedule(year, playoffs=False)\n convert_team_names(entire_schedule)\n add_winner_column(entire_schedule)\n entire_schedule = add_win_percentage(add_team_stats(entire_schedule, year))\n entire_schedule = add_second_of_b2b(entire_schedule)\n entire_schedule = add_home_away_splits(entire_schedule)\n entire_schedule = add_last_twenty(entire_schedule)\n entire_schedule.to_csv(name, index=False)\n \n sys.stderr.write(\"[PROGRESS] CSV file creation completed!\")\n sys.stderr.flush()\n \n return name\n\nfinalize_csv(\"season_2017.csv\", 2017)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":12836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"620397199","text":"def unikati(s):\n nov_seznam=[]\n for x in s:\n if x not in nov_seznam:\n nov_seznam.append(x)\n return nov_seznam\n\ndef avtor(tvit):\n return tvit[0:tvit.find(\":\")]\n\ndef vsi_avtorji(tviti):\n seznam = []\n for tvit in tviti:\n seznam.append(avtor(tvit))\n return unikati(seznam)\n\ndef izloci_besedo(beseda):\n while beseda[0].isalnum() == False:\n beseda = beseda.strip(beseda[0])\n while beseda[-1].isalnum() == False:\n beseda = beseda.rstrip(beseda[-1])\n return beseda\n\ndef se_zacne_z(tvit, c):\n nov_seznam = []\n seznam = tvit.split()\n for x in seznam:\n if x[0] == c:\n nov_seznam.append(izloci_besedo(x))\n return nov_seznam\n\ndef zberi_se_zacne_z(tviti, c):\n seznam = []\n for tvit in tviti:\n seznam += se_zacne_z(tvit, c)\n return unikati(seznam)\n\ndef vse_afne(tviti):\n return zberi_se_zacne_z(tviti, \"@\")\n\ndef vsi_hashtagi(tviti):\n return zberi_se_zacne_z(tviti, \"#\")\n\ndef vse_osebe(tviti):\n return 
sorted(unikati(vsi_avtorji(tviti) +\n vse_afne(tviti)),\n key=str.lower)\n\n\n\n\ndef besedilo(tvit):\n return tvit[tvit.find(\":\")+2 : len(tvit)]\n\ndef zadnji_tvit(tviti):\n s = {}\n for tvit in tviti:\n s[avtor(tvit)] = besedilo(tvit)\n return s\n\ndef prvi_tvit(tviti):\n s = {}\n for tvit in tviti:\n if avtor(tvit) not in s:\n s[avtor(tvit)] = besedilo(tvit)\n return s\n\n\ndef prestej_tvite(tviti):\n s = {}\n for tvit in tviti:\n if avtor(tvit) not in s:\n s[avtor(tvit)] = 0\n\n s[avtor(tvit)] += 1\n return s\n\ndef omembe(tviti):\n s = {}\n for tvit in tviti:\n if avtor(tvit) not in s:\n s[avtor(tvit)] = []\n s[avtor(tvit)].extend(se_zacne_z(tvit, \"@\"))\n return s\n\ndef neomembe(ime, omembe):\n s = {}\n list_kljucev = []\n seznam_neomenjenih = []\n for x in omembe:\n list_kljucev.append(x)\n for y in list_kljucev:\n if y not in omembe[ime] and y != ime:\n seznam_neomenjenih.append(y)\n return seznam_neomenjenih\n\ndef se_poznata(ime1, ime2, omembe):\n vsa_imena = []\n for x in omembe:\n vsa_imena.append(x)\n if ime1 and ime2 in vsa_imena:\n if ime1 in omembe[ime2] or \\\n ime2 in omembe[ime1]:\n return True\n return False\n\ndef hashtagi(tviti):\n s = dict.fromkeys(vsi_hashtagi(tviti))\n for x in s:\n s[x]=[]\n for tvit in tviti:\n if x in tvit:\n s[x].append(avtor(tvit))\n s[x].sort()\n return s\n","sub_path":"code/batch-1/vse-naloge-brez-testov/DN6-M-211.py","file_name":"DN6-M-211.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"329118589","text":"def vector_v_complex(dv_massiv):\n complex_num = list()\n for i in range(len(dv_massiv)):\n complex_num.append(complex(dv_massiv[i][0], dv_massiv[i][1]))\n return complex_num\n\n\n# a = [[1, 2], [3, 4], [5, 6]]\n# b = vector_v_complex(a)\n\n# ввод числа, преобразование типа в int\na = int(input())\n\n# находит цифры числа\nc = a % 10\nb = a % 100 // 10\nd = a // 100\n\n# находит максимальную цифру числа\nif(b > c):\n if (b > d):\n print(b)\nif(c > b):\n if(c > d):\n print(c)\nif(d > b):\n if (d > c):\n print(d)\nif(c == b == d):\n print(c)\n","sub_path":"first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"177834418","text":"\"\"\"\nCreated on 13/07/2015\n@author: Aitor Gomez Goiri \n\nBuiltin server for development.\n\nConfiguration file path is read from program args.\n\"\"\"\n\nfrom argparse import ArgumentParser\nfrom ptinstancemanager.config import configuration\n\n\ndef main(config_file, create_database, port_number):\n\tconfiguration.set_file_path(config_file)\n\tfrom ptinstancemanager.main import load_app, load_db\n\tapp = load_app()\n\n\tif create_database:\n\t\tdb = load_db()\n\t\tdb.create_all() # By default it doesn't create already created tables\n\t\tfrom ptinstancemanager.models import init_database\n\t\tinit_database(db, app.config['LOWEST_PORT'], app.config['HIGHEST_PORT'])\n\telse:\n\t\t# We don't run the app in the database creation mode.\n\t\t# Otherwise on flask's automatic restarts it will try to create the database and data again!\n\t\tapp.run(host='0.0.0.0', port=port_number, debug=True)\n\n\ndef entry_point():\n\tparser = ArgumentParser(description='Run sample web server which uses ptinstancemanager.')\n\tparser.add_argument('-createdb', action='store_true', dest='create_db',\n\t help='Do you want to create the database? 
(needed at least the first time)')\n\tparser.add_argument('-config', default='../../config.ini', dest='config',\n\t help='Configuration file.')\n\tparser.add_argument('-port', type=int, default=5000, dest='port',\n\t help='Port were the server will listen.')\n\targs = parser.parse_args()\n\n\t# Builtin server for development.\n\tmain(args.config, args.create_db, args.port)\n\n\n\nif __name__ == \"__main__\":\n\tentry_point()\n","sub_path":"src/ptinstancemanager/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"500687135","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 14 13:00:59 2018\n@author: Andrew Kavas\n\"\"\"\n\n# Heat Equation\n# du/dt = a d^2u/dt^2\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\nnt = 10000\nnx = 10\n\nu = np.zeros((nt+1, nx+1))\nu[:, 0] = u[:, -1] = 0\n\nx_fin = 10\nt_fin = 200\ndx = x_fin/nx\ndt = t_fin/nt\n\nalpha = .127 # per meter\n\nxarr = np.linspace(0, x_fin, nx+1)\n\n# u(t=0) = x(-x)\nfor kk in range(0, nx+1):\n u[0, kk] = dx*kk*(1-dt*kk)\n\nfor n in range(0, nt):\n for m in range(1, nx):\n u[n+1, m] = (alpha*dt/dx**2)*(u[n, m+1] - 2*u[n, m] + u[n, m-1]) + u[n, m]\n\n# u[n+1,m]-u[n,m] = alpha*dt/dx**2\n\nfig, ax = plt.subplots()\n\n# x = np.arange(0, 4*np.pi, 0.001)\n# y = np.arange(0,4*np.pi,0.001)\n# plt.plot(t,x,t,y)\n# plt.show()\n\nline, = ax.plot([], [], 'o-')\n\n\ndef animate(i):\n # print(len(xarr),len(u[i]))\n line.set_data(xarr,u[i])\n # line.set_ydata(np.cos(2*x + i/5.0))\n\n return line,\n\n\ndef init():\n line.set_data([],[])\n # line.set_xdata(np.ma.array(x, mask=True))\n # line.set_ydata(np.ma.array(x, mask=True))\n\n return line,\n\n\nani = animation.FuncAnimation(fig, animate, init_func=init, interval=1, blit=True)\n\nplt.xlim(0, 10)\nplt.ylim(0, 10)\n\nplt.show()\n\n","sub_path":"pyPhys/pdes/heat_Eqn/heat_eqn.py","file_name":"heat_eqn.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"649525596","text":"import pymysql\r\nimport config\r\nfrom collections import OrderedDict\r\nfrom flask import Flask, jsonify, request, render_template\r\nfrom flask_cors import CORS, cross_origin\r\nfrom threading import Thread\r\n\r\n\r\nfrom ExpenseInventory.Expense import expense_api\r\nfrom ExpenseInventory.Inventory import inventory_api, updateLiveInventory\r\nfrom Misc.PaymentGateway import PAY_API\r\nfrom Order import OrdDet_API\r\nfrom Recipes.Recipes import recipe_api\r\nfrom SMS.OTP import OTP_API\r\nfrom SMS.TransactionSMS import traSMS_api\r\nfrom SMS.SMS_Misc import sendOrderSMS, sendSMS\r\nfrom Wallet.Wallet import wallet_api, updateWalletAmount, redeemWalletBalance\r\nfrom Misc.Sales import sales_api\r\nfrom Misc import xeno\r\nfrom CouponCode.Coupon import coupon_API, redeemcoupon\r\nfrom MLM import foodfie_MLM\r\n\r\napplication = 
Flask(__name__)\r\nCORS(application)\r\napplication.register_blueprint(OTP_API)\r\napplication.register_blueprint(coupon_API)\r\napplication.register_blueprint(traSMS_api)\r\napplication.register_blueprint(PAY_API)\r\napplication.register_blueprint(OrdDet_API)\r\napplication.register_blueprint(wallet_api)\r\napplication.register_blueprint(recipe_api)\r\napplication.register_blueprint(expense_api)\r\napplication.register_blueprint(inventory_api)\r\napplication.register_blueprint(sales_api)\r\napplication.register_blueprint(xeno.xeno_API)\r\n# application.register_blueprint(OrdDet_API)\r\napplication.config[\"JSON_SORT_KEYS\"] = False\r\n\r\n\r\n@application.route('/')\r\ndef index():\r\n return \"Foodfie API!\"\r\n\r\n\r\n@application.route(\"/all\")\r\n@cross_origin(origin='*',headers=['Content- Type','Authorization'])\r\ndef allPages():\r\n return render_template('index.html', test ='test')\r\n\r\n\r\ndef connectDB():\r\n conn = pymysql.connect(host = config.host, port=config.port, user= config.user, password = config.password, db= config.db)\r\n cur = conn.cursor()\r\n return conn, cur\r\n\r\n\r\n@application.route('/api/v1.0/verifyemployee', methods = ['GET'])\r\n@cross_origin(origin='*',headers=['Content- Type','Authorization'])\r\ndef VerifyEmploye():\r\n username = request.args.get('username')\r\n password = request.args.get('password')\r\n conn, cursor = connectDB()\r\n cursor.execute(\"\"\"Select a.StoreId, b.StoreName, b.PackagingAmount, b.CashTaxPer\r\n from foodfie.Employee a\r\n join foodfie.Store b\r\n on a.StoreId = b.StoreId \r\n where a.username ='{0}' and a.password = '{1}'\"\"\".format(username, password))\r\n data = cursor.fetchall()[0]\r\n if len(data) == 0:\r\n return jsonify({'StoreId': -1})\r\n else:\r\n return jsonify({'StoreId': data[0],\r\n 'StoreName': data[1],\r\n 'PackagingAmount':data[2],\r\n 'CashTax':data[3]\r\n })\r\n\r\n\r\n@application.route('/api/v1.0/verifycustomer', methods = ['GET'])\r\n@cross_origin(origin='*',headers=['Content- Type','Authorization'])\r\ndef VerifyCustomer():\r\n mobileNo = request.args.get('mobileno')\r\n conn, cursor = connectDB()\r\n cursor.execute(\"\"\"Select CustomerId, CustomerPhoneNo, CustomerName \r\n from foodfie.Customer where customerphoneno =\"\"\" + str(mobileNo))\r\n data = cursor.fetchall()\r\n return jsonify({'isExist': 1} if data else {'isExist': 0})\r\n\r\n\r\n@application.route('/api/v1.0/verifymobile', methods = ['GET'])\r\n@cross_origin(origin='*',headers=['Content- Type','Authorization'])\r\ndef VerifyMobileNo():\r\n mobileNo = request.args.get('mobileno')\r\n storeId = request.args.get('storeid')\r\n referenceCode = request.args.get('reference_code')\r\n conn, cursor = connectDB()\r\n cursor.execute(\"\"\"Select CustomerId, CustomerPhoneNo, CustomerName \r\n from foodfie.Customer where customerphoneno =\"\"\" + str(mobileNo))\r\n data = cursor.fetchall()\r\n if len(data) == 0:\r\n referenceCode = None if referenceCode == '' else referenceCode\r\n cursor.execute(\"\"\"insert into foodfie.Customer (CustomerPhoneNo,StoreId,RegisterDate, ReferredBy) \r\n values ('{0}',{1}, current_timestamp, '{2}')\"\"\".format(mobileNo, storeId, referenceCode))\r\n conn.commit()\r\n cursor.execute('select customerid, customername from foodfie.Customer order by 1 desc limit 1;')\r\n result = cursor.fetchall()\r\n return jsonify({'ID':result[0][0],'Name': result[0][1]})\r\n else:\r\n return jsonify({'ID':data[0][0],'Name': data[0][2]})\r\n\r\n\r\n@application.route('/api/v1.0/newcustomer', methods = 
['POST'])\r\n@cross_origin(origin='*',headers=['Content- Type','Authorization'])\r\ndef NewCustomer():\r\n CustomerMobileNo = request.args.get('mobile')\r\n CustomerName = request.args.get('name')\r\n conn, cursor = connectDB()\r\n cursor.execute('insert into foodfie.Customer (CustomerPhoneNo,CustomerName,RegisterDate) values (\"' + str(CustomerMobileNo) + '\",\"' + str(CustomerName) + '\", current_timestamp);')\r\n conn.commit()\r\n cursor.execute('select customerid, customername from foodfie.Customer order by 1 desc limit 1;')\r\n data = cursor.fetchall()\r\n conn.close()\r\n if len(data) == 0:\r\n return jsonify({})\r\n else:\r\n return jsonify({'ID':data[0][0],'Name': data[0][1]})\r\n\r\n\r\n@application.route('/api/v2.0/newcustomer', methods = ['POST'])\r\n@cross_origin(origin='*',headers=['Content- Type','Authorization'])\r\ndef NewCustomers():\r\n CustomerMobileNo = request.args.get('mobile')\r\n conn, cursor = connectDB()\r\n cursor.execute('insert into foodfie.Customer (CustomerPhoneNo,RegisterDate) values (\"' + str(CustomerMobileNo) + '\", current_timestamp);')\r\n conn.commit()\r\n cursor.execute('select customerid from foodfie.Customer order by 1 desc limit 1;')\r\n data = cursor.fetchall()\r\n conn.close()\r\n if len(data) == 0:\r\n return jsonify({})\r\n else:\r\n return jsonify({'ID':data})\r\n\r\n\r\n@application.route('/api/v1.0/category', methods = ['GET','PUT','POST'])\r\n@cross_origin(origin='*',headers=['Content- Type','Authorization'])\r\ndef Category():\r\n if request.method == 'POST':\r\n CategoryName = request.args.get('categoryname')\r\n conn, cur = connectDB()\r\n cur.execute('insert into foodfie.Category (CategoryName) values(\"' +str(CategoryName) +'\")')\r\n conn.commit()\r\n conn.close()\r\n return jsonify({'Result': 'Successful'})\r\n\r\n elif request.method == 'GET':\r\n output = {}\r\n conn, cur = connectDB()\r\n cur.execute('Select * from Category')\r\n result = cur.fetchall()\r\n for re in result:\r\n output[re[0]] = re[1]\r\n return jsonify({\"Category\" : output})\r\n\r\n elif request.method == 'PUT':\r\n pass\r\n\r\n#\r\n# @application.route('/api/v1.0/item', methods = ['GET'])\r\n# @cross_origin(origin='*',headers=['Content- Type','Authorization'])\r\n# def GetItem():\r\n# categoryId = request.args.get('categoryid')\r\n# if categoryId is not None:\r\n# conn, cursor = connectDB()\r\n# cursor.execute(\"Select ItemId,ItemName, Quantity, ItemPrice, CategoryId from foodfie.Item where categoryid=\" + str(categoryId))\r\n# data = cursor.fetchall()\r\n# if len(data) == 0:\r\n# return jsonify({})\r\n# else:\r\n# items = OrderedDict()\r\n# for item in data:\r\n# items[item[0]] = OrderedDict()\r\n# for x,y in zip(('ID', 'Type', 'Qty', 'Price', 'CatagoryId'), item):\r\n# items[item[0]][x] = y\r\n# return jsonify({'Item': items})\r\n# else:\r\n# conn, cursor = connectDB()\r\n# cursor.execute(\"Select ItemId, ItemName, Quantity, ItemPrice, CategoryId from foodfie.Item\")\r\n# data = cursor.fetchall()\r\n# if len(data) == 0:\r\n# return jsonify({})\r\n# else:\r\n# items = OrderedDict()\r\n# for item in data:\r\n# items[item[0]] = OrderedDict()\r\n# for x,y in zip(('ID', 'Type', 'Qty', 'Price', 'CatagoryId'), item):\r\n# items[item[0]][x] = y\r\n#\r\n# return jsonify({'Item': items})\r\n\r\n\r\n@application.route('/api/v2.0/item', methods = ['GET'])\r\n@cross_origin(origin='*',headers=['Content- Type','Authorization'])\r\ndef NewGetItem():\r\n categoryId = request.args.get('categoryid')\r\n if categoryId is not None:\r\n\r\n conn, cursor = connectDB()\r\n 
cursor.execute(\"\"\"select CategoryId, ItemName, ItemId, ItemPrice, Quantity\r\n from foodfie.Item\r\n where categoryid = %s\r\n group by CategoryId, ItemName, Quantity\r\n order by CategoryId, ItemId;\"\"\" %categoryId)\r\n data = list(cursor.fetchall())\r\n if len(data) == 0:\r\n return jsonify({})\r\n else:\r\n count = 1\r\n items = OrderedDict()\r\n while len(data) > 0:\r\n firstelement = data[0]\r\n itemlist = [item for item in data if item[0] == firstelement[0] and item[1] == firstelement[1]]\r\n run = 0\r\n items[count] = OrderedDict()\r\n for no, item in enumerate(itemlist):\r\n if run < 1:\r\n for x,y in zip(('CatagoryId', 'Type' ), (item[0], item[1])):\r\n items[count][x] = y\r\n run += 1\r\n items[count][\"items\"] = OrderedDict()\r\n items[count][\"items\"][no] = OrderedDict()\r\n for x,y in zip(('ID', 'Price', 'Qty' ), (item[2], item[3], item[4])):\r\n items[count][\"items\"][no][x] = y\r\n data.remove(item)\r\n count +=1\r\n return jsonify({'Item': items})\r\n else:\r\n conn, cursor = connectDB()\r\n cursor.execute(\"\"\"select CategoryId, ItemName, ItemId, ItemPrice, Quantity\r\n from foodfie.Item\r\n group by CategoryId, ItemName, Quantity\r\n order by CategoryId, ItemId;\"\"\")\r\n data = list(cursor.fetchall())\r\n if len(data) == 0:\r\n return jsonify({})\r\n else:\r\n count = 1\r\n items = OrderedDict()\r\n while len(data) > 0:\r\n firstelement = data[0]\r\n itemlist = [item for item in data if item[0] == firstelement[0] and item[1] == firstelement[1]]\r\n run = 0\r\n items[count] = OrderedDict()\r\n for no, item in enumerate(itemlist):\r\n if run < 1:\r\n for x,y in zip(('CatagoryId', 'Type' ), (item[0], item[1])):\r\n items[count][x] = y\r\n run += 1\r\n items[count][\"items\"] = OrderedDict()\r\n items[count][\"items\"][no] = OrderedDict()\r\n for x,y in zip(('ID', 'Price', 'Qty' ), (item[2], item[3], item[4])):\r\n items[count][\"items\"][no][x] = y\r\n data.remove(item)\r\n count +=1\r\n\r\n return jsonify({'Item': items})\r\n\r\n\r\n@application.route('/api/v4.0/order', methods = ['POST'])\r\n@cross_origin(origin='*',headers=['Content- Type','Authorization'])\r\ndef Orderss():\r\n try:\r\n CustomerId = request.args.get('customerid')\r\n storeId = request.args.get('storeid')\r\n TotalAmount = request.args.get('totalamount')\r\n PaymentType = request.args.get('paymenttype')\r\n ItemId = request.args.get('itemid') if request.args.get('itemid') is None else request.args.get('itemid').split(',')\r\n QTY = request.args.get('qty') if request.args.get('qty') is None else request.args.get('qty').split(',')\r\n Price = request.args.get('price') if request.args.get('price') is None else request.args.get('price').split(',')\r\n IsRedeem = request.args.get('isredeem')\r\n NetAmount = request.args.get('netamount')\r\n OrderType = request.args.get('ordertype')\r\n except:\r\n return jsonify({'Variable Not Set': 'Unsuccessful'})\r\n\r\n try:\r\n conn, cursor = connectDB()\r\n\r\n cursor.execute(\"\"\"insert into foodfie.Order(CustomerId,StoreId,TotalAmount,PaymentType,NetAmount, Order_Type) values ({0}, {1}, {2},'{3}', {4}, '{5}')\"\"\".format(str(CustomerId), str(storeId), str(TotalAmount), str(PaymentType), str(NetAmount), str(OrderType)))\r\n conn.commit()\r\n\r\n # Find the Order Id from the Order Table\r\n cursor.execute('Select OrderId from foodfie.Order order by OrderId desc limit 1 ;')\r\n OrderId = cursor.fetchone()[0]\r\n\r\n # Wallet Functionality\r\n if IsRedeem == 'true':\r\n TotalAmount = int(TotalAmount)\r\n redeemWalletBalance(OrderId, CustomerId, 
TotalAmount)\r\n\r\n\r\n        # Wallet Functionality\r\n        updateWalletAmount(CustomerId, NetAmount, OrderId)\r\n\r\n        # Verify no rows are already present in OrderDetail table\r\n        cursor.execute('Select * from foodfie.OrderDetail where OrderId=' + str(OrderId))\r\n        OrderIdExist = cursor.fetchall()\r\n        if OrderIdExist.__len__() == 0:\r\n            for Item, Qt, Pri in zip(ItemId,QTY,Price):\r\n                cursor.execute('insert into foodfie.OrderDetail (OrderId,ItemId,QTY,Price) values (' + str(OrderId) + ',' + str(Item) + ',\"' +str(Qt) + '\", '+ str(Pri) +');')\r\n            conn.commit()\r\n            return jsonify({'OrderId': OrderId})\r\n        else:\r\n            return jsonify({'Something Weird in Order': 'UnSuccessful'})\r\n    except:\r\n        return jsonify({'Data Issue': 'UnSuccessful'})\r\n    finally:\r\n        conn.commit()\r\n        conn.close()\r\n\r\n\r\n@application.route('/api/v3.0/order', methods = ['POST'])\r\n@cross_origin(origin='*',headers=['Content- Type','Authorization'])\r\ndef orders():\r\n    try:\r\n        CustomerId = request.args.get('customerid')\r\n        storeId = request.args.get('storeid')\r\n        TotalAmount = request.args.get('totalamount')\r\n        PaymentType = request.args.get('paymenttype')\r\n        ItemId = request.args.get('itemid') if request.args.get('itemid') is None else request.args.get('itemid').split(',')\r\n        QTY = request.args.get('qty') if request.args.get('qty') is None else request.args.get('qty').split(',')\r\n        Price = request.args.get('price') if request.args.get('price') is None else request.args.get('price').split(',')\r\n        IsRedeem = request.args.get('isredeem')\r\n        NetAmount = request.args.get('netamount')\r\n        OrderType = request.args.get('ordertype')\r\n        couponCode = request.args.get('coupon')\r\n        couponDiscount = request.args.get('coupon_discount')\r\n        discountType = request.args.get('discount_type')\r\n        packagingCharge = request.args.get('packaging_charge')\r\n        extraCharge = request.args.get('extra_charge')\r\n        reedemAmount = request.args.get('reedem_amount')\r\n        finalAmount = request.args.get('finalamount')\r\n    except:\r\n        return jsonify({'Variable Not Set': 'Unsuccessful'})\r\n\r\n    try:\r\n        conn, cursor = connectDB()\r\n\r\n        cursor.execute(\"\"\"insert into foodfie.Order(CustomerId,StoreId,TotalAmount,PaymentType,NetAmount,\r\n                        Order_Type, Discount_Type, CouponCode, CouponDiscount, WalletReedemAmount,\r\n                        CashCharges, PackagingCharges, FinalAmount) \r\n                        values ({0}, {1}, {2},'{3}', {4}, '{5}', {6}, '{7}', {8}, {9}, {10}, {11}, {12})\r\n                        \"\"\".format(str(CustomerId),str(storeId), str(TotalAmount), str(PaymentType),\r\n                                   str(NetAmount),str(OrderType), discountType, couponCode, couponDiscount,\r\n                                   reedemAmount, extraCharge, packagingCharge, str(finalAmount)))\r\n        conn.commit()\r\n\r\n        # Find the Order Id from the Order Table\r\n        cursor.execute('Select OrderId from foodfie.Order order by OrderId desc limit 1 ;')\r\n        OrderId = cursor.fetchone()[0]\r\n\r\n        # Wallet Functionality\r\n        if IsRedeem == 'true':\r\n            TotalAmount = int(TotalAmount)\r\n            redeemWalletBalance(OrderId, CustomerId, TotalAmount)\r\n\r\n        # Wallet Redeem Functionality\r\n        if PaymentType == 'cash' or (int(discountType) == 0 and int(couponDiscount) > 0 and len(couponCode) > 0):\r\n            if PaymentType == 'cash':\r\n                SMS_TEXT = \"\"\"Ohhh!! you have just lost 5% cashback by paying via cash.\\n\\nGo cashless. 
Pay via paytm or card(coming soon) and get instant 5% cashback in your Foodfie wallet.\"\"\"\r\n                    SQL = \"\"\"SELECT CustomerPhoneNo from foodfie.Customer\r\n                            WHERE CustomerId = {0}\"\"\".format(CustomerId)\r\n                    cursor.execute(SQL)\r\n                    mobileNo = cursor.fetchall()[0][0]\r\n                    sendSMS(mobileNo, SMS_TEXT, promo=False)\r\n                else:\r\n                    pass\r\n            else:\r\n                updateWalletAmount(CustomerId, finalAmount, OrderId)\r\n\r\n            # Update Coupon Code functionality\r\n            if int(discountType) == 0 and int(couponDiscount) > 0 and len(couponCode) > 0:\r\n                redeemcoupon(CustomerId, couponCode)\r\n\r\n            # Update Inventory\r\n            updateLiveInventory(storeId, OrderId, ItemId, QTY)\r\n\r\n            # Foodfie MLM\r\n            # foodfie_MLM.foodfie_MLM(CustomerId, OrderId, int(NetAmount))\r\n\r\n            # Send SMS\r\n            sendOrderSMS(CustomerId, OrderId, ItemId, QTY, Price, discountType, IsRedeem, couponCode,\r\n                         couponDiscount, packagingCharge, extraCharge, reedemAmount, TotalAmount, OrderType, PaymentType, NetAmount)\r\n\r\n            # Verify no rows are already present in OrderDetail table\r\n            cursor.execute('Select * from foodfie.OrderDetail where OrderId=' + str(OrderId))\r\n            OrderIdExist = cursor.fetchall()\r\n            if OrderIdExist.__len__() == 0:\r\n                for Item, Qt, Pri in zip(ItemId,QTY,Price):\r\n                    cursor.execute('insert into foodfie.OrderDetail (OrderId,ItemId,QTY,Price) values (' + str(OrderId) + ',' + str(Item) + ',\"' +str(Qt) + '\", '+ str(Pri) +');')\r\n                conn.commit()\r\n                return jsonify({'OrderId': OrderId})\r\n            else:\r\n                return jsonify({'Something Weird in Order': 'UnSuccessful'})\r\n\r\n    except:\r\n        return jsonify({'Data Issue': 'UnSuccessful'})\r\n    finally:\r\n        conn.commit()\r\n        conn.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    #t1 = Thread(target=SMS_Misc.sendFeedbackSMS)\r\n    t2 = Thread(target=application.run)\r\n    #t1.start()\r\n    t2.start()\r\n\r\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":18570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} {"seq_id":"212031063","text":"# https://python-gtk-3-tutorial.readthedocs.io/en/latest/introduction.html\n# To run: python3 Window.py\n\nimport gi\ngi.require_version('Gtk', '3.0');\nfrom gi.repository import Gtk\n\nfrom huffman import Huffman;\n\nclass HuffmanWindow (Gtk.Window):\n\n    def __init__ (self):\n        Gtk.Window.__init__ (self, title= \"Huffman Compressor\");\n        self.set_size_request (300, 180);\n\n        main_box = Gtk.Box (orientation=Gtk.Orientation.HORIZONTAL, spacing=6);\n        main_box.set_margin_left (6);\n        main_box.set_margin_right (6);\n        main_box.set_margin_top (6);\n        main_box.set_margin_bottom (6);\n\n        box = Gtk.Box (orientation=Gtk.Orientation.VERTICAL, spacing=6);\n        box.set_hexpand (True);\n        box.set_vexpand (True);\n\n        self.separator = Gtk.Separator (orientation=Gtk.Orientation.VERTICAL);\n        self.separator.set_visible (False);\n        self.separator.set_no_show_all (True);\n\n        main_box.add (box);\n        main_box.add (self.separator);\n\n        self.add (main_box);\n\n        encode_label = Gtk.Label (\"Encode text:\");\n        encode_label.set_halign (Gtk.Align.START);\n        encode_label.get_style_context ().add_class (\"h4\");\n\n        self.encode_entry = Gtk.Entry ();\n        self.encode_entry.set_placeholder_text (\"Text to compress...\");\n        self.encode_entry.connect (\"changed\", self.compress_text);\n\n        self.file_chooser = Gtk.FileChooserButton (title=\"Text\", action=Gtk.FileChooserAction.OPEN);\n        self.file_chooser.connect (\"file_set\", self.file_set);\n\n        file_label = Gtk.Label (\"Input File:\");\n        file_label.set_halign (Gtk.Align.START);\n        
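# the \"h4\" style class promotes the label to a heading; whether it has any visible effect depends on the GTK theme\n        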
file_label.get_style_context ().add_class (\"h4\");\n\n box.add (encode_label);\n box.add (self.encode_entry);\n box.add (file_label);\n box.add (self.file_chooser);\n\n # Results UI\n self.results_revealer = Gtk.Revealer ();\n self.results_revealer.set_reveal_child (False);\n self.results_revealer.set_transition_type (Gtk.RevealerTransitionType.SLIDE_RIGHT);\n\n results_stack = Gtk.Stack ();\n results_stack.set_hhomogeneous (False);\n results_stack.set_vhomogeneous (False);\n\n switcher = Gtk.StackSwitcher ();\n switcher.set_stack (results_stack);\n switcher.set_halign (Gtk.Align.CENTER);\n\n scrollview = Gtk.ScrolledWindow ();\n scrollview.add (results_stack);\n scrollview.set_vexpand (True);\n scrollview.set_hexpand (True);\n\n results_box = Gtk.Box (orientation=Gtk.Orientation.VERTICAL, spacing=6);\n results_box.add (switcher);\n results_box.add (scrollview);\n\n self.results_revealer.add (results_box);\n main_box.add (self.results_revealer);\n\n # Frequency GUI\n self.frequency = Gtk.Label (\"Character Frequency Code\");\n self.frequency.set_use_markup (True);\n self.frequency.set_valign (Gtk.Align.START);\n results_stack.add_titled (self.frequency, \"frequency\", \"Frequency\");\n\n self.tree = Gtk.Label (\"\");\n results_stack.add_titled (self.tree, \"tree\", \"Binary Tree\");\n\n original_message = Gtk.Label (\"Coded message:\");\n original_message.set_halign (Gtk.Align.START);\n original_message.get_style_context ().add_class (\"h4\");\n\n compression_label = Gtk.Label (\"New Size:\");\n compression_label.set_halign (Gtk.Align.START);\n compression_label.get_style_context ().add_class (\"h4\");\n\n self.original_message = Gtk.Label (\"\");\n self.original_message.set_halign (Gtk.Align.START);\n self.original_message.set_line_wrap (True);\n self.original_message.set_line_wrap_mode (1);\n self.original_message.get_style_context ().add_class (\"dim-label\");\n\n self.efficiency = Gtk.ProgressBar ();\n\n efficiency_box = Gtk.Box (orientation=Gtk.Orientation.VERTICAL, spacing=6);\n efficiency_box.add (compression_label);\n efficiency_box.add (self.efficiency);\n efficiency_box.add (original_message);\n efficiency_box.add (self.original_message);\n\n results_stack.add_titled (efficiency_box, \"efficiency\", \"Efficiency\");\n\n # This is the text to compress. 
You get it after pressing _enter_\n    def compress_text (self, entry):\n        huffman = Huffman ();\n        huffman.originalMessage = str(entry.get_text ())\n        huffman.startHuffmanCoding ();\n        self.show_results (huffman);\n\n    def file_set (self, file_chooser):\n        openedFile = open (file_chooser.get_file ().get_path (), \"r\");\n        text = \"\"\n        for line in openedFile.readlines ():\n            text += line\n\n        huffman = Huffman ();\n        huffman.originalMessage = text\n        huffman.startHuffmanCoding ();\n        self.show_results (huffman);\n\n    def show_results (self, huffman):\n        self.separator.set_visible (True);\n        self.results_revealer.set_reveal_child (True);\n        self.tree.set_label (self.format_tree (huffman));\n        self.frequency.set_label (huffman.printFreqTable ());\n\n        self.original_message.set_label (huffman.codedMessage);\n        self.efficiency.set_fraction (huffman.efficiencyLevel);\n        self.efficiency.set_text (\"{0:.2f}%\".format (huffman.efficiencyLevel));\n        self.efficiency.set_show_text (True)\n\n    def format_tree (self, huffman):\n        lines = \"{nodes}\".format (nodes=huffman.node_list[0]).split ('(');\n        result = '';\n\n        indent_level = 0;\n        for node in lines:\n            node_str = \"{node}\\n\".format (node=node);\n            if node_str == \"\\n\":\n                continue;\n\n            for x in range (0, indent_level):\n                result = result + \" \";\n            result = result + node_str.replace (\")\", \"\");\n\n            closures = node_str.split (\")\");\n            indent_level = indent_level + 2;\n            for c in closures:\n                indent_level = indent_level - 1;\n\n        return result;\n\ndef main():\n    win = HuffmanWindow ();\n    win.connect (\"delete-event\", Gtk.main_quit);\n    win.show_all ();\n    Gtk.main ();\n\nif __name__ == '__main__':\n    main()\n","sub_path":"src/Window.py","file_name":"Window.py","file_ext":"py","file_size_in_byte":6085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"347531354","text":"from django.conf.urls import url\nfrom . 
import views\n \nurlpatterns = [\n url(r'^shoplist/$',views.shoplist,name = 'shoplist'),\n url(r'^shopopen/(?P\\d+)/$', views.shopopen, name='shopopen'),\n url(r'^shopclose/(?P\\d+)/$', views.shopclose, name='shopclose'),\n\n\n\n\n\n]","sub_path":"Boss/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"276072507","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/9/24 23:53\n# @Author : liulijun\n# @Site : \n# @File : curve.py\n# @Software: PyCharm\nimport sqlite3\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport datetime\n\ndef curve():\n [conn,cur]=__sqlite_conn__()\n sqlstr=\"SELECT farm_name,farm_code,wtgs_id,time,gearbox,generator,pitch,rotor_speed,turbine FROM type_3mw WHERE wtgs_id='30002001'\"\n res=pd.read_sql(sqlstr,con=conn)\n res.index=[datetime.datetime.strptime(time,\"%Y-%m-%d %H:%M:%S\") for time in res['time'].tolist()]\n res=res.replace('A',4)\n res = res.replace('B', 3)\n res = res.replace('C', 2)\n res = res.replace('D', 1)\n print(res)\n ax=res['turbine'].plot()\n plt.yticks([1,2,3,4])\n ax.set_yticklabels(['D','C','B','A'])\n plt.title('turbine')\n plt.show()\n\n\n\ndef __sqlite_conn__():\n conn = sqlite3.connect('../DB/fce.db')\n cur = conn.cursor()\n return conn, cur\n\nif __name__==\"__main__\":\n curve()","sub_path":"classify_3MW/curve.py","file_name":"curve.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"562010833","text":"from kafka import KafkaProducer\nimport json\nfrom faker import Faker\nimport time\n\nfake = Faker()\n\ndef json_serializer(data):\n return json.dumps(data)\n\ndef get_data():\n return{\n 'name':fake.name(),\n 'address':fake.address(),\n 'created_at':fake.year()\n }\n\nif __name__ == '__main__':\n producer = KafkaProducer(bootstrap_servers=['localhost:9092'],value_serializer=json_serializer)\n print('started producer')\n count=20\n while(1==1):\n data = get_data()\n print(data)\n producer.send('testtopic',data)\n time.sleep(60)","sub_path":"kafka_spark/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"243950974","text":"from rapidsms.contrib.handlers import KeywordHandler\n\nclass PresenceHandler(KeywordHandler):\n keyword = \"alright\"\n\n def help(self):\n \"\"\"Invoked if someone just sends `ALRIGHT`. We also call this\n from `handle` if we don't recognize the arguments to ALRIGHT.\n \"\"\"\n self.respond(\"Cool man. Stay safe\")\n\n def handle(self, text):\n \"\"\"Invoked if someone sends `HELP `\"\"\"\n text = text.strip().lower()\n if 'danger' in text:\n self.respond('Ok. 
Lockdown Marc')\n        else:\n            self.help()","sub_path":"contacts/contact_handler.py","file_name":"contact_handler.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"7436918","text":"#!/usr/bin/python\n# Filename: mrci.py\n\n# mrci.py - the MRCI algorithm.\n\nfrom .subpackage import *\n\n\nclass MRCI_Calc (object):\n    # initialize the class object\n    def __init__(self, medList):\n        self.medList = medList\n        self.medCount = len(self.medList)\n\n    # Section A: sum of weighted values relating to dose forms\n    def sectionA(self):\n        score = 0\n        for rx in self.medList:\n            for stdDoseForm in list(dosage_forms.doseForm.keys()):\n                if rx.doseForm == stdDoseForm:\n                    score += dosage_forms.doseForm[stdDoseForm]\n        return score\n\n    # Section B: sum of weighted values relating to dose frequency\n    def sectionB(self):\n        score = 0\n        for rx in self.medList:\n            for stdDoseFreq in list(dosage_frequency.doseFreq.keys()):\n                if rx.doseFreq == stdDoseFreq:\n                    score += dosage_frequency.doseFreq[stdDoseFreq]\n        return score\n\n    # Section C: sum of weighted values relating to special dosing instructions\n    def sectionC(self):\n        score = 0\n        for rx in self.medList:\n            for stdAddlDirs in list(\n                    additional_directions.additionalDirs.keys()):\n                if rx.notes == stdAddlDirs:\n                    score += additional_directions.additionalDirs[stdAddlDirs]\n        return score\n\n    # This is the score total\n    def returnMRCI(self):\n        return self.sectionA() + self.sectionB() + self.sectionC()\n\n    def getMedCount(self):\n        return self.medCount\n\n# End of: mrci.py\n\n'''\n# INSTRUCTIONS\n    # 1. MRCI applies only to prescribed medications.\n    #    No assumptions are to be made.\n    # 2. Three sections of the scale. Complete them in\n    #    order, add at the end.\n    # 3. If the same medication occurs more than once\n    #    (brand and dosage form) it's counted as one.\n    # 4. In cases where dosage is optional, choose\n    #    the instruction with the smallest dose / freq.\n    # 5. In certain cases the dosing freq needs to be\n    #    calculated (e.g. 1 qa + 1 qhs)\n    # 6. It is possible that with certain 'as directed'\n    #    instructions the regimen will not get a score\n    #    under dose frequency.\n    # 7. If there is more than one dosing freq,\n    #    they should be scored for each of the freq.\n    # 8. Instances where two or more medications are\n    #    mutually exclusive, they need to be scored\n    #    twice or more as PRN with the recommended\n    #    dosing frequency.\n    # 9. In cases where there is no matching option,\n    #    choose the closest option (e.g. 
6x/day = q4hr)\n    '''","sub_path":"tmst_skeleton_proj/package/mrci.py","file_name":"mrci.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"556291478","text":"# Rest API\nimport requests\nfrom requests.models import parse_header_links\n\nurl = \"https://api.upbit.com/v1/market/all\"\nresp = requests.get(url)\ndata = resp.json() # stored as a list\n\nkrw_ticker = []\nfor coin in data:\n    ticker = coin[\"market\"]\n\n    if ticker.startswith(\"KRW\"):\n        krw_ticker.append(ticker)\nprint(krw_ticker)\n\n","sub_path":"PythonApplication1/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} {"seq_id":"240077505","text":"import networkx as nx\nimport matplotlib.pyplot as plt\n\n\n\ndef DFS(start_list,number_of_nodes,nodege_dict,vizited_nodes=set(),n=1):\n    if len(vizited_nodes)==number_of_nodes or len(start_list)==0:\n        pass\n    else:\n        \n        vizited=set()\n        new_start_list=[]    \n        k=1\n        for i in range(len(start_list)):\n            for j in range(len(nodege_dict[start_list[i]])):\n                if nodege_dict[start_list[i]][j] not in vizited_nodes and nodege_dict[start_list[i]][j] not in vizited and all(nodege_dict[start_list[i]][j]!=start_list[o] for o in range(len(start_list))) and all(nodege_dict[start_list[i]][j]!=new_start_list[o] for o in range(len(new_start_list))):\n                    new_start_list.append(nodege_dict[start_list[i]][j])\n                    edge_list.append((start_list[i],nodege_dict[start_list[i]][j]))\n                    pos[start_list[i]]=[k*1000/(len(start_list)+1),10000-n*100]\n                    vizited.add(start_list[i])\n                    k+=1\n        DFS(new_start_list,number_of_nodes,nodege_dict,vizited_nodes=vizited.union(vizited_nodes),n=n+1)\n        \n        \n\n\ndata=open('data.txt','r')\n\ng=nx.Graph()\ndata_vallue=data.readlines()\nnode_list=[]\nedge_list=[]\nnodege_dict={}\nlabel_dict={}\npos={}\nfor i in range(len(data_vallue)):\n    small_data=data_vallue[i].split()\n    \n    if not(small_data[0] in label_dict):\n        label_dict[small_data[0]]=small_data[0]\n    if not(small_data[1] in label_dict):\n        label_dict[small_data[1]]=small_data[1]\n    if small_data[0] in nodege_dict:\n        nodege_dict[small_data[0]].append(small_data[1])\n    else:\n        nodege_dict[small_data[0]]=[small_data[1]]\n    if small_data[1] in nodege_dict:\n        nodege_dict[small_data[1]].append(small_data[0])\n    else:\n        nodege_dict[small_data[1]]=[small_data[0]]\n    if all(node_list[j]!=small_data[0] for j in range(len(node_list))):\n        node_list.append(small_data[0])\n    if all(node_list[j]!=small_data[1] for j in range(len(node_list))):\n        node_list.append(small_data[1])\nprint(' '.join(node_list))\nhey=input('Please, input the first node\\n')\nDFS([hey],len(node_list),nodege_dict)\n\n\nnx.draw_networkx_nodes(g,pos,\n                       nodelist=node_list,\n                       node_color='g',\n                   node_size=500,\n                   alpha=0.7)\n    \nnx.draw_networkx_edges(g,pos,\n                       edgelist=edge_list,\n                       width=2,edge_color='g',style='solid',\n                       alpha=0.7)\nnx.draw_networkx_labels(g,pos,font_size=10,labels=label_dict,font_color='black')\n\n\n    \n    \nplt.axis('off')\nplt.savefig(\"g.png\")\nplt.show()   \n    \n    \n\n","sub_path":"task-2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} {"seq_id":"10130972","text":"from models import Extended_Corr_AE\nfrom torchvision import transforms\nimport torch,torchvision\nimport matplotlib.pyplot as plt\nimport csv\nfrom data_processing import text_feature_get\nfrom multiprocessing import 
cpu_count\nimport threading\nimport time,os\nfrom PIL import Image\nn = 2 # semaphore: make sure both loaders finish before model training starts\ntransform = transforms.Compose([\n    transforms.Resize((250,300)),\n    transforms.ToTensor(),\n    transforms.Normalize(\n        mean=[0.485, 0.456, 0.406],\n        std=[0.229, 0.224, 0.225]\n    )\n])\n\nclass TextLoaderThread(threading.Thread):\n    def __init__(self):\n        super(TextLoaderThread,self).__init__()\n    def run(self):\n        global _text_data,text_data\n        text_data = text_feature_get.get_text_feature(texts=_text_data)\n        global n\n        n -= 1\n# thread that extracts the text features\n\nclass ImgLoaderThread(threading.Thread):\n    def __init__(self):\n        super(ImgLoaderThread,self).__init__()\n    def run(self):\n        global _img_data,img_data\n        i = 1\n        for img in _img_data:\n            if i % 1000 == 0:\n                print(i, \"images of\", len(_img_data), \"images have been loaded\")\n            i += 1\n            img_data.append(transform(Image.open(img)).numpy())\n        img_data = torch.tensor(img_data).float()\n        global n\n        n -= 1\n# thread that loads the images\n\n\nif __name__ == '__main__':\n    x = input(\"input path of data:\")\n    _text_data = []\n    _img_data = []\n    texts = list(csv.reader(open(x + '/cxr/report/indiana_reports.csv', encoding='utf-8')))[1:]\n    texts = {texts[i][0]: texts[i][6] for i in range(len(texts)) if texts[i][6] != \"\"}\n    imgs = list(csv.reader(open(x + '/cxr/report/indiana_projections.csv', encoding='utf-8')))[1:]\n\n    for i in range(len(imgs)):\n        uid = imgs[i][0]\n        filename = 'CXR' + imgs[i][1].replace('.dcm', '')\n        if uid in texts:\n            _text_data.append(texts[uid])\n            _img_data.append(x + '/cxr/image/' + filename)\n            _text_data.append(texts[uid])\n            _img_data.append(x + '/cxr/image/' + 'flip_' + filename)\n\n    text_data = []\n    img_data = []\n    t1 = TextLoaderThread()\n    t2 = ImgLoaderThread()\n    t1.start()\n    t2.start()\n    # load the image data and the text data concurrently in two threads\n    while n:\n        time.sleep(5)\n    # the main thread checks every 5 seconds whether loading has finished\n    model_name = ['vgg19','alexnet','densenet161','resnet101','squeezenet1_0','inception_v3']\n    while True:\n        x = int(input(\"choose which model will be trained (0-5):\"))\n        if x < 0 or x > 5:\n            break\n        model = Extended_Corr_AE.ECA(text_size=len(text_data[0]),model_name=model_name[x])\n        for i in range(4):\n            model.train(texts=text_data, imgs=img_data, num_workers=cpu_count(), batch_size=128, EPOCH=100,alpha=0.3)\n        model.save()\n    # train six different models and save them","sub_path":"2017202021/src/scripts/ECA_Train.py","file_name":"ECA_Train.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} {"seq_id":"294326826","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport unittest, sys\nfrom geometry_msgs.msg import Pose, PoseStamped, PointStamped, TwistStamped, Vector3Stamped\nfrom moveit_msgs.msg import RobotState, Constraints, MotionPlanRequest\nfrom moveit.task_constructor import core, stages\n\n\nclass TestPropertyMap(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super(TestPropertyMap, self).__init__(*args, **kwargs)\n self.props = core.PropertyMap()\n\n def _check(self, name, value):\n self.props[name] = value\n self.assertEqual(self.props[name], value)\n\n def test_assign(self):\n self._check(\"double\", 3.14)\n self._check(\"long\", 42)\n self._check(\"long\", 13)\n self._check(\"bool\", True)\n self._check(\"bool\", False)\n self._check(\"string\", \"anything\")\n self._check(\"pose\", Pose())\n # MotionPlanRequest is not registered as property type and should raise\n self.assertRaises(TypeError,self._check, \"request\", MotionPlanRequest())\n\n def test_assign_in_reference(self):\n planner = core.PipelinePlanner()\n props = planner.properties\n\n props[\"goal_joint_tolerance\"] = 3.14\n self.assertEqual(props[\"goal_joint_tolerance\"], 3.14)\n self.assertEqual(planner.goal_joint_tolerance, 3.14)\n\n planner.goal_joint_tolerance = 2.71\n self.assertEqual(props[\"goal_joint_tolerance\"], 2.71)\n\n props[\"planner\"] = \"planner\"\n self.assertEqual(props[\"planner\"], \"planner\")\n self.assertEqual(planner.planner, \"planner\")\n\n props[\"double\"] = 3.14\n a = props\n props[\"double\"] = 2.71\n self.assertEqual(a[\"double\"], 2.71)\n\n planner.planner = \"other\"\n self.assertEqual(props[\"planner\"], \"other\")\n self.assertEqual(planner.planner, \"other\")\n\n del planner\n # TODO: Why can we still access props? planner should be destroyed\n self.assertEqual(props[\"goal_joint_tolerance\"], 2.71)\n self.assertEqual(props[\"planner\"], \"other\")\n\n def test_iter(self):\n # assign values so we can iterate over them\n self.props[\"double\"] = 3.14\n self.props[\"bool\"] = True\n first = [p for p in self.props]\n self.assertEqual(len(first), 2)\n second = [(name, value) for (name, value) in self.props]\n self.assertEqual(first, second)\n\n def test_update(self):\n self.props[\"double\"] = 3.14\n self.props.update({\"double\": 2.72, \"bool\": True})\n self.props.update({})\n self.assertEqual(self.props[\"double\"], 2.72)\n self.assertEqual(self.props[\"bool\"], True)\n\n def test_expose(self):\n self.props[\"double\"] = 3.14\n\n other = core.PropertyMap()\n self.props.exposeTo(other, \"double\")\n self.assertEqual(other[\"double\"], self.props[\"double\"])\n\n self.props.exposeTo(other, \"double\", \"float\")\n self.assertEqual(other[\"float\"], self.props[\"double\"])\n\n\nclass TestModifyPlanningScene(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super(TestModifyPlanningScene, self).__init__(*args, **kwargs)\n self.mps = stages.ModifyPlanningScene(\"mps\")\n\n def test_attach_objects_invalid_args(self):\n for value in [None, 1, 1.5, {}]:\n self.assertRaises(TypeError, self.mps.attachObjects, value, \"link\")\n self.assertRaises(TypeError, self.mps.attachObjects, value, \"link\", True)\n self.assertRaises(TypeError, self.mps.attachObjects, value, \"link\", False)\n\n def test_attach_objects_valid_args(self):\n self.mps.attachObject(\"object\", \"link\")\n self.mps.detachObject(\"object\", \"link\")\n\n self.mps.attachObjects(\"object\", \"link\")\n self.mps.detachObjects(\"object\", \"link\")\n self.mps.attachObjects(\"object\", \"link\", True)\n 
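# the trailing boolean presumably selects attach (True) vs. detach (False)\r\n        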
self.mps.attachObjects(\"object\", \"link\", False)\n\n self.mps.attachObjects([], \"link\")\n self.mps.attachObjects([\"object\"], \"link\")\n self.mps.attachObjects([\"object1\", \"object2\", \"object3\"], \"link\")\n\n def test_allow_collisions(self):\n self.mps.allowCollisions(\"first\", \"second\")\n self.mps.allowCollisions(\"first\", \"second\", True)\n self.mps.allowCollisions(\"first\", \"second\", False)\n\n self.mps.allowCollisions([\"first\"], [\"second\"])\n\n\nclass TestStages(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super(TestStages, self).__init__(*args, **kwargs)\n self.planner = core.PipelinePlanner()\n\n def _check(self, stage, name, value):\n self._check_assign(stage, name, value)\n self._check_invalid_args(stage, name, type(value))\n\n def _check_assign(self, stage, name, value):\n setattr(stage, name, value)\n self.assertEqual(getattr(stage, name), value)\n\n def _check_invalid_args(self, stage, name, target_type):\n \"\"\"Check some basic types to raise an ArgumentError when assigned\"\"\"\n for value in [None, 1, 1.0, \"string\", [], {}, set()]:\n try:\n target_type(value)\n continue # ignore values that are implicitly convertible to target_type\n except:\n pass\n\n try:\n setattr(stage, name, value)\n except TypeError as e:\n pass\n except:\n self.fail(\"Assigning {} did raise wrong exception: {}\".format(value, sys.exc_info()[0]))\n else:\n self.fail(\"Assigning {} did not raise an exception, result: {}\".format(value, getattr(stage, name)))\n\n def test_CurrentState(self):\n stage = stages.CurrentState(\"current\")\n\n def test_FixedState(self):\n stage = stages.FixedState(\"fixed\")\n\n def test_ComputeIK(self):\n generator_stage = stages.GeneratePose(\"generator\")\n stage = stages.ComputeIK(\"IK\", generator_stage)\n\n self._check(stage, \"timeout\", 0.5)\n self._check(stage, \"eef\", \"eef\")\n self._check(stage, \"group\", \"group\")\n self._check(stage, \"default_pose\", \"default_pose\")\n self._check(stage, \"max_ik_solutions\", 1)\n self.assertRaises(OverflowError, self._check_assign, stage, \"max_ik_solutions\", -1)\n self._check(stage, \"ignore_collisisons\", False)\n self._check(stage, \"ignore_collisisons\", True)\n self._check(stage, \"ik_frame\", PoseStamped())\n self._check(stage, \"target_pose\", PoseStamped())\n self._check(stage, \"forwarded_properties\", [\"name1\", \"name2\", \"name3\"])\n\n def test_MoveTo(self):\n stage = stages.MoveTo(\"move\", self.planner)\n\n self._check(stage, \"group\", \"group\")\n self._check(stage, \"ik_frame\", PoseStamped())\n stage.setGoal(PoseStamped())\n # TODO: fails\n # stage.setGoal(PointStamped())\n stage.setGoal(RobotState())\n self._check(stage, \"path_constraints\", Constraints())\n\n def test_MoveRelative(self):\n stage = stages.MoveRelative(\"move\", self.planner)\n\n self._check(stage, \"group\", \"group\")\n self._check(stage, \"ik_frame\", PoseStamped())\n self._check(stage, \"min_distance\", 0.5)\n self._check(stage, \"max_distance\", 0.25)\n self._check(stage, \"path_constraints\", Constraints())\n stage.setDirection(TwistStamped())\n stage.setDirection(Vector3Stamped())\n stage.setDirection({'joint': 0.1})\n\n def test_Connect(self):\n planner = core.PipelinePlanner()\n planner2 = core.PipelinePlanner()\n stage = stages.Connect(\"connect\", [(\"planner\", planner), (\"planner2\", planner2)])\n\n def test_FixCollisionObjects(self):\n stage = stages.FixCollisionObjects(\"collision\")\n\n self._check(stage, \"max_penetration\", 0.5)\n\n def test_GenerateGraspPose(self):\n stage 
= stages.GenerateGraspPose(\"generate_grasp_pose\")\n\n self._check(stage, \"eef\", \"eef\")\n self._check(stage, \"pregrasp\", \"pregrasp\")\n self._check(stage, \"object\", \"object\")\n self._check(stage, \"angle_delta\", 0.5)\n\n def test_GeneratePose(self):\n stage = stages.GeneratePose(\"generate_pose\")\n\n self._check(stage, \"pose\", PoseStamped())\n\n def test_Pick(self):\n generator_stage = stages.GeneratePose(\"generator\")\n stage = stages.Pick(generator_stage, \"pick\")\n\n self._check(stage, \"object\", \"object\")\n self._check(stage, \"eef\", \"eef\")\n self._check(stage, \"eef_frame\", \"eef_frame\")\n self._check(stage, \"eef_group\", \"eef_group\")\n self._check(stage, \"eef_parent_group\", \"eef_parent_group\")\n\n def test_Place(self):\n generator_stage = stages.GeneratePose(\"generator\")\n stage = stages.Place(generator_stage, \"place\")\n\n self._check(stage, \"object\", \"object\")\n self._check(stage, \"eef\", \"eef\")\n self._check(stage, \"eef_frame\", \"eef_frame\")\n self._check(stage, \"eef_group\", \"eef_group\")\n self._check(stage, \"eef_parent_group\", \"eef_parent_group\")\n\n def test_SimpleGrasp(self):\n stage = stages.SimpleGrasp(stages.GenerateGraspPose(\"grasp\"))\n\n self._check(stage, \"eef\", \"eef\")\n self._check(stage, \"object\", \"object\")\n\n def test_SimpleUnGrasp(self):\n stage = stages.SimpleUnGrasp(stages.GenerateGraspPose(\"ungrasp\"))\n\n self._check(stage, \"eef\", \"eef\")\n self._check(stage, \"object\", \"object\")\n\n def test_PropertyMaps(self):\n for name in dir(stages):\n if name.startswith(\"__\") or name.endswith(\"__\"):\n continue\n\n stage = getattr(stages, name)\n try:\n props = stage().properties\n except:\n continue\n\n try:\n for p in props:\n pass\n except Exception as ex:\n print(\"error in class {}: {}\".format(stage, ex))\n raise\n\n\nclass TestTask(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super(TestTask, self).__init__(*args, **kwargs)\n\n def test(self):\n task = core.Task()\n self.assertEqual(task.id, \"\")\n task = core.Task(\"foo\", core.SerialContainer())\n self.assertEqual(task.id, \"foo\")\n task = core.Task(\"task\")\n self.assertEqual(task.id, \"task\")\n\n current = stages.CurrentState(\"current\")\n self.assertEqual(current.name, \"current\")\n current.timeout = 1.23\n self.assertEqual(current.timeout, 1.23)\n\n task.add(current)\n\n # ownership of current was passed to task\n with self.assertRaises(TypeError):\n current.name\n\n task.add(stages.Connect(\"connect\", []))\n task.add(stages.FixedState())\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"core/python/test/test_mtc.py","file_name":"test_mtc.py","file_ext":"py","file_size_in_byte":10649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"155830700","text":"from warmachine.amazon.security import SecurityGroupDef\n\n# SSH security group\nssh_access = SecurityGroupDef('ssh-access', 'allow ssh access')\nssh_access.auth_group(22, 22, group='jenkins')\nssh_access.auth_cdir(22, 22, cdir='184.169.148.38/32')\nssh_access.auth_cdir(22, 22, cdir='50.18.112.244/32')\nssh_access.auth_cdir(22, 22, cdir='50.18.61.184/32')\n\n# Web access from anywhere\nweb_access = SecurityGroupDef('web-access', 'allow access over web ports')\nweb_access.auth_cdir(80, 80, cdir='0.0.0.0/0')\nweb_access.auth_cdir(443, 443, 
cdir='0.0.0.0/0')\n\n","sub_path":"warmachine/securitygroups.py","file_name":"securitygroups.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"363931048","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\nimport ast\nimport fileUtils\n\ndef requestForData():\n url = \"https://www.elantis.be/fr/simulateur-pret-a-temperament/\"\n\n querystring = {\"objective\": \"renovation\", \"renovation-amount\": \"12.500\"}\n\n headers = {\n 'User-Agent': \"PostmanRuntime/7.15.2\",\n 'Accept': \"*/*\",\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"84fb7859-b2d0-40f9-bf3d-e05fd349b635,fd208e66-116f-4ba8-8590-61a2693ed30f\",\n 'Host': \"www.elantis.be\",\n 'Cookie': \"pll_language=fr\",\n 'Accept-Encoding': \"gzip, deflate\",\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n #get all the scripts\n scripts = soup.find_all(\"script\")\n\n #get the script at position 7\n data_script = scripts[7].text\n\n #extracts data_string from the script from the position of JSON.parse( to the closing parenthese );\n js_data = data_script[data_script.find(\"JSON.parse(\"): data_script.find(\");\")]\n\n #return the evaluation of the string without JSON.parse(\n return json.loads(ast.literal_eval(js_data.replace(\"JSON.parse(\", \"\")))\n\ndef bankData():\n bank_data = []\n try:\n script_data = requestForData()\n except:\n print(\"THE SCRIPT STRUCTURE OF ELANTIS HAS BEEN CHANGED PLEASE CHECK IT BACK\")\n script_data = {}\n for lType in script_data:\n data_for_type = []\n if lType == 'car':\n # print(script_data[lType]['types'])\n for pdType in script_data[lType]['types']:\n if pdType['type'] == 'new':\n for rates in pdType['rates']:\n for rate in rates['rates']:\n data_for_type.append({'type': 'NEW CAR LOAN',\n 'productID': 'ELAN0004',\n 'amount': rates['min'],\n 'maxAmnt': rates['max'],\n 'duration': rate['duration'],\n 'rate': rate['taeg']\n\n })\n\n else:\n for rates in pdType['rates']:\n for rate in rates['rates']:\n data_for_type.append({'type': '2dh_car LOAN',\n 'productID': 'ELAN0005',\n 'amount': rates['min'],\n 'maxAmnt': rates['max'],\n 'duration': rate['duration'],\n 'rate': rate['taeg']\n\n })\n bank_data.append(data_for_type)\n elif lType == 'personal':\n data_for_type = []\n for pdType in script_data[lType]['types']:\n for rates in pdType['rates']:\n for rate in rates['rates']:\n data_for_type.append({'type': 'PERSONAL LOAN',\n 'productID': 'ELAN0001',\n 'amount': rates['min'],\n 'maxAmnt': rates['max'],\n 'duration': rate['duration'],\n 'rate': rate['taeg']\n\n })\n bank_data.append(data_for_type)\n elif lType == 'renovation':\n for pdType in script_data[lType]['types']:\n if pdType['type'] == 'classic':\n for rates in pdType['rates']:\n for rate in rates['rates']:\n data_for_type.append({'type': 'RENOVATION LOAN',\n 'productID': 'ELAN0002',\n 'amount': rates['min'],\n 'maxAmnt': rates['max'],\n 'duration': rate['duration'],\n 'rate': rate['taeg']\n\n })\n\n else:\n for rates in pdType['rates']:\n for rate in rates['rates']:\n data_for_type.append({'type': 'ENERGY LOAN',\n 'productID': 'ELAN0003',\n 'amount': rates['min'],\n 'maxAmnt': rates['max'],\n 'duration': rate['duration'],\n 'rate': rate['taeg']\n\n })\n bank_data.append(data_for_type)\n return bank_data\n\ndef formatDataFromBank(bank_data, provider):\n frame_to_export = 
[]\n for loanList in bank_data:\n for loan in loanList:\n frame_to_export.append([provider, loan['productID'], loan['type'], loan['amount'], loan['maxAmnt'],\n loan['duration'], loan['rate']])\n return frame_to_export\n\ndef elantisLoanScraper():\n print('ELANTIS SCRAPE PROCESSING ...')\n tab_col = ['PROVIDER ', 'PRODUCTID', 'LOAN TYPE', 'MIN AMT', 'MAX AMT', 'TERM', 'RATE']\n data_matrix = formatDataFromBank(bankData(), 'ELANTIS')\n if data_matrix:\n fileUtils.displayRates(tab_col, data_matrix)\n return fileUtils.upToDate('elantis_rates', 'ELANTIS SCRAPE', data_matrix, tab_col)\n else:\n return None\n\n\n\n\n\n# elantisLoanScraper()\n","sub_path":"Elantis_rates.py","file_name":"Elantis_rates.py","file_ext":"py","file_size_in_byte":5933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"537035638","text":"from app.common.pygame_surface_wrapper_factory import PygameSurfaceWrapperFactory\nfrom app.player.player_manager import PlayerManager\nfrom app.renderers.game_play_renderer_manager import GamePlayRendererManager\nfrom app.settings.application_settings import ApplicationSettings\nfrom app.states.abc_game_state import ABCGameState\nfrom app.updaters.game_play_updater import GamePlayUpdater\nfrom app.worldspace.world_space_manager import WorldSpaceManager\n\n\nclass GamePlayState(ABCGameState):\n\n def __init__(self, gameStateChanger, gamePlayControllerModel):\n\n self.gameStateChanger = gameStateChanger\n self.controllerModel = gamePlayControllerModel\n self.updater = GamePlayUpdater(self.controllerModel)\n\n defaultPlayerProfileName = \"Player1\"\n self.playerManager = PlayerManager(defaultPlayerProfileName)\n\n self.worldSpaceManager = WorldSpaceManager(self.playerManager.playerState.worldSpaceName)\n\n settings = ApplicationSettings()\n self.mainSurfaceWrapper = PygameSurfaceWrapperFactory.buildNonAlpha(size=(\n settings.windowWidth,\n settings.windowHeight\n ))\n\n self.rendererManager = GamePlayRendererManager(\n self.worldSpaceManager.camera,\n self.worldSpaceManager.backgroundRenderer,\n self.playerManager.playerState.renderer\n )\n\n self.updatedPlayerState = None\n\n def processInputs(self, events):\n self.controllerModel.processInputs(events)\n\n def update(self, deltaTimeSeconds):\n\n self.updatedPlayerState = self.updater.update(deltaTimeSeconds, self.playerManager, self.worldSpaceManager)\n\n def render(self, screenWrapper):\n\n self.rendererManager.render(screenWrapper, self.updatedPlayerState)\n","sub_path":"app/states/game_play/game_play_state.py","file_name":"game_play_state.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"196766666","text":"from pprint import pprint\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nrocket_control_df = pd.read_excel('mintoc_gravity_turn.xlsx')\nt = rocket_control_df['t']\nu = rocket_control_df['u']\n\nt_resampled = np.arange(0, 400+1, 1)\nu_resampled = np.interp(t_resampled, t, u)\nprint(u_resampled)\n\nfig, ax = plt.subplots()\nax.plot(t, u, 'o')\nax.plot(t_resampled, u_resampled, 'x')\nplt.show()\n","sub_path":"resample_test.py","file_name":"resample_test.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"195637435","text":"\"\"\"\nCalling a JSON API\n\nIn this assignment you will write a Python program somewhat similar to 
http://www.py4e.com/code3/geojson.py.\nThe program will prompt for a location, contact a web service, retrieve JSON from the web service, parse\nthat data, and retrieve the first place_id from the JSON. A place ID is a textual identifier that uniquely\nidentifies a place within Google Maps.\nAPI End Points\n\nTo complete this assignment, you should use this API endpoint that has a static subset of the Google Data:\n\nhttp://py4e-data.dr-chuck.net/json?\nThis API uses the same parameter (address) as the Google API. This API also has no rate limit so you can test as\noften as you like. If you visit the URL with no parameters, you get a \"No address...\" response.\nTo call the API, you need to include a key= parameter and provide the address that you are requesting as the\naddress= parameter that is properly URL encoded using the urllib.parse.urlencode() function as shown in\nhttp://www.py4e.com/code3/geojson.py\n\nMake sure to check that your code is using the API endpoint as shown above. You will get different results\nfrom the geojson and json endpoints so make sure you are using the same end point as this autograder is using.\n\nTest Data / Sample Execution\n\nYou can test to see if your program is working with a location of \"South Federal University\" which will have a\nplace_id of \"ChIJJ2MNmPl_bIcRt8t5x-X5ZhQ\".\n\n$ python3 solution.py\nEnter location: South Federal University\nRetrieving http://...\nRetrieved 2290 characters\nPlace id ChIJJ2MNmPl_bIcRt8t5x-X5ZhQ\nTurn In\n\nPlease run your program to find the place_id for this location:\n\nSmolensk State University\nMake sure to enter the name and case exactly as above and enter the place_id and your Python code below.\nHint: The first seven characters of the place_id are \"ChIJD5y ...\"\nMake sure to retrieve the data from the URL specified above and not the normal Google API. 
Your program should\nwork with the Google API - but the place_id may not match for this assignment.\n\"\"\"\n\nimport urllib.request, urllib.parse, urllib.error\nimport json, ssl\n\napi_key = False\nif api_key is False:\n    api_key = 42 # using this api key and url\n    service_url = \"http://py4e-data.dr-chuck.net/json?\"\nelse:\n    service_url = \"https://maps.googleapis.com/maps/api/geocode/json?\"\n\n# ignore ssl certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\n# input for location\naddress = input(\"Enter location: \")\nif len(address)<1:\n    \"break\"\n\nparametr = dict()\nparametr[\"address\"] = address # store address into parameter\nif api_key is not False:\n    parametr[\"key\"] = api_key # store api key into parameter\nurl = service_url + urllib.parse.urlencode(parametr) # make actual url to retrieve: service url + address + api key\n\nprint(\"Retrieving \", url)\nurl_hand = urllib.request.urlopen(url, context=ctx)\ndata = url_hand.read().decode()\nprint(\"Retrieved \", len(data), \"characters\")\n\ntry:\n    js = json.loads(data) # load json data into js\nexcept:\n    js = None\n\n# show error message and display data\nif not js or \"status\" not in js or js[\"status\"] != \"OK\":\n    print(\"--- Failure To Retrieve ---\")\n    print(data)\n    \"continue\"\n\n# find the place id and display it as the result\nplace_id = js[\"results\"][0][\"place_id\"]\nprint(\"Place Id \", place_id)","sub_path":"using python to access web data/using the geojson api.py","file_name":"using the geojson api.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} {"seq_id":"350751197","text":"# @Time : 2018/6/6 21:53 \n# @Author : Jing Xu\n\nimport os\n\nfile_dir = \"./40/\"\n\nfor dirs in os.walk(file_dir):\n\t# print(root) # current directory path\n\t# print(dirs) # all subdirectories under the current path\n\t# print(files) # all non-directory files under the current path\n\tfor directory in dirs:\n\t\t# print(directory)\n\t\tfor dir1 in directory:\n\t\t\tfor names in os.walk(dir1):\n\t\t\t\tif names == \"input.dat\":\n\t\t\t\t\tprint(names)\n\t\t\t\t\tf = open(names, 'r+', encoding='gbk')\n\t\t\t\t\tall_the_lines = f.readlines()\n\t\t\t\t\t# print(all_the_lines)\n\t\t\t\t\tfor line in all_the_lines:\n\t\t\t\t\t\tstr1 = '40 !// 脉宽'\n\t\t\t\t\t\tprint(666)\n\t\t\t\t\t\tstr2 = '50 !// 脉宽'\n\t\t\t\t\t\tf.write(line.replace(str1, str2))\n\t\t\t\t\tf.close()\n\n\n\n","sub_path":"Repo_Python/zhihu_interest/test_replace.py","file_name":"test_replace.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"193924507","text":"from multiprocessing import Pool\nimport os,time,random\n\ndef long_time_task(name):\n    print('task %s start...'%name)\n    start = time.time()\n    time.sleep(random.random() * 3)\n    end = time.time()\n    print('task %s runs %0.2f seconds'%(name,end - start))\n\nif __name__ == \"__main__\":\n    print('parent process(%s) start...'%os.getpid())\n    p = Pool(8)\n    for i in range(9):\n        p.apply_async(long_time_task,args=(i,))\n    p.close()\n    p.join()\n    print('All subprocess is done.')\n","sub_path":"pool_test.py","file_name":"pool_test.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"481284599","text":"import pygame, sys, random\nfrom Enemy import Enemy\nfrom Ninja import Ninja\nfrom HUD import Text\nfrom HUD import Score\nfrom Button import Button\nfrom Bullet import Bullet\nfrom 
Gun import Gun\n\npygame.init()\n\nclock = pygame.time.Clock()\n\nwidth = 800 \nheight = 600\nsize = width, height\n\nbgColor = r,g,b = 0, 0, 0\n\nscreen = pygame.display.set_mode(size)\n\nbgImage = pygame.image.load(\"images/Screens/Start Screen.png\").convert()\nbgRect = bgImage.get_rect()\n\nNinja = Ninja([width/2, height/2])\n\nballs = []\nballs += [Enemy(\"images/Enemy/DeathChicken.png\", [4,5], [100, 125])]\n\ntimer = Score([80, height - 25], \"Time: \", 36)\ntimerWait = 0\ntimerWaitMax = 6\n\nscore = Score([width-80, height-25], \"Score: \", 36)\n\nrun = False\n\nstartButton = Button([width/2, height-300], \n\t\t\t\t \"images/Buttons/Start Base.png\", \n\t\t\t\t \"images/Buttons/Start Clicked.png\")\n\nwhile True:\n\twhile not run:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT: sys.exit()\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_RETURN:\n\t\t\t\t\trun = True\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tstartButton.click(event.pos)\n\t\t\tif event.type == pygame.MOUSEBUTTONUP:\n\t\t\t\tif startButton.release(event.pos):\n\t\t\t\t\trun = True\n\t\t\t\t\t\n\t\tbgColor = r,g,b\n\t\tscreen.fill(bgColor)\n\t\tscreen.blit(bgImage, bgRect)\n\t\tscreen.blit(startButton.image, startButton.rect)\n\t\tpygame.display.flip()\n\t\tclock.tick(60)\n\t\t\n\tbgImage = pygame.image.load(\"images/screens/main screen.png\").convert()\n\tbgRect = bgImage.get_rect()\n\twhile run:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT: sys.exit()\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_w or event.key == pygame.K_UP:\n\t\t\t\t\tNinja.go(\"up\")\n\t\t\t\tif event.key == pygame.K_d or event.key == pygame.K_RIGHT:\n\t\t\t\t\tNinja.go(\"right\")\n\t\t\t\tif event.key == pygame.K_s or event.key == pygame.K_DOWN:\n\t\t\t\t\tNinja.go(\"down\")\n\t\t\t\tif event.key == pygame.K_a or event.key == pygame.K_LEFT:\n\t\t\t\t\tNinja.go(\"left\")\n\t\t\tif event.type == pygame.KEYUP:\n\t\t\t\tif event.key == pygame.K_w or event.key == pygame.K_UP:\n\t\t\t\t\tNinja.go(\"stop up\")\n\t\t\t\tif event.key == pygame.K_d or event.key == pygame.K_RIGHT:\n\t\t\t\t\tNinja.go(\"stop right\")\n\t\t\t\tif event.key == pygame.K_s or event.key == pygame.K_DOWN:\n\t\t\t\t\tNinja.go(\"stop down\")\n\t\t\t\tif event.key == pygame.K_a or event.key == pygame.K_LEFT:\n\t\t\t\t\tNinja.go(\"stop left\")\n\t\t\t\n\t\tif len(balls) < 10:\n\t\t\tif random.randint(0, 1*60) == 0:\n\t\t\t\tballs += [Enemy(\"images/Enemy/DeathChicken.png\",\n\t\t\t\t\t\t [random.randint(0,10), random.randint(0,10)],\n\t\t\t\t\t\t [random.randint(100, width-100), random.randint(100, height-100)])\n\t\t\t\t\t\t ]\n\t\t\t\t\t\t \n\t\tif timerWait < timerWaitMax:\n\t\t\ttimerWait += 1\n\t\telse:\n\t\t\ttimerWait = 0\n\t\t\ttimer.increaseScore(.1)\n\t\tNinja.update(width, height)\n\t\ttimer.update()\n\t\tscore.update()\n\t\tfor ball in balls:\n\t\t\tball.update(width, height)\n\t\t\t\n\t\tfor bully in balls:\n\t\t\tfor victem in balls:\n\t\t\t\tbully.collideBall(victem)\n\t\t\tif bully.collidePlayer(Ninja):\n\t\t\t\tscore.increaseScore(1)\n\t\t\n\t\tfor ball in balls:\n\t\t\tif not ball.living:\n\t\t\t\tballs.remove(ball)\n\t\t\n\t\tbgColor = r,g,b\n\t\tscreen.fill(bgColor)\n\t\tscreen.blit(bgImage, bgRect)\n\t\tfor ball in balls:\n\t\t\tscreen.blit(ball.image, ball.rect)\n\t\tscreen.blit(Ninja.image, Ninja.rect)\n\t\tscreen.blit(timer.image, timer.rect)\n\t\tscreen.blit(score.image, 
score.rect)\n\t\tpygame.display.flip()\n\t\tclock.tick(60)\n","sub_path":"SUPER AWESOME NINJA GAME.py","file_name":"SUPER AWESOME NINJA GAME.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"571885930","text":"# -*- coding: utf-8 -*- {{{\r\n#\r\n# Your license here\r\n# }}}\r\n\r\nimport sys\r\nfrom os.path import dirname, abspath, join\r\nimport os\r\n\r\nsys.path.insert(0, dirname(dirname(dirname(abspath(__file__)))))\r\n\r\nfrom dateutil import parser\r\nfrom datetime import datetime, timedelta\r\nimport pandas as pd\r\nimport configparser\r\nimport numpy\r\nimport maya\r\nimport csv\r\nfrom pathlib import Path\r\n\r\nimport utils\r\nfrom fleet_interface import FleetInterface\r\nfrom fleet_request import FleetRequest\r\nfrom fleet_response import FleetResponse\r\nfrom fleet_config import FleetConfig\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass DistributionVoltageService:\r\n    \"\"\"\r\n    The distribution voltage regulation service short summary\r\n    \"\"\"\r\n\r\n    def __init__(self, *args, **kwargs):\r\n    #def __init__(self, fleet, *args, **kwargs):\r\n        # The scope of the project is to test service with one fleet...\r\n        #self.fleet = fleet\r\n\r\n        # Get cur directory\r\n        self.base_path = dirname(abspath(__file__))\r\n\r\n        # Read config file\r\n        config_header = 'config'\r\n        self.config = configparser.ConfigParser()\r\n        self.config.read(join(self.base_path, 'config.ini'))\r\n\r\n        self.name = self.config.get(config_header, 'name', fallback='Distribution Voltage Regulation Service')\r\n        self.capacity_scaling_factor = float(self.config.get(config_header, 'capacity_scaling_factor', fallback=1.0))\r\n        #self.f_reduction = float(self.config.get(config_header, 'f_reduction', fallback=0.1))\r\n        self.drive_cycle_file = self.config.get(config_header, 'drive_cycle_file',\r\n                                                fallback='drive.cycle.voltage.csv')\r\n        self.drive_cycle_file = join(self.base_path, 'data', self.drive_cycle_file)\r\n        #self.drive_cycle = pd.read_csv(self.drive_cycle_file)\r\n        self.drive_cycle = pd.read_csv(self.drive_cycle_file, parse_dates=['time'])\r\n        self.Vupper = float(self.config.get(config_header, 'Vupper', fallback=1.05))\r\n        self.Vlower = float(self.config.get(config_header, 'Vlower', fallback=0.95))\r\n        self.operation_flag = self.config.get(config_header, 'operation_flag', fallback='0')\r\n        #self.starttime = self.config.get(config_header, 'starttime')\r\n        #self.endtime = self.config.get(config_header, 'endtime')\r\n\r\n        self.sim_step = timedelta(seconds=30)\r\n        \r\n\r\n    def request_loop(self, sensitivity_P = 0.0001, \r\n                     sensitivity_Q = 0.0005,\r\n                     start_time = parser.parse(\"2017-08-01 16:00:00\"),\r\n                     end_time = parser.parse(\"2017-08-01 17:00:00\")):\r\n        # Sensitivity_P, Sensitivity_Q are values depending on feeder characteristics\r\n        # We can use dummy value to conduct test\r\n        #sensitivity_P = 0.001\r\n        #sensitivity_Q = 0.005\r\n        assigned_service_kW=self._fleet.assigned_service_kW()\r\n        assigned_service_kVar=self._fleet.assigned_service_kW()\r\n\r\n\r\n        cur_time = start_time\r\n        #end_time = endtime\r\n\r\n        delt = self.sim_step\r\n        volt = self.drive_cycle[\"voltage\"]\r\n        time = self.drive_cycle[\"time\"]\r\n        List_Time = list(time.values)\r\n        dts = maya.parse(start_time).datetime() - maya.parse(List_Time[0]).datetime()\r\n        dts = (dts).total_seconds()\r\n        Vupper = self.Vupper\r\n        Vlower = self.Vlower\r\n        responses = []\r\n        requests = []\r\n        while cur_time < end_time:\r\n            # normal operation\r\n\r\n            \r\n            for n in 
range(len(list(time.values))):\r\n\r\n dta = maya.parse(List_Time[n]).datetime() \r\n dtb = maya.parse(cur_time).datetime() - timedelta(seconds = dts)\r\n if dta==dtb:\r\n index=n\r\n \r\n \r\n # index = list(time.values).index(cur_time)\r\n cur_voltage = volt.values[index]\r\n #cur_voltage = 1.055\r\n if cur_voltage >= self.Vlower and cur_voltage <= self.Vupper:\r\n Prequest = 0\r\n Qrequest = 0\r\n else:\r\n if cur_voltage > Vupper:\r\n dV = Vupper - cur_voltage\r\n Qrequest = -dV/sensitivity_Q # need Q absorption\r\n if Qrequest<-1*assigned_service_kVar:\r\n Qrequest=-1*assigned_service_kVar\r\n Prequest = -dV/sensitivity_P # need P curtailment\r\n if Prequest<-1*assigned_service_kW:\r\n Prequest=-1*assigned_service_kW\r\n elif cur_voltage < Vlower:\r\n dV = Vlower - cur_voltage\r\n Qrequest = dV/sensitivity_Q # need Q injection\r\n if QrequestL\", packedIP)[0]\n\n\ndef long2ip(ip):\n return socket.inet_ntoa(struct.pack('>L', ip))\n\n\nengine = create_engine('sqlite:///reblaze.db', echo=False)\nengine.raw_connection().connection.text_factory = str\nBase = declarative_base()\n\n\nclass Firm(Base):\n __tablename__ = 'firm'\n id = Column(Integer, primary_key=True, autoincrement=True)\n start_ip_long = Column(BigInteger(), )\n end_ip_long = Column(BigInteger(), )\n firm_name = Column(String(200), )\n\n def __repr__(self):\n return \"{0}:{1}:{2}\".format(self.start_ip_long, self.end_ip_long, self.firm_name)\n\n\nclass GEOIPCountry(Base):\n __tablename__ = 'country'\n id = Column(Integer, primary_key=True, autoincrement=True)\n start_ip_long = Column(BigInteger(), )\n end_ip_long = Column(BigInteger(), )\n start_ip = Column(String(20), )\n end_ip = Column(String(20), )\n country_code = Column(String(2), )\n country = Column(String(25), )\n\n def __repr__(self):\n return \"{0}:{1}:{2}:{3}\".format(self.start_ip, self.end_ip, self.country_code, self.country)\n\n\nBase.metadata.create_all(engine)\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\ndef get_all_ip_by_country_code(country):\n q = session.query(GEOIPCountry).filter(GEOIPCountry.country_code == country)\n return list(q.values('start_ip', 'end_ip'))\n\n\ndef get_all_ip_by_country(country):\n q = session.query(GEOIPCountry).filter(GEOIPCountry.country.like('%' + country + '%'))\n return list(q.values('start_ip', 'end_ip'))\n\n\ndef get_country_by_ip_range(start_ip, end_ip):\n q = session.query(GEOIPCountry).filter(and_(GEOIPCountry.start_ip.like('%' + start_ip + '%'),\n GEOIPCountry.end_ip.like('%' + end_ip + '%')))\n return list(q.values('country', 'country_code'))\n\n\ndef get_country_by_ip(ip):\n ip_ = ip2long(ip)\n q = session.query(GEOIPCountry).filter(and_(GEOIPCountry.start_ip_long <= ip_,\n GEOIPCountry.end_ip_long >= ip_))\n return list(q.values('country', 'country_code'))\n\n\ndef get_firm_by_ip(ip):\n ip_ = ip2long(ip)\n q = session.query(Firm).filter(and_(Firm.start_ip_long <= ip_,\n Firm.end_ip_long >= ip_))\n return list(q.values('firm_name'))\n\n\ndef get_all_by_firm(firm):\n q = session.query(Firm).filter((Firm.firm_name.like('%' + firm + '%')))\n ip = list(q.values('start_ip_long', 'end_ip_long'))\n return [(long2ip(x), long2ip(z)) for x, z in ip]\n\n\ndef get_all_geo_count():\n return session.query(GEOIPCountry).count()\n\n\ndef get_all_firm_count():\n return session.query(Firm).count()\n\n\n# helpers\ndef parse_csv_firm(path='GeoIPASNum2.csv'):\n import codecs\n with codecs.open(path, 'r', encoding='latin-1') as csvfile:\n for i, row in enumerate(csvfile):\n start_ip_long, end_ip_long, firm_name = 
row.split(\",\", maxsplit=2)\n # print(start_ip_long, end_ip_long, firm_name)\n firm = Firm(\n start_ip_long=start_ip_long.strip('\"'),\n end_ip_long=end_ip_long.strip('\"'),\n firm_name=firm_name.strip('\"')\n )\n print(\"Item {0}>{1}\".format(i, firm))\n session.add(firm)\n session.commit()\n print(\"Import is done\")\n\n\n# helpers for sqlite\nimport csv\n\n\ndef parse_csv_company(path='firm.csv'):\n with open(path) as csvfile:\n reader = csv.DictReader(csvfile, fieldnames=(\"ip_start\", \"ip_end\", \"name\"))\n for row in reader:\n firm = Firm(\n start_ip_long=row['ip_start'],\n end_ip_long=row['ip_end'],\n firm_name=row['name']\n )\n print(\"Item {0}\".format(firm))\n session.add(firm)\n session.commit()\n print(\"Import is done\")\n\n\ndef parse_csv_country(path='country.csv'):\n with open(path) as csvfile:\n reader = csv.DictReader(csvfile, fieldnames=(\"ip_start\", \"ip_end\", \"ip_start_l\", \"ip_end_l\", \"code\", \"name\"))\n for row in reader:\n geo = GEOIPCountry(\n start_ip=row['ip_start'],\n end_ip=row['ip_end'],\n start_ip_long=int(row['ip_start_l']),\n end_ip_long=int(row['ip_end_l']),\n country_code=row['code'],\n country=row['name'],\n )\n print(\"Item {0}\".format(geo))\n session.add(geo)\n session.commit()\n print(\"Import is done\")\n\n\n# if __name__ == \"__main__\":\n # parse_csv_firm()\n # parse_csv_country()\n","sub_path":"sql_engine.py","file_name":"sql_engine.py","file_ext":"py","file_size_in_byte":4933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"239612272","text":"# -*- coding: utf-8 -*-\nimport plot_routes\ndef test_plot_routes():\n # cities_data_dict = {'Tver':[],'Krasnogorsk':[],'Moscow':[],'Sarov':[]}\n cities_data_dict = {'Tver':[],'Krasnogorsk':[],'Voskresensk':[],'Sarov':[],'Dmitrov':[]}\n\n for key,data_list in cities_data_dict.iteritems():\n data_list.append(plot_routes.get_lat_lon_by_address(key))\n\n coords_moscow = plot_routes.get_lat_lon_by_address('Moscow')\n\n for key,data_list in cities_data_dict.iteritems():\n data_list.append(plot_routes.get_route_from_gmaps(coords_moscow,data_list[0]))\n\n for key,el in cities_data_dict.iteritems():\n print(key)\n print(len(el))\n print(el[-1][-1]/(60.*60),\"hours\")\n\n \n sorted_routes = sorted(cities_data_dict.items(), key=lambda x:x[-1][-1][-1])\n print(sorted_routes)\n for city in sorted_routes:\n print(\"%s, %f hours\"%(city[0],city[-1][-1][-1]/(60*60.)))\n print(city[1][0])\n\n cities_coords = []\n cities_names = []\n for city in sorted_routes:\n cities_coords.append(plot_routes.get_pairs_list_from_dicts_list([city[1][0]])[0])\n cities_names.append(city[0])\n\n # print(cities_names)\n # print(cities_coords)\n\n plot_routes.plot_route_on_basemap(sorted_routes[0][1][1][0], sorted_routes[0][1][1][1],added_points_param_list=[cities_coords,cities_names])\n\n return True\n\ndef test():\n\tdur_to_closest = 0.\n\tdist_to_closest = 0.\n\n\treturn dur_to_closest, dist_to_closest\n\nif __name__ == \"__main__\":\n ret = test_plot_routes()\n print(ret)","sub_path":"old/find_optim_route.py","file_name":"find_optim_route.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"37517931","text":"from signal4gmns import *\n\n\ndef Processing():\n # Step 0.1: Set Working Directory\n data_Set_Path = r''\n # Step 0.2: Set Working Directory\n Set_Working_Directory(data_Set_Path)\n inter='--check_position'\n # Step 1: Load Data\n Read_Input_Data()\n # Step 2: Check the 
List of Signal Nodes (Optional)\n Display_signalNode_info()\n # Step 3: perform three modules for each Signal Node\n loggings.info('Module 1: Output Location for each Signal Node')\n for osmID, signal_Node in g_node_map.items():\n signal_Node.Set_Major_Apporach()\n Output_Intermediate_Files(inter)\n loggings.info('Module 2: Perform Left turn treatment for each Signal Node')\n Input_Intermediate_Files(inter)\n inter='--set_volume'\n for osmID, signal_Node in g_node_map.items():\n signal_Node.Initialization()\n Output_Intermediate_Files(inter)\n loggings.info('Module 3: Perform QEM for each Signal Node')\n Input_Intermediate_Files(inter)\n\n for osmID,signal_Node in g_node_map.items():\n signal_Node.PerformQEM()\n\n #Step 4: Output two signal_timing_phase Files\n # obtain the two jump-two files\n Output_Singal_Timing_Movement_Files()\n #Step 6: jump-three, convert the two files to timing.csv\n Output_Timing_File()\n\nif __name__ == '__main__':\n\n import osm2gmns as og\n net = og.getNetFromOSMFile('map.osm', default_lanes=True, POIs=True)\n og.connectPOIWithNet(net)\n og.generateNodeActivityInfo(net)\n og.consolidateComplexIntersections(net)\n og.outputNetToCSV(net, output_folder='consolidated')\n og.generateMovements(net)\n og.outputNetToCSV(net)\n og.show(net)\n# og.saveFig(net)\n\n Processing()\n\n\n # for testing\n # datasetPath_multi_level = r'Dataset\\3_ASU_0314'\n # datasetPath_root = r''\n # signal4gmns(datasetPath_root)\n\n","sub_path":"examples/ASU/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"88358707","text":"\"\"\"Kivy multiple tabs and button on hover cursor change with python only.\n\nThis example does not use kv language. This is only for those who do not know or use kivymd.\nThere is hovering problem when sometimes it does not change to arrow.\n\nReferences:\n https://gist.github.com/opqopq/15c707dc4cffc2b6455f\n\"\"\"\nfrom kivy.app import App\nfrom kivy.properties import BooleanProperty, ObjectProperty\nfrom kivy.core.window import Window\nfrom kivy.metrics import dp\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelItem\n\n\nclass HoverBehavior(object):\n \"\"\"Hover behavior.\n :Events:\n `on_enter`\n Fired when mouse enter the bbox of the widget.\n `on_leave`\n Fired when the mouse exit the widget\n \"\"\"\n\n hovered = BooleanProperty(False)\n border_point= ObjectProperty(None)\n '''Contains the last relevant point received by the Hoverable. 
This can\n be used in `on_enter` or `on_leave` in order to know where the event was dispatched.\n '''\n\n def __init__(self, **kwargs):\n self.register_event_type('on_enter')\n self.register_event_type('on_leave')\n Window.bind(mouse_pos=self.on_mouse_pos)\n super(HoverBehavior, self).__init__(**kwargs)\n\n def on_mouse_pos(self, *args):\n if not self.get_root_window():\n return # don't proceed if I'm not displayed <=> if I have no parent\n pos = args[1]\n #Next line's to_widget call compensates for relative layout\n inside = self.collide_point(*self.to_widget(*pos))\n if self.hovered == inside:\n #We have already done what was needed\n return\n self.border_point = pos\n self.hovered = inside\n if inside:\n self.dispatch('on_enter')\n else:\n self.dispatch('on_leave')\n\n def on_enter(self):\n pass\n\n def on_leave(self):\n pass\n\n\nfrom kivy.factory import Factory\nFactory.register('HoverBehavior', HoverBehavior)\n\n\nclass HoverButton(Button, HoverBehavior):\n def on_enter(self, *args):\n print(self.text)\n print(\"You are in, through this point\", self.border_point)\n Window.set_system_cursor('hand')\n\n def on_leave(self, *args):\n print(\"You left through this point\", self.border_point)\n Window.set_system_cursor('arrow')\n\n\nclass HoverTabs(TabbedPanelItem, HoverBehavior):\n def on_enter(self, *args):\n print(self.text)\n print(\"You are in, through this point\", self.border_point)\n Window.set_system_cursor('hand')\n\n def on_leave(self, *args):\n print(\"You left through this point\", self.border_point)\n Window.set_system_cursor('arrow')\n\n\nclass ButtonApp(App):\n def __init__(self, **kwargs):\n super(ButtonApp, self).__init__(**kwargs)\n\n\n tmp_btn = HoverButton(text='BUTTON 1', size_hint=(None, None), size=(200, 60), color=(0, 1, 0, 1), font_size=dp(20),\n on_press=self.do_sum)\n tmp_btn2 = HoverButton(text='BUTTON 2', size_hint=(None, None), pos=(50, 100), size=(200, 60),\n color=(0, 1, 0, 1), font_size=dp(30), on_press=self.do_sum_2)\n\n self.fl = FloatLayout()\n\n self.bl = BoxLayout()\n self.bl.add_widget(tmp_btn)\n self.bl.add_widget(tmp_btn2)\n\n tabs = TabbedPanel()\n tabs.do_default_tab = False\n tab1 = HoverTabs(text='TAB 1')\n tab3 = HoverTabs(text='TAB 2', on_press=self.tab2_func)\n tab4 = HoverTabs(text='TAB 3')\n\n tab3.add_widget(self.bl)\n tabs.add_widget(tab1)\n tabs.add_widget(tab3)\n tabs.add_widget(tab4)\n tabs.default_tab = tab1\n self.fl.add_widget(tabs)\n\n def build(self):\n return self.fl\n\n def do_sum(self, *args, **kwargs):\n print(\"DOING SUM\")\n\n def do_sum_2(self, *args, **kwargs):\n print(\"DOING SUM 2\")\n\n def tab2_func(self, *args, **kwargs):\n print('CLICKED ON TAB2')\n\nif __name__=='__main__':\n ButtonApp().run()","sub_path":"kivy/kivy_tabs_button_hover_icon_change.py","file_name":"kivy_tabs_button_hover_icon_change.py","file_ext":"py","file_size_in_byte":4099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"514990348","text":"import math\n\ndef reward_function(params):\n # initial reward value\n reward = 1\n\n # Penalize if the car goes off track\n all_wheels_on_track = params['all_wheels_on_track']\n if not all_wheels_on_track:\n reward = 1e-3\n return reward\n\n # Read input parameters\n distance_from_center = params['distance_from_center']\n track_width = params['track_width']\n # steering = abs(params['steering_angle']) # Only need the absolute steering angle\n\n # Stay away from the edge of the track pls\n edge_of_track_distance = 0.4 * track_width\n 
distance_from_center_factor = 1\n\n if distance_from_center > edge_of_track_distance:\n distance_from_center_factor = 0.5\n\n # Step 3\n # Calculate the direction of the center line based on the closest waypoints\n\n waypoints = params['waypoints']\n closest_waypoints = params['closest_waypoints']\n next_point = waypoints[closest_waypoints[1]]\n prev_point = waypoints[closest_waypoints[0]]\n\n # Calculate the direction in radius, arctan2(dy, dx), the result is (-pi, pi) in radians\n track_direction = math.atan2(next_point[1] - prev_point[1], next_point[0] - prev_point[0])\n\n # Convert to degree\n track_direction = math.degrees(track_direction)\n\n is_left_of_center = params['is_left_of_center']\n heading = params['heading']\n\n\n\t# Calculate the difference between the track direction and the heading direction of the car\n direction_diff = abs(track_direction - heading)\n\n\t# Penalize the reward if the difference is too large\n DIRECTION_THRESHOLD = 10.0\n if direction_diff > DIRECTION_THRESHOLD:\n reward *= 0.5\n\n\n # if is_left_of_center:\n # if (heading - track_direction > 5):\n # # on the left of the track and turn more left than track line, going off track\n # reward = 1e-3\n # return reward\n # else:\n # if (track_direction - heading > 5):\n # reward = 1e-3\n # return reward\n\n\n # direction_diff = abs(track_direction - heading)\n # HEADING_THRESHOLD_LVL_1 = 10\n # HEADING_THRESHOLD_LVL_2 = 25\n\n # if (direction_diff < 2 ):\n # reward *= 100\n # elif (direction_diff > HEADING_THRESHOLD_LVL_1):\n # reward *= 0.5\n\n # reward = reward * distance_from_center_factor\n\n return float(reward)\n","sub_path":"custom_files/reward.py","file_name":"reward.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"135869809","text":"import math\nfrom ..Containers import States\nfrom ..Containers import Inputs\nfrom ..Modeling import PayloadDynamicsModel\nfrom ..Modeling import WindModel\nfrom ..Utilities import MatrixMath\nfrom ..Utilities import Rotations\nfrom ..Constants import PayloadPhysicalConstants as PPC\n\nimport numpy as np\n\nclass PayloadAerodynamicsModel:\n # \"Public\" Functions------------------------------------------------------------------------------------------------\n # init and reset functions function for the class\n def __init__(self, initialSpeed=25.0, initialHeight=100):\n self.initialSpeed = initialSpeed\n self.initialHeight = initialHeight\n self.VDM = PayloadDynamicsModel.PayloadDynamicsModel()\n self.WM = WindModel.WindModel()\n return\n def reset(self):\n self.initialSpeed = PPC.InitialSpeed\n self.initialHeight = PPC.InitialDownPosition\n self.WM = WindModel.WindModel()\n self.VDM = PayloadDynamicsModel.PayloadDynamicsModel()\n return\n\n # Get functions ---------------------------------------------------\n def getPayloadDynamicsModel(self):\n return self.VDM\n\n def getVehicleState(self):\n state = self.VDM.getVehicleState()\n return state\n\n def getWindModel(self):\n return self.WM\n\n def setWindModel(self, windModel):\n self.WM = windModel\n return\n\n # Set functions ---------------------------------------------------\n def setVehicleState(self, state):\n self.VDM.setVehicleState(state)\n return\n\n def Update(self):\n state = self.VDM.getVehicleState()\n self.WM.Update()\n retForces = self.updateForces(state)\n self.VDM.Update(retForces)\n\n # \"Private\" Functions------------------------------------------------------------------------------------------------\n 
def gravityForces(self, state):\n DCM = state.R\n DCM = MatrixMath.scalarMultiply(PPC.mass, DCM)\n graVector = [[0], [0], [PPC.g0]]\n gForce = Inputs.forcesMoments()\n tempMat = MatrixMath.multiply(DCM, graVector)\n gForce.Fx = tempMat[0][0]\n gForce.Fy = tempMat[1][0]\n gForce.Fz = tempMat[2][0]\n return gForce\n\n \n\n def aeroForces(self, state):\n #call coeffAlpha\n C_D = PPC.payCD\n # calculate fLift and fDrag\n alpha = state.alpha\n beta = state.beta\n retVal = Inputs.forcesMoments()\n fLift = 0\n circleArea = np.pi*PPC.Radius**2\n fDrag = ((PPC.rho * (state.Va ** 2) * circleArea)/2) * (C_D)\n bodyV = np.array([state.u,state.v,state.w])\n dragDirectionVec = -bodyV/(math.sqrt(state.u**2 + state.v**2 + state.w**2))\n #print(dragDirectionVec)\n dragBody = fDrag*dragDirectionVec\n \n retVal= Inputs.forcesMoments(Fx=dragBody[0],Fy=dragBody[1],Fz=dragBody[2])\n\n return retVal\n\n def updateForces(self, state, wind=None):\n\n #Replacing this code with the Calculate airspeed function call for this system\n\n #state.Va = math.hypot(state.u, state.v, state.w)\n #state.alpha = math.atan2(state.w, state.u) # angle of attack\n #if math.isclose(state.Va, 0.0): # Sideslip Angle, no airspeed\n # state.beta = 0.0\n #else:\n # state.beta = math.asin(state.v / state.Va) # Sideslip Angle, normal definition\n\n state.Va, state.alpha, state.beta = self.CalculateAirspeed(state, wind)\n\n aeroRet = Inputs.forcesMoments()\n aeroRet = self.aeroForces(state)\n\n \n \n\n gravRet = self.gravityForces(state)\n sumForces = Inputs.forcesMoments()\n\n sumForces.Fx = aeroRet.Fx + gravRet.Fx\n sumForces.Fy = aeroRet.Fy + gravRet.Fy\n sumForces.Fz = aeroRet.Fz + gravRet.Fz\n sumForces.Mx = aeroRet.Mx + gravRet.Mx\n sumForces.My = aeroRet.My + gravRet.My\n sumForces.Mz = aeroRet.Mz + gravRet.Mz\n\n return sumForces\n\n def CalculateAirspeed(self, state, wind):\n\n # Get the windspeed\n u = state.u\n v = state.v\n w = state.w\n if wind == None:\n Wn = 0\n We = 0\n Wd = 0\n else:\n Wn = wind.Wn\n We = wind.We\n Wd = wind.Wd\n\n\n UVW_i = [[u], [v], [w]]\n windNed = [[Wn], [We], [Wd]]\n\n\n\n # Take the Wu v w wind values and bring them to the inertial Wind Model Handout\n Ws = math.sqrt(Wn ** 2 + We ** 2 + Wd ** 2)\n\n chiw = math.atan2(We, Wn)\n\n if Ws == 0:\n gamw = 0\n else:\n gamw = -1 * math.asin(Wd/Ws)\n\n # make the matrix Yw and Xw\n R_ywxw = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n R_ywxw[0][0] = math.cos(chiw) * math.cos(gamw)\n R_ywxw[0][1] = math.cos(gamw) * math.sin(chiw)\n R_ywxw[0][2] = -1 * math.sin(gamw)\n\n R_ywxw[1][0] = -1 * math.sin(chiw)\n R_ywxw[1][1] = math.cos(chiw)\n R_ywxw[1][2] = 0\n\n R_ywxw[2][0] = math.cos(chiw) * math.sin(gamw)\n R_ywxw[2][1] = math.sin(chiw) * math.sin(gamw)\n R_ywxw[2][2] = math.cos(gamw)\n\n R_ywxwT = MatrixMath.transpose(R_ywxw)\n\n if wind == None:\n Wind_uvw = [[0], [0], [0]]\n else:\n Wind_uvw = [[wind.Wu], [wind.Wv], [wind.Ww]]\n\n\n Wind_uvw = MatrixMath.multiply(R_ywxwT, Wind_uvw)\n\n sumWind = MatrixMath.add(windNed, Wind_uvw)\n\n\n DCM = state.R\n\n Wb = MatrixMath.multiply(DCM, sumWind)\n UVW_r = MatrixMath.subtract(UVW_i, Wb)\n\n u_r = UVW_r[0][0]\n v_r = UVW_r[1][0]\n w_r = UVW_r[2][0]\n\n Va = math.sqrt( u_r ** 2 + v_r ** 2 + w_r ** 2 )\n\n\n\n\n alpha = math.atan2(w_r, u_r)\n\n denom = math.sqrt(u_r ** 2 + v_r ** 2 + w_r ** 2)\n if denom == 0:\n beta = math.asin(0)\n else:\n beta = math.asin(v_r/(denom))\n\n\n return Va, alpha, 
beta\n\n\n","sub_path":"UAV_Payload_Delivery/CodeBase/ece163/Modeling/PayloadAerodynamicsModel.py","file_name":"PayloadAerodynamicsModel.py","file_ext":"py","file_size_in_byte":5886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"651139871","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport glob\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport subprocess\nfrom Bio import SeqIO\nfrom subprocess import DEVNULL\nfrom collections import defaultdict\n\ndef main(args):\n Rdb = parse_usearch_clustering(args.input)\n Rdb['scaffold'] = [\"_\".join(prot.split(\"_\")[:-1]) for prot in Rdb['sequence']]\n Rdb['centroid_scaffold'] = [\"_\".join(prot.split(\"_\")[:-1]) for prot in Rdb['centroid']]\n\n ## Get scaffold lengths\n s2l = {}\n seqs = {}\n for fn in os.listdir(args.fasta):\n if fn.endswith(\".fa\"):\n for record in SeqIO.parse(os.path.join(args.fasta,fn), \"fasta\"):\n s2l[record.id] = len(record.seq)\n seqs[record.id] = record.seq\n Rdb['length'] = Rdb['scaffold'].map(s2l)\n\n ## Get largest contig\n f_long = open(args.output + \"rplF_contigs.fna\", \"w+\")\n for cluster, db in Rdb.groupby('cluster'):\n cluster = str(cluster)\n leng = str(db.sort_values('length', ascending=False)['length'].tolist()[0])\n largest_name = str(db.sort_values('length', ascending=False)['scaffold'].tolist()[0])\n centroid = str(db.sort_values('length', ascending=False)['cluster'].tolist()[0])\n f_long.write(\">\" + cluster + \"_\" + largest_name + \":\" + centroid + \".\" + str(leng) + \"\\n\")\n f_long.write(str(seqs[largest_name]) + \"\\n\")\n f_long.close()\n\n ## Print info\n Rdb = Rdb.rename(columns={'sequence':'gene', 'centroid':'centroid_gene', 'length':'scaffold_length'})\n Rdb.to_csv(args.output + \"clustering.info.tsv\", sep='\\t', index=False)\n\ndef parse_usearch_clustering(loc):\n '''\n From the location of a .uc usearch file, return something like Cdb\n https://www.drive5.com/usearch/manual/cmd_calc_distmx.html\n https://www.drive5.com/usearch/manual/opt_uc.html\n '''\n dtypes = {0:'category', 1:'category', 2:np.int32, 8:'object'}\n ucols = [0,1,2,8]\n Rdb = pd.read_csv(loc, header=None, usecols=ucols,dtype=dtypes, sep='\\t')\n table = defaultdict(list)\n\n # Find the centroids\n sdb = Rdb[Rdb[0] == 'S']\n shdb = Rdb[Rdb[0].isin(['H', 'S'])]\n for centroid, cdb in sdb.groupby(1):\n cent = cdb[8].tolist()[0].split()[0]\n db = shdb[shdb[1] == centroid]\n\n for seq in db[8].tolist():\n table['cluster'].append(int(centroid))\n table['members'].append(len(db))\n table['sequence'].append(seq.split()[0])\n table['centroid'].append(cent)\n return pd.DataFrame(table)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description= \"\"\"Parses UC file\"\"\")\n parser.add_argument('--input', help=\"path to uc file\", action=\"store\")\n parser.add_argument('--fasta', help=\"path to fasta files\", action=\"store\")\n parser.add_argument('--output', help=\"path to output files\", action=\"store\")\n args = parser.parse_args()\n main(args)\n","sub_path":"parsers/parse_usearch.py","file_name":"parse_usearch.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"510155155","text":"import logging\nfrom collections import defaultdict\n\nimport grequests\nimport requests\nfrom flask import Response, jsonify\nfrom werkzeug.exceptions import HTTPException\n\nSWAPI = \"https://swapi.co/api\"\n\n\nclass SWAPIException(HTTPException):\n 
code = 503\n description = \"SWAPI appears to be down at the moment\"\n\n\nclass SWAPIError(HTTPException):\n code = 400\n description = \"Unexpected response from SWAPI\"\n\n\nclass JsonResponse(Response):\n\n @classmethod\n def force_type(cls, rv, environ=None):\n if isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, list):\n rv = jsonify(rv)\n elif isinstance(rv, HTTPException):\n rv = jsonify({\n \"error\": rv.description\n })\n return super(JsonResponse, cls).force_type(rv, environ)\n\n\ndef make_request(url):\n try:\n resp = requests.get(url)\n except requests.exceptions.ConnectionError:\n raise SWAPIException\n else:\n if resp.status_code != 200:\n raise SWAPIError\n return resp.json()\n\n\ndef configure(app):\n app.response_class = JsonResponse\n\n @app.before_first_request\n def setup_logging():\n if not app.debug:\n # In production mode, add log handler to sys.stderr.\n app.logger.addHandler(logging.StreamHandler())\n app.logger.setLevel(logging.INFO)\n\n @app.errorhandler(Exception)\n def errorhandler(e):\n app.logger.exception(e)\n return {\n \"error\": \"internal server error\"\n }, 500\n\n @app.route(\"/films\", methods=[\"GET\"])\n def get_films():\n group = defaultdict(list)\n\n resp = make_request(\"{}/films\".format(SWAPI))\n films = resp.get(\"results\", [])\n\n for film in films:\n item = {\n \"title\": film[\"title\"],\n \"swapi_id\": film[\"url\"].split(\"/\")[-2],\n }\n group[film[\"director\"]].append(item)\n\n return group, 200\n\n @app.route(\"/characters/\", methods=[\"GET\"])\n def get_characters(film_id):\n\n resp = make_request(\"{}/films/{}\".format(SWAPI, film_id))\n urls = resp.get(\"characters\", [])\n\n names = []\n characters = grequests.imap([grequests.get(url) for url in urls])\n for character in characters:\n if character.status_code != 200:\n app.logger.warning(\n \"Getting character returned status code {}\".format(\n character.status_code\n )\n )\n continue\n name = character.json().get(\"name\")\n if name is not None:\n names.append(name)\n\n return names, 200\n\n return app\n","sub_path":"zume/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"73712989","text":"from sqlalchemy.orm import sessionmaker\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nfrom sqlalchemy import Column, create_engine\r\nfrom sqlalchemy.types import CHAR, Integer, String\r\nfrom random import randint\r\n\r\nBaseModel = declarative_base()\r\n# need to create database first\r\n# mysql> create database sqlalchemy;\r\n# mysql> use sqlalchemy;\r\nengine = create_engine('mysql://root:passw0rd@localhost:3306/sqlalchemy')\r\nDBSession = sessionmaker(bind=engine)\r\nsession = DBSession()\r\n# this command will create tables automatically\r\nBaseModel.metadata.create_all(engine)\r\n\r\n# CREATE TABLE `user` (\r\n# `id` integer NULL AUTO_INCREMENT ,\r\n# `name` varchar(30) NULL ,\r\n# `age` integer NULL DEFAULT NULL ,\r\n# PRIMARY KEY (`id`)\r\n# );\r\nclass User(BaseModel):\r\n __tablename__ = 'user'\r\n\r\n id = Column(Integer, primary_key=True, autoincrement=True)\r\n name = Column(String(30))\r\n age = Column(Integer)\r\n\r\n\r\nsession.execute(\r\n User.__table__.insert(),\r\n [{'name': 'randint(1, 100)', 'age': randint(1, 100)} for i in 
xrange(10000)]\r\n)\r\nsession.commit()\r\n","sub_path":"PythonDemo/sqlalchemy/test04_batch_insert.py","file_name":"test04_batch_insert.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"372996847","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for slickdealspider project\n#\n# For simplicity, this file contains only the most important settings by\n# default. All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'slickdealspider'\n\nSPIDER_MODULES = ['slickdealspider.spiders']\nNEWSPIDER_MODULE = 'slickdealspider.spiders'\n\nITEM_PIPELINES = {\n 'files.FilesPipeline':1,\n 'slickdealspider.pipelines.SlickdealspiderPipeline':100,\n}\n\nLOG_LEVEL='DEBUG'\n\nDOWNLOAD_DELAY = 2\nRANDOMIZE_DOWNLOAD_DELAY = True\nUSER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.54 Safari/536.5'\nCOOKIES_ENABLED = True\nCONCURRENT_REQUESTS = 1\n\nFILES_STORE = 'D:\\\\sliu10\\\\webroot\\\\slickdealspider\\\\img'\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'slickdealspider (+http://www.yourdomain.com)'\n","sub_path":"slickdealspider/slickdealspider/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"252454374","text":"#!/usr/bin/python3\n\nimport sys\nsys.path.append('../ry')\nfrom libry import *\n\nK = Config()\nD = K.view()\n\nK.addFile('../test/lgp-example.g');\nK.makeObjectsConvex()\n\nlgp = K.lgp(\"../test/fol.g\");\n\nlgp.walkToNode(\"(grasp baxterR stick) (push stickTip redBall table1) (grasp baxterL redBall)\");\nlgp.nodeInfo()\n\nlgp.optBound(BT.path, True);\n\nlgp.nodeInfo()\n\nkomo = lgp.getKOMOforBound(BT.path)\nkomo.display()\n\ninput(\"Press Enter to continue...\")\n","sub_path":"test/lgp-example.py","file_name":"lgp-example.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"482643021","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport sys\nimport os\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\n\nimport FLARE\nfrom FLARE.SED import models\nfrom FLARE.SED import SFZH\nimport FLARE.filters\n\n\n\n\n\n\n# SPS = models.SPS('P2/ModSalpeter_100')\nSPS = models.SPS('BPASSv2.2.1.binary/ModSalpeter_300')\n\n\n\nsfzh, sfr = SFZH.constant(SPS.grid['log10age'], SPS.grid['log10Z'] , {'log10_duration': 8., 'log10Z': -2., 'log10M*': 8.})\n\nprint('star formation rate: {0}'.format(sfr))\n\nfesc = 0.0\n\nSED = SPS.get_Lnu(sfzh, {'fesc': fesc, 'log10tau_V': -0.3}, dust = ('simple', {'slope':-1}))\n\n\n# --- create observed SED\n\ncosmo = FLARE.default_cosmo()\nz = 8.5\n\nSED.total.get_fnu(cosmo, z) # calculate observer frame wavelength\nSED.total_intrinsic.get_fnu(cosmo, z) # calculate observer frame wavelength\n\nplt.plot(SED.total_intrinsic.lamz, np.log10(SED.total_intrinsic.fnu), zorder = 1) # plot SED\nplt.plot(SED.total.lamz, np.log10(SED.total.fnu), zorder = 1) # plot SED\n\nplt.axvline(1216*(1+z), c='k', lw=1, alpha = 0.5)\n\nfilters = FLARE.filters.NIRCam_W\nF = FLARE.filters.add_filters(filters, new_lam = SED.total.lamz) # --- NOTE: need to give it the redshifted\nSED.total.get_Fnu(F) # generates Fnu (broad band fluxes)\nfor f in 
filters: plt.scatter(F[f].pivwv(), np.log10(SED.total.Fnu[f]), edgecolor = 'k', zorder = 2, label = f)\n\n\n\nplt.xlim([5000.,50000.])\n\nmx = np.max(np.log10(SED.total.fnu))\nplt.ylim([mx-4., mx+0.3])\nplt.show()\n","sub_path":"examples/SED/SED_wdust.py","file_name":"SED_wdust.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"601299455","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nFile: utils.py.py\nAuthor: Scott Yang(Scott)\nEmail: yangyingfa@skybility.com\nCopyright: Copyright (c) 2020, Skybility Software Co.,Ltd. All rights reserved.\nDescription:\n\"\"\"\nimport os\nimport shutil\nimport time\nfrom typing import Union, Optional\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup as bs\n\n__URL = 'https://maoyan.com/films?showType=3'\n__USER_AGENT = 'Mozilla/5.0 (X11; Fedora; Linux x86_64) AppleWebKit/537.36 ' \\\n '(KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'\n__COOKIE = '__mta=19167879.1601174661233.1601174895258.1601175697001.3; ' \\\n 'uuid_n_v=v1; uuid=578BC310006B11EBB86FCBED18BA04EA658ED82A12' \\\n '6B410D806EF30553458A85; _csrf=acb4b9917f72934d8c626cf5e7e97bd8dd' \\\n 'd75ef91d1f61bab299d1e673fc584e; ' \\\n 'mojo-uuid=298574953f55a6a3100126d50' \\\n 'c2ca339; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1601174661; ' \\\n '_lxsdk_cuid=174cd7267c4c8-091558ecac18b4-66313e58-1fa400-1' \\\n '74cd7267c4c8; _lxsdk=578BC310006B11EBB86FCBED18BA04EA658ED82A1' \\\n '26B410D806EF30553458A85; mojo-session-id={\"id\":\"2ee9c763407f0e65' \\\n 'a51ec26c5e51fa37\",\"time\":1601174661131}; mojo-trace-id=7; Hm_lpv' \\\n 't_703e94591e87be68cc8da0da7cbd0be2=1601175742; ' \\\n '__mta=19167879.1601' \\\n '174661233.1601175697001.1601175742059.4; ' \\\n '_lxsdk_s=174cd7267c7-491-9' \\\n '63-fab%7C%7C11'\nMAOYAN_MOVIES_HTML = 'list_maoyans.html'\n\n\nclass Movies:\n \"\"\"there is a lot of thing can be done in this class, i feel\"\"\"\n __slots__ = ['_name', '_movie_type', '_show_time']\n\n def __init__(self, name, movie_type, show_time):\n self._name = name\n self._movie_type = movie_type\n self._show_time = show_time\n\n @property\n def to_list(self):\n return [self._name, self._movie_type, self._show_time]\n\n\ndef get_index_data(url=None):\n url = url\n if not url:\n url = __URL\n\n print(f'Scrapy website {url}...')\n response = requests.get(url, headers={'User-Agent': __USER_AGENT,\n 'Cookie': __COOKIE}, timeout=10)\n if not response.ok:\n print(f'Failed to get response from {url} with code '\n f'{response.status_code}')\n raise ValueError\n\n return response\n\n\ndef parse_rsp(response: Union[str, requests.Response], num: Optional[int]):\n # bs_info = bs(response.text)\n if not response:\n return\n\n content = ''\n if isinstance(response, str):\n content = response\n\n elif isinstance(response, requests.Response):\n content = response.text\n\n if not content:\n return\n\n bs_info = bs(content, features='html.parser')\n all_movies = bs_info.find_all(\n 'div', attrs={'class': 'movie-item film-channel'}, limit=num)\n\n ret = []\n for movie_info in all_movies:\n\n dumpy_info = movie_info.find_all(\n 'div', attrs={'class': 'movie-hover-title'})\n name, movie_type, people, show_time = [x for x in dumpy_info]\n name = name.find(class_='name').get_text()\n movie_type = str(movie_type.find('span').next_sibling).strip()\n show_time = str(show_time.find('span').next_sibling).strip()\n ret.append(Movies(name, movie_type, show_time))\n\n return ret\n\n\ndef 
save_to_csv(data: list):\n if not isinstance(data, list):\n raise TypeError\n\n sum_data = []\n for ret in data:\n sum_data.append(ret.to_list)\n\n file_name = 'test.csv'\n\n pds = pd.DataFrame(data=sum_data)\n if os.path.exists(file_name):\n tmp_file = file_name[:-len('.csv')] + time.strftime('-%Y%m%dT%H%M%S', time.localtime()) + '.csv'\n print(f'backup csv file [{file_name}] to [{tmp_file}]')\n shutil.copy(file_name, tmp_file)\n\n print(f'save new content to {file_name}')\n pds.to_csv(file_name, sep='#', header=False, index=False)\n\n\ndef main():\n print(f'Start to scrape website {__URL}')\n if os.path.exists(MAOYAN_MOVIES_HTML):\n with open(MAOYAN_MOVIES_HTML, 'r') as f:\n rsp = f.read()\n\n else:\n rsp = get_index_data()\n with open(MAOYAN_MOVIES_HTML, 'w') as f:\n f.write(rsp.text)\n\n rsp = rsp.text\n\n # print(rsp)\n ret = parse_rsp(rsp, num=10)\n save_to_csv(ret)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Week01/home_work/bs_maoyan/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"642223870","text":"#!/usr/bin/env python \n\nimport os\nimport sys\nsys.path.insert(0, os.pardir)\nfrom testing_harness import TestHarness\n\n\nclass StatepointTestHarness(TestHarness):\n def __init__(self):\n super(StatepointTestHarness, self).__init__(None, False)\n\n def _test_output_created(self):\n \"\"\"Make sure statepoint files have been created.\"\"\"\n sps = ('statepoint.03.*', 'statepoint.06.*', 'statepoint.09.*')\n for sp in sps:\n self._sp_name = sp\n TestHarness._test_output_created(self)\n\n\nif __name__ == '__main__':\n harness = StatepointTestHarness()\n harness.main()\n","sub_path":"tests/test_statepoint_batch/test_statepoint_batch.py","file_name":"test_statepoint_batch.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"365198525","text":"from flask import Flask,request\nfrom flask_cors import cross_origin\n\nfrom controllers.user import getUser\nfrom controllers.user import createUser\nfrom controllers.login import doLogin\nfrom controllers.category import getCategories\nfrom controllers.method import getMethods\nfrom controllers.method import createMethod\n\napp = Flask(__name__)\n\n\n# Route for user login\n@app.route(\"/login\",methods=['POST'])\n@cross_origin()\ndef login():\n return doLogin()\n\n# Route to get user information and to register users\n@app.route(\"/user\",methods=['POST','GET'])\n@cross_origin()\ndef user():\n if request.method == 'POST':\n return createUser()\n else:\n return getUser()\n\n# Route to get the categories\n@app.route(\"/category\",methods=['GET'])\n@cross_origin()\ndef category():\n return getCategories()\n\n# Route to get and add methods\n@app.route(\"/method\",methods=['POST','GET'])\n@cross_origin()\ndef method():\n if request.method == 'POST':\n return createMethod()\n else:\n return getMethods()\n\n\nif __name__ == \"__main__\":\n app. 
run(debug=False,port=int(\"5000\"),host='0.0.0.0') #app.run(debug=False)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"4277984","text":"import numpy as np\nimport sys\n\ndef a_fb(sqrtshalf,gf):\n MZ = 90\n GFNom = 1.0\n\n sqrts = sqrtshalf*2.\n A_FB_EN = np.tanh((sqrts-MZ)/MZ*10)\n A_FB_GF = gf/GFNom\n return 2*A_FB_EN*A_FB_GF\n \ndef diffxsec(costheta,sqrtshalf,gf):\n norm = 2.*((1.+1./3.))\n return ((1+costheta**2)+a_fb(sqrtshalf,gf)*costheta)/norm\n\ndef rej_sample_costheta(nsamples,sqrtshalf,gf):\n ntrials = 0\n samples = []\n x = np.linspace(-1,1,num = 1000)\n maxval = np.max(diffxsec(x,sqrtshalf,gf))\n while len(samples) < nsamples:\n ntrials = ntrials+1\n xprop = np.random.uniform(-1,1)\n ycut = np.random.random()\n yprop = diffxsec(xprop,sqrtshalf,gf)/maxval\n if yprop/maxval < ycut:\n continue\n samples.append(xprop)\n return np.array(samples)\n\ndef simulator(theta,phi,n_samples):\n sys.stdout.write('.')\n samples = rej_sample_costheta(n_samples,phi,theta)\n return samples\n\n\n","sub_path":"workflows/codes/weinberg.py","file_name":"weinberg.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"29356886","text":"from django.urls import path, include\nfrom . import views \n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\n\napp_name = 'api-auth'\n\nurlpatterns = [\n path('obtain-token', views.obtain_token, name='obtain-token'),\n path('check-token/', views.check_token, name='check-token'),\n path('destroy-token/', views.destroy_token, name='destroy-token'),\n]\n","sub_path":"media_management_api/media_auth/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"488057316","text":"import os.path\nimport matplotlib.pyplot as plt\n\n# get file with data on it\ndirectory = os.path.dirname(os.path.abspath(__file__))\nfilename = os.path.join(directory, 'county_data.csv')\ndatafile = open(filename, 'r')\ndata = datafile.readlines()\ndatafile.close()\n\n# lists that hold data for plotting\ncounties = [i[1:] for i in data[0].split(',')[2:] if ' New Jersey' not in i]\ndegree_percent = map(float, [i.strip()[1:-2] for i in data[36].split(',')[1:]][3:])\n\nfig, ax = plt.subplots(1, 1)\nax.barh([i for i in range(len(counties))], degree_percent, align='center', alpha=0.5, tick_label = counties)\nax.set_title('Percentage of those age 25+ who receive at least a bachelor\\'s degree\\n(2017 U.S. 
Sample)')\nax.set_xlabel('Percent of those age 25+ who have received a bachelor\\'s degree')\nax.set_ylabel('County')\n\nplt.show()\n","sub_path":"education_by_county.py","file_name":"education_by_county.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"311355247","text":"from __future__ import unicode_literals\n\nfrom django.db import models, transaction, router\n\nfrom .signals import th_post_init_deferred, th_post_save_deferred\nfrom .deletion import CustomDeletionCollector\nfrom .tracker import TrackHelper\n\n\ndef store_initial(sender, instance, **kwargs):\n # Assign the helper instance\n setattr(instance, '_th', TrackHelper(tracked_instance=instance,\n fields=kwargs.get('_th_fields'), exclude=kwargs.get('_th_exclude')))\n\n\ndef action_receiver(sender, instance, signal, **kwargs):\n TrackHelper.signal_receiver(instance._th, instance, signal, **kwargs)\n\n\nclass DeferredSignalWrapper(models.Model):\n def __init__(self, *args, **kwargs):\n super(DeferredSignalWrapper, self).__init__(*args, **kwargs)\n if self._deferred:\n th_post_init_deferred.send(sender=self._meta.concrete_model, instance=self)\n\n def save_base(self, *args, **kwargs):\n super(DeferredSignalWrapper, self).save_base(*args, **kwargs)\n if self._deferred:\n th_post_save_deferred.send(sender=self._meta.concrete_model, instance=self, created=False,\n update_fields=kwargs.get('update_fields', None), using=kwargs.get('using', None))\n\n def save(self, *args, **kwargs):\n # Prevent any changes if model or one of history model will not save properly\n with transaction.atomic(using=kwargs.get('using', None)):\n try:\n super(DeferredSignalWrapper, self).save(*args, **kwargs)\n except:\n transaction.rollback(using=kwargs.get('using', None))\n raise\n\n def delete(self, using=None):\n using = using or router.db_for_write(self.__class__, instance=self)\n assert self._get_pk_val() is not None, (\n \"%s object can't be deleted because its %s attribute is set to None.\" %\n (self._meta.object_name, self._meta.pk.attname)\n )\n\n collector = CustomDeletionCollector(using=using) # The only modification, the rest copied from models.Model\n collector.collect([self])\n collector.delete()\n\n class Meta:\n abstract = True\n","sub_path":"track_history/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"546209260","text":"import os\nimport unittest\n\ntry:\n from unittest.mock import patch, call\nexcept ImportError:\n from mock import patch, call\n\nfrom datadog.api.exceptions import ClientError\nfrom datadog_lambda.metric import lambda_metric, ThreadStatsWriter\nfrom datadog_lambda.tags import _format_dd_lambda_layer_tag\n\n\nclass TestLambdaMetric(unittest.TestCase):\n def setUp(self):\n patcher = patch(\"datadog_lambda.metric.lambda_stats\")\n self.mock_metric_lambda_stats = patcher.start()\n self.addCleanup(patcher.stop)\n\n def test_lambda_metric_tagged_with_dd_lambda_layer(self):\n lambda_metric(\"test\", 1)\n lambda_metric(\"test\", 1, 123, [])\n lambda_metric(\"test\", 1, tags=[\"tag1:test\"])\n expected_tag = _format_dd_lambda_layer_tag()\n self.mock_metric_lambda_stats.distribution.assert_has_calls(\n [\n call(\"test\", 1, timestamp=None, tags=[expected_tag]),\n call(\"test\", 1, timestamp=123, tags=[expected_tag]),\n call(\"test\", 1, timestamp=None, tags=[\"tag1:test\", expected_tag]),\n ]\n )\n\n def 
test_lambda_metric_flush_to_log(self):\n os.environ[\"DD_FLUSH_TO_LOG\"] = \"True\"\n\n lambda_metric(\"test\", 1)\n self.mock_metric_lambda_stats.distribution.assert_not_called()\n\n del os.environ[\"DD_FLUSH_TO_LOG\"]\n\n\nclass TestFlushThreadStats(unittest.TestCase):\n def setUp(self):\n patcher = patch(\n \"datadog.threadstats.reporters.HttpReporter.flush_distributions\"\n )\n self.mock_threadstats_flush_distributions = patcher.start()\n self.addCleanup(patcher.stop)\n\n def test_retry_on_remote_disconnected(self):\n # Raise the RemoteDisconnected error\n lambda_stats = ThreadStatsWriter(True)\n\n self.mock_threadstats_flush_distributions.side_effect = ClientError(\n \"POST\",\n \"https://api.datadoghq.com/api/v1/distribution_points\",\n \"RemoteDisconnected('Remote end closed connection without response')\",\n )\n lambda_stats.flush()\n self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 2)\n","sub_path":"tests/test_metric.py","file_name":"test_metric.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"75608685","text":"from rest_framework import serializers\n\nfrom wattstrat.simulation.models import Simulation\n\nclass SimulationSerializer(serializers.ModelSerializer):\n creator = serializers.CharField(source='get_creator_name', read_only=True)\n creators = serializers.JSONField(read_only=True)\n territory_groups = serializers.JSONField()\n \n class Meta:\n model = Simulation\n fields = ('shortid', 'name', 'creator', 'creators', 'date', 'territory_groups')\n","sub_path":"wattstrat/simulation/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"289404823","text":"import logging\nfrom flask import current_app\nfrom flask import request, send_file, make_response, send_from_directory\nfrom flask_restplus import Resource\n#from flanken_api.api.flanken.serializers import search_result\nfrom flanken_api.api.flanken.parsers import pdf_arguments, table_cnv_arguments, search_arguments, capture_arguments, ploturls_arguments, staticplot_arguments, igv_arguments, table_svs_arguments, project_arguments, table_igvnav_arguments, igv_save_file_arguments, table_qc_arguments\nfrom flanken_api.api.restplus import api\nfrom flask import jsonify\nfrom flanken_api.api.flanken.business import pdfs_files, get_table_cnv_header, check_nfs_mount, get_sample_ids, get_sample_design_ids, get_static_frankenplot, get_static_image, get_interactive_plot, get_table_svs_header, get_table_igv, save_igvnav_input_file, get_table_qc_header\nfrom flanken_api.api.flanken.serializers import status_result, dropdownlist, dropdownlist_capture, ploturl_list\nimport io\n#import flanken_api.database.models \nlog = logging.getLogger(__name__)\nns = api.namespace('flanken', description='Interactive Flanken Plots')\n\n@ns.route('/')\n@api.response(200, 'Status of API')\n@api.response(400, '/nfs is not mount locally')\nclass FlankenStatus(Resource):\n @api.expect(project_arguments, validate=True)\n @api.marshal_with(status_result)\n def get(self):\n \"\"\"\n Returns status of the endpoint.\n ```\n {\n \"server_status\": true\n }\n\n ```\n \"\"\"\n args = project_arguments.parse_args()\n proj_name = args['project_name']\n status, error_code = check_nfs_mount(current_app.config[proj_name])\n result = {'server_status': True}\n\n if not status:\n return {'server_status': False}, 
error_code\n\n\n return result, error_code\n\n@ns.route('/samples')\n@api.response(200, 'All Samples for dropdown')\n@api.response(400, '/nfs is not mount locally no data found')\nclass DropdownListSample(Resource):\n @api.marshal_with(dropdownlist)\n @api.expect(project_arguments, validate=True)\n def get(self):\n \"\"\"\n Returns List of sample ids for dropdown in UI.\n ```\n [sdid1, sdid2,......]\n ```\n \"\"\"\n args = project_arguments.parse_args()\n proj_name = args['project_name']\n result, errorcode = get_sample_ids(current_app.config[proj_name])\n return result, errorcode\n\n@ns.route('/capture')\n@api.response(200, 'All Samples for dropdown')\n@api.response(400, '/nfs is not mount locally no data found')\nclass DropdownListCapture(Resource):\n @api.expect(capture_arguments, validate=True)\n @api.marshal_with(dropdownlist_capture)\n def get(self):\n \"\"\"\n Returns List of capture ids for given sample ids which are used for dropdown in UI.\n ```\n [cpture_id1, capture2,......]\n ```\n \"\"\"\n args = capture_arguments.parse_args()\n result, errorcode = get_sample_design_ids(current_app.config[args['project_name']], args['sdid'])\n return result, errorcode\n\n\n@ns.route('/ploturls')\n@api.response(200, 'Franken plot urls')\n@api.response(400, 'No franken plot images found in qc folder')\nclass FrankenUrls(Resource):\n @api.expect(ploturls_arguments, validate=True)\n @api.marshal_with(ploturl_list)\n def get(self):\n \"\"\"\n Returns List of franken plot url.\n ```\n [url1, url2,......]\n ```\n \"\"\"\n args = ploturls_arguments.parse_args()\n result, errorcode = get_static_frankenplot(current_app.config[args['project_name']], args['project_name'], args['sdid'], args['capture_id'])\n return result, errorcode\n\n\n@ns.route('/staticimage')\n@api.response(200, 'Franken Static plot')\n@api.response(400, 'No Static plots found')\nclass FrankenStaticImages(Resource):\n @api.representation('image/png')\n @api.expect(staticplot_arguments, validate=True)\n def get(self):\n \"\"\"\n Returns static franken plot.\n ```\n base64 of png\n ```\n \"\"\"\n args = staticplot_arguments.parse_args()\n result, errorcode = get_static_image(current_app.config[args['project_name']], args['sdid'], args['capture_id'], args['imagename'])\n return send_file(result,\n attachment_filename='frankenplot.png',\n mimetype='image/png')\n\n@ns.route('/plot')\n@api.response(200, 'Json file to plot')\n@api.response(400, 'sample or json file not found')\nclass FlankenPlot(Resource):\n @api.expect(search_arguments, validate=True)\n def get(self):\n \"\"\"\n Returns Json file to plot.\n ```\n Json data to plot the flanken plots\n ```\n \"\"\"\n args = search_arguments.parse_args()\n result , errocode = get_interactive_plot(current_app.config[args['project_name']], args['sdid'], args['capture_id'], args['pname'])\n return result, errocode\n\n\n\n\n@ns.route('/igvsession')\n@api.response(200, 'Files to load in igv tracks')\n@api.response(400, 'No file found')\nclass IGVTracksFiles(Resource):\n @api.expect(igv_arguments, validate=True)\n def get(self):\n \"\"\"\n Returns Files Required To Load in IGV Tracks\n ```\n bam, vcf etc.\n ```\n \"\"\"\n args = igv_arguments.parse_args()\n if args['filename'] == '1':\n return send_from_directory('/nfs/PROBIO/autoseq-output/P-00356971/PB-P-00356971-CFDNA-03589573-KH20190515-C220190515_PB-P-00356971-N-03589575-KH20190515-C220190515/bams/C2',\n 'PB-P-00356971-CFDNA-03589573-KH-C2-nodups.bam' )\n\n return 
send_from_directory('/nfs/PROBIO/autoseq-output/P-00356971/PB-P-00356971-CFDNA-03589573-KH20190515-C220190515_PB-P-00356971-N-03589575-KH20190515-C220190515/bams/C2',\n 'PB-P-00356971-CFDNA-03589573-KH-C2-nodups.bam.bai')\n #return send_from_directory('/nfs/PROBIO/autoseq-output/P-00356971/PB-P-00356971-CFDNA-03589573-KH20190515-C220190515_PB-P-00356971-N-03589575-KH20190515-C220190515/bams/C2/', 'PB-P-00356971-CFDNA-03589573-KH-C2-nodups.bam.bai')\n\n\n@ns.route('/table/svs')\n@api.response(200, 'All Structural Variants')\n@api.response(400, '/nfs is not mount locally no data found')\nclass TableSvs(Resource):\n @api.expect(table_svs_arguments, validate=True)\n def get(self):\n \"\"\"\n Returns All Structural Variants.\n ```\n { 'header': {\n columnTitle1:{ title: 'ID', type: 'number', editable:false},\n columnTitle2:{ title: 'ID', type: 'string', editable:false}\n },\n 'data' : [\n { columnTitle1: value1, columnTitle2: value2 }\n ]\n }\n ```\n \"\"\"\n args = table_svs_arguments.parse_args()\n result, errorcode = get_table_svs_header(current_app.config[args['project_name']], args['sdid'], args['capture_id'], args['header'])\n return result, errorcode\n\n@ns.route('/table/igv/<string:variant>')\n@api.response(200, 'All Germline and somatic Variants')\n@api.response(400, '/nfs is not mount locally no data found')\nclass TableIgv(Resource):\n @api.expect(table_igvnav_arguments, validate=True)\n def get(self, variant):\n \"\"\"\n Returns All Germline and Somatic Variants.\n ```\n { 'header': {\n columnTitle1:{ title: 'ID', type: 'number', editable:false},\n columnTitle2:{ title: 'ID', type: 'string', editable:false}\n },\n 'data' : [\n { columnTitle1: value1, columnTitle2: value2 }\n ]\n }\n ```\n \"\"\"\n args = table_igvnav_arguments.parse_args()\n result, errorcode = get_table_igv(variant, current_app.config[args['project_name']], args['sdid'], args['capture_id'], args['header'])\n return result, errorcode\n\n@ns.route('/table/qc')\n@api.response(200, 'Sample QC Metrics')\n@api.response(400, '/nfs is not mount locally no data found')\nclass TableQc(Resource):\n @api.expect(table_qc_arguments, validate=True)\n def get(self):\n \"\"\"\n Returns All QC Metrics For Samples.\n ```\n { 'header': {\n columnTitle1:{ title: 'ID', type: 'number', editable:false},\n columnTitle2:{ title: 'ID', type: 'string', editable:false}\n },\n 'data' : [\n { columnTitle1: value1, columnTitle2: value2 }\n ]\n }\n ```\n \"\"\"\n args = table_qc_arguments.parse_args()\n result, errorcode = get_table_qc_header(current_app.config[args['project_name']], args['sdid'], args['capture_id'], args['header'])\n return result, errorcode\n\n@ns.route('/table/cnv/<string:variant_type>')\n@api.response(200, 'CNV Metrics')\n@api.response(400, '/nfs is not mount locally no data found')\nclass TableCNV(Resource):\n @api.expect(table_cnv_arguments, validate=True)\n def get(self, variant_type):\n \"\"\"\n Returns All CNV Metrics For Samples.\n ```\n { 'header': {\n columnTitle1:{ title: 'ID', type: 'number', editable:false},\n columnTitle2:{ title: 'ID', type: 'string', editable:false}\n },\n 'data' : [\n { columnTitle1: value1, columnTitle2: value2 }\n ]\n }\n ```\n \"\"\"\n args = table_cnv_arguments.parse_args()\n result, errorcode = get_table_cnv_header(current_app.config[args['project_name']], args['sdid'], args['capture_id'], variant_type, args['header'])\n return result, errorcode\n\n\n\n@ns.route('/save/igvinput')\n@api.response(200, 'Successfully saving igvnav files')\n@api.response(400, '/nfs is not mount locally no data found')\nclass SaveIGVFile(Resource):\n 
#@api.expect(igv_save_file_arguments, validate=True)\n def post(self):\n \"\"\"\n Saves IGVnav-input.txt file and structural variant file.\n ```\n\n ```\n \"\"\"\n args = request.json\n result, errorcode = save_igvnav_input_file(args['file_name'], args['data'])\n return result, errorcode\n\n# pdf endpoints\n@ns.route('/pdf/<string:variant>')\n@api.response(200, 'PDF file')\n@api.response(400, '/nfs is not mount locally no data found')\nclass PDFCalls(Resource):\n @api.representation('application/pdf')\n @api.expect(pdf_arguments, validate=True)\n def get(self, variant):\n \"\"\"\n Returns PDF files.\n \"\"\"\n args = pdf_arguments.parse_args()\n result, errorcode = pdfs_files(variant, current_app.config[args['project_name']], args['sdid'], args['capture_id'])\n return send_file(result,\n attachment_filename=variant+'.pdf',\n mimetype='application/pdf')\n","sub_path":"flanken_api/api/flanken/endpoints/flanken_api.py","file_name":"flanken_api.py","file_ext":"py","file_size_in_byte":10764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"274483376","text":"import numpy as np\r\nimport numpy.random as rn\r\n\r\nclass Gridworld(object):\r\n \"\"\"Gridworld MDP\"\"\"\r\n def __init__(self,grid_size,wind,discount):\r\n \"\"\"grid_size: Grid size. int\r\n wind: Chance of moving randomly. float\"\"\"\r\n\r\n self.actions = ((1,0),(0,1),(-1,0),(0,-1))\r\n self.n_actions = len(self.actions)\r\n self.n_states = grid_size**2\r\n self.grid_size = grid_size\r\n self.wind = wind\r\n self.discount = discount\r\n\r\n # Preconstruct the transition probability array\r\n self.transition_probability = np.array(\r\n [[[self.transition_probability(i,j,k)\r\n for k in range(self.n_states)]\r\n for j in range(self.n_actions)]\r\n for i in range(self.n_states)]\r\n )\r\n def __str__(self):\r\n return \"Gridworld({},{},{})\".format(self.grid_size,self.wind,self.discount)\r\n def feature_vector(self,i,feature_map = \"ident\"):\r\n if feature_map == \"coord\":\r\n f = np.zeros(self.grid_size)\r\n x,y = i%self.grid_size,i//self.grid_size\r\n f[x] += 1\r\n f[y] += 1\r\n return f\r\n if feature_map == \"proxi\":\r\n f = np.zeros(self.n_states)\r\n x , y = i%self.grid_size,i//self.grid_size\r\n for b in range(self.grid_size):\r\n for a in range(self.grid_size):\r\n dist = abs(x-a) +abs(y-b)\r\n f[self.point_to_int((a,b))] = dist\r\n return f\r\n\r\n # Assume identity map.\r\n f = np.zeros(self.n_states)\r\n f[i] =1\r\n return f\r\n def feature_matrix(self,feature_map=\"ident\"):\r\n features = []\r\n for n in range(self.n_states):\r\n f = self.feature_vector(n,feature_map)\r\n features.append(f)\r\n return np.array(features)\r\n\r\n def int_to_point(self,i):\r\n return (i%self.grid_size,i//self.grid_size)\r\n def point_to_int(self,p):\r\n return p[0] +p[1]*self.grid_size\r\n\r\n def neighbouring(self,i,k):\r\n return abs(i[0]-k[0])+abs(i[1]-k[1]) <=1\r\n\r\n def transition_probability(self,i,j,k):\r\n xi,yi = self.int_to_point(i)\r\n xj,yj = self.actions[j]\r\n xk,yk = self.int_to_point(k)\r\n\r\n if not self.neighbouring((xi,yi),(xk,yk)):\r\n return 0.0\r\n\r\n # is k the intended state to move to ?\r\n if (xi+xj,yi+yj) == (xk,yk):\r\n return 1 - self.wind +self.wind/self.n_actions\r\n # If these are not the same point ,then we can move there by wind.\r\n if (xi,yi) != (xk,yk):\r\n return self.wind/self.n_actions\r\n # If these are the same point ,we can only move here by either moving\r\n # off the grid or being blown off the grid. 
Are we on a corner or not?\r\n\r\n if (xi,yi) in {(0,0),(self.grid_size-1,self.grid_size-1),\r\n (0,self.grid_size-1),(self.grid_size-1,0)}:\r\n # Can move off the edge in two directions.\r\n if not (0<=xi+xj<self.grid_size and\r\n 0<=yi+yj<self.grid_size):\r\n # We intended to move off the grid, so we have the regular\r\n # success chance of staying here plus an extra chance of blowing\r\n # onto the *other* off-grid square.\r\n return 1 - self.wind + 2*self.wind/self.n_actions\r\n else:\r\n # We can blow off the grid in either direction only by wind.\r\n return 2*self.wind/self.n_actions\r\n else:\r\n # Not a corner. Is it an edge?\r\n if (xi not in {0,self.grid_size-1} and\r\n yi not in {0,self.grid_size-1}):\r\n # Not an edge. We cannot be blown off the grid from here.\r\n return 0.0\r\n # Edge. Can only move off the grid in one direction.\r\n if not (0<=xi+xj<self.grid_size and\r\n 0<=yi+yj<self.grid_size):\r\n # We intended to move off the grid, so we have the regular\r\n # success chance of staying here.\r\n return 1 - self.wind + self.wind/self.n_actions\r\n else:\r\n # We can blow off the grid only by wind.\r\n return self.wind/self.n_actions\r\n\r\n def reward(self,state_int):\r\n \"\"\"State int -> Reward.\"\"\"\r\n if state_int == self.n_states -1:\r\n return 1\r\n return 0\r\n def average_reward(self,n_trajectories,trajectory_length,policy):\r\n trajectories = self.generate_trajectories(n_trajectories,trajectory_length,policy)\r\n rewards = [[r for _,_,r in trajectory] for trajectory in trajectories]\r\n rewards = np.array(rewards)\r\n\r\n # Add up all the rewards to find the total reward.\r\n total_reward = rewards.sum(axis=1)\r\n\r\n # Return the average reward and standard deviation.\r\n return total_reward.mean(),total_reward.std()\r\n def optimal_policy_deterministic(self,state_int):\r\n sx,sy = self.int_to_point(state_int)\r\n if sx 3: break\n if (time_parse(chunk.loc[r,'Date']) - seq_init_time).seconds >= 60*45: # 45 minutes is too much, really looking for sunday big changes\n seq_start_ind += 1 # move the tail of window up\n # ~ print('too long')\n continue # don't evaluate anything else\n elif (time_parse(chunk.loc[r,'Date']) - seq_init_time).seconds >= 60*look_back: # less than 45, and greater than 30 minutes of data\n seq_found_flag = True\n pass\n elif (time_parse(chunk.loc[r,'Date']) - seq_init_time).seconds < 60*look_back:\n # ~ print('too short')\n continue # don't evaluate anything else, let the head of the window move further forward\n # only evaluating all of this if the sequence is of the right length\n \n x_train = chunk.loc[seq_start_ind:r,'Bid'].values\n \n ii = 1\n while (time_parse(chunk.loc[r+ii,'Date']) - time_parse(chunk.loc[r,'Date'])).seconds < 60*label_look_ahead:\n ii += 1\n if ii > 2000: \n print('BADDDD')\n break\n y_val = chunk.loc[r+ii-1,'Bid']\n if y_val - chunk.loc[r,'Bid'] >= pip_thresh:\n # the second element of the array is to fix the categorical number of classes\n y_train = [0,2] # pip increase by >= thresh\n chunk_ups += 1\n elif y_val - chunk.loc[r,'Bid'] <= -pip_thresh:\n y_train = [1,2] # pip decrease by >= thresh\n chunk_downs += 1\n else: \n y_train = [2,2] # pip stayed the same within tolerance\n chunk_stays += 1\n \n if seq_found_flag and x_train.shape[0] < 1000:\n continue # assuming this is a periodic sunday problem\n \n class_imbalance_tolerance = 100\n if y_train[0]==0:\n if trained_ups > trained_downs + class_imbalance_tolerance or trained_ups > trained_stays + class_imbalance_tolerance:\n continue # don't train or will create a class distribution imbalance\n else: trained_ups += 1\n elif y_train[0]==1:\n if trained_downs > trained_ups + class_imbalance_tolerance or trained_downs > trained_stays + class_imbalance_tolerance:\n continue # don't train or will create a class distribution imbalance\n else: trained_downs += 1\n elif y_train[0]==2:\n if trained_stays > trained_ups + class_imbalance_tolerance or trained_stays > trained_downs + class_imbalance_tolerance:\n continue # don't train or will create a class distribution imbalance\n else: trained_stays += 1\n else: print('unexpected error')\n \n y_train = np.reshape(to_categorical(y_train)[0,:],(1,3))\n x_train = np.reshape(x_train,(1,len(x_train),1))\n \n # ~ print('y')\n # ~ print(y_train.shape)\n # ~ print(y_train)\n # ~ if predict_next_N:\n # ~ print('Actual: ',y_train)\n # ~ print('Predicted: ',model.predict(x_train))\n # ~ predicted += 1\n # ~ if predicted > 2: \n # ~ predict_next_N = False\n # ~ predicted = 0\n # ~ else: \n # ~ # train model on sequence\n # ~ e_hist = 
model.fit(x_train, y_train, steps_per_epoch=1, epochs=1, verbose=0) # can vary steps_per_epoch\n # ~ loss_hist.append(e_hist.history['loss'][0])\n \n if loop_count % 10 == 0:\n # ~ plt.figure(1)\n # ~ plt.clf()\n # ~ plt.plot(loss_hist,'bo')\n # ~ plt.xlabel('epoch')\n # ~ plt.ylabel('train loss')\n # ~ plt.pause(0.001)\n \n print(' ')\n print('Chunk ',c,' current data date:',time_parse(chunk.loc[r,'Date']))\n \n if time_parse(chunk.loc[r,'Date']).year == 2019: \n finished_2018 = True\n break\n \n predict_next_N = True\n\n \n # ~ print('found an x sequence and y label')\n # ~ print(x_train.shape)\n # ~ print(y_val)\n \n seq_start_ind += 1 \n # ~ print('r:',r,' seq_start_ind:',seq_start_ind)\n \n # cool moving plot of the tick data\n plt.figure(2)\n plt.clf()\n plt.plot(x_train[0,:,0])\n if y_train[0,0]:\n plt.plot(x_train.shape[1]+1,max(x_train[0,:,0])+0.0010,'ro')\n elif y_train[0,1]:\n plt.plot(x_train.shape[1]+1,min(x_train[0,:,0])-0.0010,'ro')\n elif y_train[0,2]:\n plt.plot(x_train.shape[1]+1,np.mean(x_train[0,:,0]),'ro')\n plt.pause(0.001)\n \n loop_count += 1\n \n print('chunk statistics')\n print('pip increases:',chunk_ups)\n print('pip decreases:',chunk_downs)\n print('pip stays:',chunk_stays)\n print('total trained up:',trained_ups,' downs:',trained_downs,' stays:',trained_stays)\n # ~ time.sleep(3)\n \n if finished_2018:\n print('stopping since all of 2018 data has been processed')\n break\n c += 1\nexcept KeyboardInterrupt: pass\n\nif input('Save model? [yes] [no]') =='yes' or finished_2018:\n # save model and architecture to single file\n model.save(\"bigger3_fin_ml_model.h5\")\n print(\"Saved model to disk\")\n\n\n\n\n\n","sub_path":"lstm_try_3.py","file_name":"lstm_try_3.py","file_ext":"py","file_size_in_byte":9020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"10106758","text":"# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n''' Frozen text classification model Evaluater'''\nimport os\nimport sys\nimport delta.compat as tf\nfrom absl import logging\nfrom absl import app\nimport numpy as np\n\nfrom delta import utils\nfrom delta.utils import metrics as metrics_lib\nfrom delta.utils.register import registers\nfrom delta.utils.register import import_all_modules_for_register\nfrom delta.serving.base_frozen_model import FrozenModel\n\n\n@registers.serving.register\nclass TextClsInfer(FrozenModel):\n ''' infer from forzen model '''\n\n def __init__(self, config, gpu_str=None, mode=utils.INFER):\n self._config = config\n self._mode = mode\n model = os.path.join(config['solver']['service']['model_path'],\n config['solver']['service']['model_version'])\n super().__init__(model, gpu_str=gpu_str)\n\n self.inspect_ops()\n\n self.input_sentence = self.graph.get_tensor_by_name(\n config['solver']['service']['input_sentence'])\n self.input_x = 
self.graph.get_tensor_by_name(\n config['solver']['service']['input_x'])\n self.score = self.graph.get_tensor_by_name(\n config['solver']['service']['score'])\n self.preds = self.graph.get_tensor_by_name(\n config['solver']['service']['preds'])\n\n @property\n def config(self):\n ''' config '''\n return self._config\n\n def get_test_feed_dict(self):\n return {self.input_sentence: [\"你好\", \"很开心\"]}\n\n def infer_one(self):\n feed_dict = self.get_test_feed_dict()\n\n input_x, score, preds = self.sess.run(\n [self.input_x, self.score, self.preds], feed_dict=feed_dict)\n logging.info(f\"input_x: {input_x}\")\n logging.info(f\"preds: {preds}\")\n logging.info(f\"score: {score}\")\n\n def eval_or_infer(self):\n pass\n\n\ndef main(_):\n ''' main func '''\n FLAGS = app.flags.FLAGS #pylint: disable=invalid-name\n\n logging.info(\"config is {}\".format(FLAGS.config))\n logging.info(\"mode is {}\".format(FLAGS.mode))\n logging.info(\"gpu is {}\".format(FLAGS.gpu))\n assert FLAGS.config, 'give a config.yaml'\n assert FLAGS.mode, 'give mode eval, infer or eval_and_infer'\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = FLAGS.gpu #selects a specific device\n\n #create dataset\n if FLAGS.mode == 'infer':\n mode = utils.INFER\n elif FLAGS.mode == 'eval':\n mode = utils.EVAL\n else:\n mode = FLAGS.mode\n\n # load config\n config = utils.load_config(FLAGS.config)\n\n # process config\n import_all_modules_for_register()\n solver_name = config['solver']['name']\n solver = registers.solver[solver_name](config)\n config = solver.config\n\n eval_obj = TextClsInfer(config, gpu_str=FLAGS.gpu, mode=mode)\n\n if mode == utils.INFER or mode == utils.EVAL:\n eval_obj.eval_or_infer()\n elif mode == \"debug\":\n eval_obj.debug()\n elif mode == \"infer_one\":\n eval_obj.infer_one()\n\n\ndef define_flags():\n ''' define flags for evaluator'''\n app.flags.DEFINE_string(\n 'config',\n 'egs/mock_text_cls_data/text_cls/v1/config/han-cls.yml',\n help='config path')\n app.flags.DEFINE_string('mode', 'eval', 'eval, infer, debug, infer_one')\n # The GPU devices which are visible for current process\n app.flags.DEFINE_string('gpu', '', 'gpu number')\n\n\nif __name__ == '__main__':\n logging.set_verbosity(logging.INFO)\n define_flags()\n app.run(main)\n logging.info(\"OK. 
Done!\")\n","sub_path":"delta/serving/eval_text_cls_pb.py","file_name":"eval_text_cls_pb.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"556604505","text":"\n\nimport os\n\nfrom hub_pip import __version__\nfrom hub_pip.BlackDuckConfig import BlackDuckConfig\nfrom hub_pip.LogHandler import *\n\n\nVALID = \"valid.ini\"\nINVALID = \"invalid.ini\"\nBROKEN = \"broken.ini\"\n\nCLEANUP = True\n\nBASE = \"src/hub_pip/tests/resources/\"\n\nREQUIREMENTS = BASE + \"requirements.txt\"\nSMALL_BDIO = BASE + \"small_bdio.jsonld\"\nBIG_BDIO = BASE + \"big_bdio.jsonld\"\n\n\ndef get_config(file_name):\n file_path = BASE + file_name\n config = BlackDuckConfig.from_file(file_path)\n config.project_name = \"hub-pip\"\n config.project_version_name = __version__\n return config\n\n\ndef cleanup(files_to_remove):\n if CLEANUP:\n if isinstance(files_to_remove, list):\n for file in files_to_remove:\n cleanup(file)\n if os.path.exists(files_to_remove):\n os.remove(files_to_remove)\n else:\n debug(files_to_remove + \" does not exist\")\n assert(False)\n\n\ndef get_file(file_path):\n file_str = None\n with open(file_path, \"r\") as file:\n file_str = file.read()\n return file_str\n","sub_path":"src/hub_pip/tests/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"627937541","text":"import logging\nimport loggingpackage.custom_logger as cl\n\nclass LoggingDemo2():\n\n log = cl.customLogger(logging.DEBUG)\n\n def method1(self):\n self.log.debug(\"debug message\")\n self.log.info(\"info message\")\n self.log.warning(\"warning message\")\n self.log.error(\"error message\")\n self.log.critical(\"critical message\")\n\n def method2(self):\n log2 = cl.customLogger(logging.INFO)\n log2.debug(\"debug message\")\n log2.info(\"info message\")\n log2.warning(\"warning message\")\n log2.error(\"error message\")\n log2.critical(\"critical message\")\n\n def method3(self):\n log3 = cl.customLogger(logging.WARNING)\n log3.debug(\"debug message\")\n log3.info(\"info message\")\n log3.warning(\"warning message\")\n log3.error(\"error message\")\n log3.critical(\"critical message\")\n\ndemo = LoggingDemo2()\ndemo.method1()\ndemo.method2()\ndemo.method3()","sub_path":"loggingpackage/logging_demo2.py","file_name":"logging_demo2.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"350357338","text":"#!/usr/bin/env python3\n\nimport csv\n\nwith open('bio_stats.csv', 'r') as csv_file:\n leitor = csv.DictReader(csv_file)\n\n with open('bio_copy.csv', 'w') as new_file:\n fieldnames =['Name', 'Sex', 'Age', 'Weight(lbs)']\n\n escreve = csv.DictWriter(new_file, fieldnames=fieldnames,\n delimiter=',')\n escreve.writeheader()\n for line in leitor:\n del line['Height(in)']\n escreve.writerow(line)\n\nprint('feito...')","sub_path":"Cap18-Extras/22_writing_csv_bio_stats.py","file_name":"22_writing_csv_bio_stats.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"406545018","text":"def load_data(filename):\n import pickle\n with open(filename, mode='rb') as f:\n data = pickle.load(f)\n assert (len(data['features']) == len(data['labels']))\n reduce_data = 3920\n print(filename + \" length: 
{}\".format(len(data['features'][:reduce_data])))\n return data['features'][:reduce_data], data['labels'][:reduce_data]\n\n\ndef grayscale(x):\n import cv2 as cv\n import numpy as np\n for index, image in enumerate(x):\n gray = cv.cvtColor(image, cv.COLOR_RGB2GRAY)\n im2 = np.zeros_like(image)\n im2[:, :, 0], im2[:, :, 1], im2[:, :, 2] = gray, gray, gray\n x[index] = im2\n return x\n\n\ndef normalizer(x):\n import numpy as np\n x_min = float(np.min(x))\n x_max = float(np.max(x))\n x = (x - x_min) / (x_max - x_min)\n return x\n\n\ndef pre_process(features, labels, is_train=False):\n from sklearn.utils import shuffle\n assert (len(features) == len(labels))\n # features = grayscale(features)\n features = normalizer(features)\n if is_train:\n features, labels = shuffle(features, labels)\n return features, labels\n\n\ndef get_batches(features, labels, batch_size):\n from sklearn.utils import shuffle\n import math\n features, labels = shuffle(features, labels)\n total_size, index, batch = len(features), 0, []\n n_batches = int(math.ceil(total_size / batch_size)) if batch_size > 0 else 0\n for _i_ in range(n_batches - 1):\n batch.append([features[index:index + batch_size],\n labels[index:index + batch_size]])\n index += batch_size\n batch.append([features[index:], labels[index:]])\n return batch\n\n\ndef get_data_summary(features, labels):\n import numpy as np\n # What's the shape of an traffic sign image?\n image_shape = features[0].shape\n # How many unique classes/labels there are in the dataset.\n unique_classes, n_samples = np.unique(labels,\n return_index=False,\n return_inverse=False,\n return_counts=True)\n n_classes = len(unique_classes)\n n_samples = n_samples.tolist()\n return image_shape[0], image_shape[2], n_classes, n_samples\n\n\ndef print_output(output):\n from caffe_classes import class_names\n import numpy as np\n\n for input_im_ind in range(output.shape[0]):\n inds = np.argsort(output)[input_im_ind, :]\n print(\"Image\", input_im_ind)\n for i in range(5):\n print(\"%s: %.3f\" % (class_names[inds[-1 - i]],\n output[input_im_ind, inds[-1 - i]]))\n\n\ndef read_images(name1, name2):\n from scipy.misc import imread\n import numpy as np\n import os\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n im1 = imread(\"images/\" + name1)\n im1 = (im1[:, :, :3]).astype(np.float32) # 3 channels\n im1 = im1 - np.mean(im1)\n\n im2 = imread(\"images/\" + name2)\n im2 = (im2[:, :, :3]).astype(np.float32) # 3 channels\n im2 = im2 - np.mean(im2)\n\n return im1, im2\n\n\ndef implement_feature_extraction(network, n_classes, with_prob=True):\n import tensorflow as tf\n import os\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n mu, stddev = 0, 0.1\n image_shape = (network.get_shape().as_list()[-1], n_classes)\n print(\"image shape: {}\".format(image_shape))\n w = tf.Variable(tf.random_normal(shape=image_shape, mean=mu, stddev=stddev))\n b = tf.Variable(tf.random_normal(shape=[n_classes], mean=mu, stddev=stddev))\n logits = tf.nn.xw_plus_b(network, w, b)\n if with_prob is False:\n return logits, w, b\n probs = tf.nn.softmax(logits=logits)\n return probs\n\n\ndef evaluate(features, labels, cost, accuracy, x, y, sess):\n total_cost = 0\n total_accuracy = 0\n batches = get_batches(features, labels, 128)\n for x_batch, y_batch in batches:\n c, a = sess.run([cost, accuracy], feed_dict={\n x: x_batch,\n y: y_batch\n })\n # x_batch.shape[0] --> features in a batch\n total_cost += (c * x_batch.shape[0])\n total_accuracy += (a * x_batch.shape[0])\n # features.shape[0] --> total features\n return total_cost / 
features.shape[0], total_accuracy / features.shape[0]\n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"508349537","text":"action = input('choice')\ndb = {'name':'sarah','age':20,'contact':'2 st'}\n\nd= []\nif action =='delete':\n    for k,v in db.items():\n        d.append(k)\n    for k in d:\n        db.pop(k)\n\n    else:\n        print(\"This name was not available\")\n\n\nelif action =='update':\n    user_name,age,contact = input('user_name age contact: ').split()\n\n    if user_name in db.keys():\n        db[user_name] = 'sarah'\n        db['age'] = 20\n        db['contact'] = '26 st.'\n\n        # use setdefault\n\n        db.setdefault(user_name,'sarah')\n        db.setdefault('age',20)\n        db.setdefault('contact','2 st.')\n    else:\n        print(\"This user doesn't exist\")\n\n\nelif action =='find':\n    user_name = input('sarah')\n    age = 20\n    contact = '2 st'\n\n    for item in db.items():\n        print(item[0],item[1])\n    # another method:\n    for user_name in db.keys():\n        print(db['name'], db['age'], db['contact'])\n    print('{} is {} years old and he/she lives at {}'.format(user_name, age, contact))\n\nelif action == 'list':\n    user_name, age, contact = input('user_name age contact: ').split()\n\n    print('This user has user_name, age and contact information available')\n    print()\n    print('{}, {}, {}'.format(user_name, age, contact))\n\nelif action == 'exit':\n    print('Please save the changed information')\n    exit()\n\n\n\n\n# Try wrapping this in a while True loop: normally an interactive system should let the user keep choosing operations instead of exiting after a single run.\n# While iterating, it is best not to add or remove items on the object being iterated; try it and see what happens.\n# No other issues.\n# Rename the directory to student ID + name.","sub_path":"Sarah/Lesson-1.py","file_name":"Lesson-1.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"199319773","text":"def initial_board():\r\n    board = [[i for i in 4*'_'] for j in range(4)]\r\n    for i in range(4):\r\n        for j in range(4):\r\n            if i == 0:\r\n                board[i][j] = j\r\n            if j == 0:\r\n                board[i][j] = i\r\n    board[0][0] = ' ' \r\n    return board\r\n\r\ndef display_board(board):\r\n    for i in range(4):\r\n        for j in range(4):\r\n            print(board[i][j], end =' ')\r\n        print()\r\n \r\ndef get_position(board, player):\r\n    while True:\r\n        row = int(input(' row:'))\r\n        col = int(input('column:'))\r\n        if row not in range(1,4) or col not in range(1,4):\r\n            print('row or column is wrong,Please enter again!')\r\n            continue\r\n        if board[row][col] != '_' :\r\n            print(\"The position has been occupied,Please enter again!\")\r\n            continue\r\n        if player == 1:\r\n            board[row][col] = 'X'\r\n        if player ==2 :\r\n            board[row][col] = 'O'\r\n        break\r\n\r\ndef is_win(count):\r\n    if count[1] == 3:\r\n        print('Player1 win!')\r\n        return 1\r\n    if count[2] == 3:\r\n        print('Player2 win!')\r\n        return 1\r\n    return 0\r\n\r\ndef winner(board): \r\n    # if full?\r\n    count = 3*[0]\r\n    for i in board:\r\n        if '_' in i:\r\n            count[0] += 1\r\n    if count[0] == 0:\r\n        print('Draw!')\r\n        return 1\r\n \r\n    # from row \r\n    # count = 3*[0]\r\n    for i in range(1,4):\r\n        count = 3*[0]\r\n        for j in range(1,4):\r\n            if board[i][j] == 'X':\r\n                count[1] += 1\r\n            if board[i][j] == 'O':\r\n                count[2] += 1\r\n        result = is_win(count)\r\n        if result == 1:\r\n            return 1\r\n    # from col\r\n    # count = 3*[0] shouldn't be here!\r\n    for i in range(1,4):\r\n        # yes ,it should be here!\r\n        count = 3*[0]\r\n        for j in range(1,4):\r\n            if board[j][i] == 'X':\r\n                count[1] += 1\r\n            if board[j][i] == 'O':\r\n                count[2] += 1\r\n        result = is_win(count)\r\n        if result == 1:\r\n            return 1\r\n\r\n    # from 
the diagonal\r\n    # count must be initialized once, before the loop, so it accumulates along the diagonal\r\n    count = 3*[0]\r\n    for i in range(1,4):\r\n        if board[i][i] == 'X':\r\n            count[1] += 1\r\n        if board[i][i] == 'O':\r\n            count[2] += 1\r\n    result = is_win(count)\r\n    if result == 1:\r\n        return 1\r\n    # from the anti-diagonal\r\n    count = 3*[0]\r\n    for i in range(1,4):\r\n        if board[i][4-i] == 'X':\r\n            count[1] += 1\r\n        if board[i][4-i] == 'O':\r\n            count[2] += 1\r\n    result = is_win(count)\r\n    if result == 1:\r\n        return 1\r\n    return 0\r\n \r\ndef main():\r\n    board = initial_board()\r\n    display_board(board)\r\n    player = 1\r\n \r\n    while True:\r\n        print(\"Now, it's player\",player)\r\n        get_position(board, player)\r\n        display_board(board)\r\n        result = winner(board)\r\n        if result == 1:\r\n            break\r\n        # switch player\r\n        if player == 1:\r\n            player = 2\r\n        else:\r\n            player = 1\r\n\r\nmain()\r\n","sub_path":"12_6.py","file_name":"12_6.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"598140800","text":"#!/usr/bin/python\n\nimport pickle\nimport mlx\nimport argparse\n\nTREEPATH='../cchf/cchfl_trees/75_cchfl_P1909.pkl'\nofile='tmp'\n\n\n\n\n\nparser = argparse.ArgumentParser(description='Example with non-optional arguments')\n\nparser.add_argument('-treepath', dest='TREEPATH', action=\"store\", type=str,\n                    default='./tree.pkl',help=\" path to tree\")\nparser.add_argument('-ofile', dest='ofile', action=\"store\", type=str,\n                    default='tmp',help=\" outfile\")\n\nargs_=parser.parse_args()\n\n\nwith open(args_.TREEPATH, 'rb') as f:\n    TR=pickle.load(f)\n\nmlx.tree_export(TR,outfilename=args_.ofile+'.dot',\n                leaves_parallel=False,\n                rounded=True,\n                filled=True,\n                TYPE='straight',\n                BGCOLOR='transparent',\n                legend=False,\n                LIGHT=1,\n                LABELCOL='white',\n                TEXTCOL='black',\n                EDGECOLOR='white',\n                EXEC=True)\n","sub_path":"pycode/drawTree.py","file_name":"drawTree.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"446408371","text":"import re\n\nwith open('temp_auto.html', 'r') as file:\n    text = file.read()\n\ncounter = 0\nfor language in ['python', 'sh', 'yml', 'xml', 'pig', 'sql', 'hbase', 'json', 'dockerfile']:\n    regular_expression = re.compile('
')\n    connectors = re.findall(regular_expression, string=text)\n    split_text = re.split(regular_expression, string=text)\n    text_ = split_text[0]\n    for i in range(len(connectors)):\n        counter += 1\n        new_connector = connectors[i][:-1] + ' id=\"code-' + str(counter) + '\">'\n        text_ += new_connector + split_text[i+1]\n    text = text_\n\nwith open('temp2.txt', 'w') as file:\n    file.write(text)","sub_path":"add_ids.py","file_name":"add_ids.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"164223198","text":"import os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(os.path.dirname(__file__)),'modules'))\nimport readConfig\nfrom PIL import Image\n\nclass Photo:\n    def __init__(self):\n        config=readConfig.readConfig(os.path.join(os.path.dirname(__file__),'config','PhotoEdit.config'))\n        self.logo_dir=config['logo及二维码文件夹']\n\n    def put_mark(self):\n        wtmk_src=os.path.join(self.logo_dir,'树带熊logo2.png')\n        wtmk_img=Image.open(wtmk_src)\n        wtmk_img.show()\n        print(self.logo_dir,'OK')\n\nif __name__=='__main__':\n    p=Photo()\n    p.put_mark()","sub_path":"PhotoEdit/AfterShot.py","file_name":"AfterShot.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"499973831","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom grab import Grab\n\ng=Grab()\n\ng.go('http://author24.ru')\n\ng.doc.set_input('email','madbit@yandex.ru')\ng.doc.set_input('password','bullet')\ng.doc.submit()\n\n\n\ng.setup(post={\n\t\t\t'category': None,\n\t\t\t'type': None,\n\t\t\t'budgetFrom': None,\n\t\t\t'budgetTo': None,\n\t\t\t'deadlineTo': None,\n\t\t\t'deadlineFrom': None,\n\t\t\t'bid': False,\n\t\t\t'title': '',\n\t\t\t'page': 1,\n\t\t\t'sort': sort,  # NOTE: 'sort' is undefined here; this dict was pasted from the site's JavaScript\n\t\t\t'hwusers': [],\n\t\t})\ng.go('http://author24.ru/ajax/searchOrders_v2')\n\n\n\n","sub_path":"other/author24.py","file_name":"author24.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"136197995","text":"#coding:utf-8\n# Fibonacci sequence\ndef fei(max):\n    n, a, b = 0, 0, 1\n    while n < max:\n        print(b)\n        a, b = b, a + b\n        n += 1\n#fei(5)\n# separate odd and even numbers\ndef jiou(num):\n    ji = []\n    ou = [0]\n    if num % 2 == 0:\n        for x in range(0,num):\n            if x % 2 == 1:\n                ji.append(x)\n                ou.append((x+1))\n        print(\"odd list: %s\\neven list: %s\"%(ji, ou))\n    else:print(\"please enter an even number\")\n#jiou(18)\n# sum of primes\ndef sushu(N):\n    sum = 0\n    while N > 1:\n        for i in range(2,N):\n            if N % i == 0:\n                #print(\"%s is not prime\"%N)\n                break\n        else:\n            #print(\"%s is prime\"%N)\n            sum += N\n        N -= 1\n    print(sum)\n\n# light-switch problem\n# n,k=map(int,input().split(' '))\n# list1 = [True for x in range(n)]\n# def switch(k):\n#     for x in range(0,n):\n#         if x % k == 0:\n#             if list1[x] == True:\n#                 list1[x] = False\n#             else:\n#                 list1[x] = True\n#     if k == 1:\n#         return 1\n#     return switch(k-1)\n# switch(k)\n# n = 0\n# for x in list1:\n#     if x == False:\n#         n += 1\n# print(n)\n\n\n#sushu(10)\n\n# a test of variable reference semantics\n# dict1 = {'sad':['asdas'], 'asd':['b213'],'123':[12354]}\n# print(dict1)\n# list1 =[]\n# list1 = dict1['sad']\n# list2 = []\n# list2 = list1\n# print(list1,list2)\n#\n# list1.append('asd')\n# list2[0] = '777'\n#\n# set1 =set(list1)\n# set1.pop()\n#\n# print(list1,list2,set1, dict1)\n\n# string = \"你好\"\n# string.replace('你','123')\n# print(string)\n#\n# list1 = ['c', 'asd', 'sad','啊','你', '教']\n# list1.sort()\n# print(list1)\n# import os\n#os.system(r'tutu.exe')\n#os.system('shutdown -s -t 
3600')\n","sub_path":"算法练习/算法练习.py","file_name":"算法练习.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"51849993","text":"import logging\nimport os\nimport time\nimport json\nimport itertools\n\nimport boto3\n\nfrom operator import itemgetter\n\nfrom discord import Embed\nfrom discord.ext import commands\n\nfrom util import match_player_to_acc\n\n\nlogging.basicConfig(format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nlogger = logging.getLogger(\"poe_item_search_bot\")\nif os.environ.get(\"LOG_LEVEL\"):\n logger.setLevel(os.environ[\"LOG_LEVEL\"])\nelse:\n logger.setLevel(\"INFO\")\n\nbot = commands.Bot(command_prefix=\"!item-alert \")\n\n\n@bot.command()\nasync def find(ctx, *args):\n logging.info(f\"Got alert event: {args}\")\n client = boto3.client(\"lambda\", region_name=\"eu-central-1\")\n start_time = time.perf_counter()\n # expect something like type:TypeName or mod:ModValue\n if args:\n filters = []\n for arg in args:\n filter_types = arg.split(\":\")[0].split(\"+\")\n filter_values = arg.split(\":\")[1].split(\"+\")\n tmp = {}\n for filter_type, filter_value in zip(filter_types, filter_values):\n tmp[filter_type] = filter_value\n filters.append(tmp)\n logger.debug(f\"Created filter list: {filters}\")\n for f in filters:\n f_name = \"+\".join(list(f.keys()))\n f_val = \"+\".join(list(f.values()))\n title = f\"{f_name} | {f_val}\"\n payload = {\n \"filter\": f\n }\n logger.debug(f\"Invoking lambda for all items with {payload}\")\n resp = client.invoke(\n FunctionName=\"poe_item_search\",\n InvocationType=\"RequestResponse\",\n Payload=json.dumps(payload),\n )\n result = json.loads(resp[\"Payload\"].read().decode(\"utf-8\"))\n logger.debug(f\"Lambda ran successfully!\")\n players = {}\n for player in result:\n logger.debug(f\"Adding {player['account_name']} to message...\")\n players[player[\"account_name\"]] = []\n for player in result:\n players[player[\"account_name\"]].append(\n {\n \"item_line\": player[\"item\"],\n \"created\": player[\"created\"],\n \"inventory_id\": player[\"inventory_id\"]\n }\n )\n message = Embed(title=title)\n for player, items in players.items():\n known_player = match_player_to_acc(player)\n if known_player:\n player= known_player\n result_items = []\n items = sorted(items, key=itemgetter(\"inventory_id\"))\n for key, value in itertools.groupby(items, key=itemgetter(\"inventory_id\")):\n tmp = sorted(list(value), key=itemgetter(\"created\"), reverse=True)[0]\n result_items.append(tmp[\"item_line\"])\n # for item in items:\n # result_item = item[\"item_line\"]\n # for i in items:\n # if item[\"inventory_id\"] == i[\"inventory_id\"]:\n # if item[\"created\"] < i[\"created\"]:\n # result_item = i[\"item_line\"]\n\n if len(message.fields) == 25:\n await ctx.send(embed=message)\n message = Embed(title=title)\n message.add_field(\n name=player,\n value=\", \".join(result_items),\n inline=True,\n )\n else:\n await ctx.send(embed=message)\n\n stop_time = time.perf_counter()\n duration = f\"{stop_time - start_time:0.2f}\"\n message.add_field(name=\"Duration\", value=f\"{duration}s\", inline=False)\n logger.debug(f\"Sending message...\")\n logger.debug(f\"Ran query in {duration} seconds\")\n\n\n@bot.command()\nasync def set_league(ctx, league_name):\n client = boto3.client(\"ssm\", region_name=\"eu-central-1\")\n logger.debug(f\"Got set_league event {ctx}\")\n client.put_parameter(\n Name=\"/poe-item-alerts/character-load/ladders\",\n 
Value=league_name,\n Type=\"String\",\n Overwrite=True\n )\n await ctx.send(f\"Set active league: {league_name}\")\n\n\n@bot.command()\nasync def get_league(ctx):\n client = boto3.client(\"ssm\", region_name=\"eu-central-1\")\n logger.debug(f\"Got get_league event {ctx}\")\n resp = client.get_parameter(Name=\"/poe-item-alerts/character-load/ladders\")\n league = resp[\"Parameter\"][\"Value\"]\n await ctx.send(f\"Current active league: {league}\")\n\n\n@bot.command()\nasync def cfg_ingest(ctx, status):\n ssm = boto3.client(\"ssm\", region_name=\"eu-central-1\")\n admin_id = ssm.get_parameter(\n Name=\"/poe-item-alerts/admin-id\",\n WithDecryption=True\n )[\"Parameter\"][\"Value\"]\n if ctx.message.author.id == int(admin_id):\n client = boto3.client(\"events\", region_name=\"eu-central-1\")\n if status == \"enable\":\n client.enable_rule(Name=\"poe_ladder_exporter\")\n await ctx.send(\"Enabled ingestion!\")\n elif status == \"disable\":\n client.disable_rule(Name=\"poe_ladder_exporter\")\n await ctx.send(\"Disabled ingestion!\")\n else:\n await ctx.send(\"Didn't recognise the status please use either 'enable' or 'disable'\")\n \n\nbot.run(os.environ[\"DISCORD_TOKEN\"])\n","sub_path":"src/discord_bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"311588933","text":"# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ----------------------------------------------------------------------------\nimport re\nfrom email.utils import parseaddr\n\nimport trac\nfrom trac.core import *\nfrom trac.config import ListOption, Option\nfrom trac.util.text import to_unicode\n\nimport announcer\nfrom announcer.distributors.mail import IAnnouncementEmailDecorator\nfrom announcer.util.mail import set_header, msgid, next_decorator, uid_encode\n\nclass ThreadingEmailDecorator(Component):\n \"\"\"Add Message-ID, In-Reply-To and References message headers for resources.\n All message ids are derived from the properties of the ticket so that they\n can be regenerated later.\n \"\"\"\n\n implements(IAnnouncementEmailDecorator)\n\n supported_realms = ListOption('announcer', 'email_threaded_realms',\n 'ticket,wiki',\n doc=\"\"\"These are realms with announcements that should be threaded\n emails. In order for email threads to work, the announcer\n system needs to give the email recreatable Message-IDs based\n on the resources in the realm. The resources must have a unique\n and immutable id, name or str() representation in it's realm\n \"\"\")\n\n def decorate_message(self, event, message, decorates=None):\n \"\"\"\n Added headers to the outgoing email to track it's relationship\n with a ticket.\n\n References, In-Reply-To and Message-ID are just so email clients can\n make sense of the threads.\n \"\"\"\n if to_unicode(event.realm) in self.supported_realms:\n uid = uid_encode(self.env.abs_href(), event.realm, event.target)\n email_from = self.config.get('announcer', 'email_from', 'localhost')\n _, email_addr = parseaddr(email_from)\n host = re.sub('^.+@', '', email_addr)\n mymsgid = msgid(uid, host)\n if event.category == 'created':\n # Replace really unique message ID with a predictable one.\n set_header(message, 'Message-ID', mymsgid)\n else:\n set_header(message, 'In-Reply-To', mymsgid)\n set_header(message, 'References', mymsgid)\n\n return next_decorator(event, message, decorates)\n\n\nclass StaticEmailDecorator(Component):\n \"\"\"The static ticket decorator implements a policy to -always- send an\n email to a certain address.\n\n Controlled via the always_cc and always_bcc option in the announcer section\n of the trac.ini. If no subscribers are found, then even if always_cc and\n always_bcc addresses are specified, no announcement will be sent. Since\n these fields are added after announcers subscription system, filters such\n as never_announce and never_notify author won't work with these addresses.\n\n These settings are considered dangerous if you are using the verify email\n or reset password features of the accountmanager plugin.\n \"\"\"\n\n implements(IAnnouncementEmailDecorator)\n\n always_cc = Option(\"announcer\", \"email_always_cc\", None,\n \"\"\"Email addresses specified here will always\n be cc'd on all announcements. 
This setting is dangerous if\n accountmanager is present.\n \"\"\")\n\n always_bcc = Option(\"announcer\", \"email_always_bcc\", None,\n \"\"\"Email addresses specified here will always\n be bcc'd on all announcements. This setting is dangerous if\n accountmanager is present.\n \"\"\")\n\n def decorate_message(self, event, message, decorates=None):\n for k, v in {'Cc': self.always_cc, 'Bcc': self.always_bcc}.items():\n if v:\n self.log.debug(\"StaticEmailDecorator added '%s' \"\n \"because of rule: email_always_%s\"%(v, k.lower())),\n if message[k] and len(str(message[k]).split(',')) > 0:\n recips = \", \".join(str(message[k]), v)\n else:\n recips = v\n set_header(message, k, recips)\n return next_decorator(event, message, decorates)\n\n\nclass AnnouncerEmailDecorator(Component):\n \"\"\"Add some boring headers that should be set.\"\"\"\n\n implements(IAnnouncementEmailDecorator)\n\n def decorate_message(self, event, message, decorators):\n mailer = 'AnnouncerPlugin v%s on Trac v%s'%(\n announcer.__version__,\n trac.__version__\n )\n set_header(message, 'Auto-Submitted', 'auto-generated')\n set_header(message, 'Precedence', 'bulk')\n set_header(message, 'X-Announcer-Version', announcer.__version__)\n set_header(message, 'X-Mailer', mailer)\n set_header(message, 'X-Trac-Announcement-Realm', event.realm)\n set_header(message, 'X-Trac-Project', self.env.project_name)\n set_header(message, 'X-Trac-Version', trac.__version__)\n\n return next_decorator(event, message, decorators)\n\n","sub_path":"announcerplugin/0.11.2dev/announcer/email_decorators/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"474325273","text":"import os\nimport time\nimport logging\nimport requests\nimport threading\nfrom requests.exceptions import RequestException\n\nfrom ingestors.exc import ConfigurationException, ProcessingException\nfrom ingestors.util import join_path\n\nlog = logging.getLogger(__name__)\n\n\nclass UnoconvSupport(object):\n \"\"\"Provides helpers for unconv via HTTP.\"\"\"\n UNO_MIME = 'application/octet-stream'\n\n def get_unoconv_url(self):\n return self.manager.get_env('UNOSERVICE_URL')\n\n def is_unoconv_available(self):\n return self.get_unoconv_url() is not None\n\n @property\n def unoconv_client(self):\n if not hasattr(self, '_unoconv_client'):\n self._unoconv_client = threading.local()\n if not hasattr(self._unoconv_client, 'session'):\n self._unoconv_client.session = requests.Session()\n return self._unoconv_client.session\n\n def unoconv_to_pdf(self, file_path, temp_dir):\n \"\"\"Converts an office document to PDF.\"\"\"\n if not self.is_unoconv_available():\n raise ConfigurationException(\"UNOSERVICE_URL is missing.\")\n\n log.info('Converting [%s] to PDF...', self.result)\n file_name = os.path.basename(file_path)\n out_path = join_path(temp_dir, '%s.pdf' % file_name)\n for try_num in range(3):\n try:\n with open(file_path, 'rb') as fh:\n data = {'format': 'pdf', 'doctype': 'document'}\n files = {'file': (file_name, fh, self.UNO_MIME)}\n # http://docs.python-requests.org/en/latest/user/advanced/#chunk-encoded-requests\n res = self.unoconv_client.post(self.get_unoconv_url(),\n data=data,\n files=files,\n timeout=300.0,\n stream=True)\n length = 0\n with open(out_path, 'w') as fh:\n for chunk in res.iter_content(chunk_size=None):\n length += len(chunk)\n fh.write(chunk)\n\n if length == 0:\n raise ProcessingException(\"Could not convert to PDF.\")\n return 
out_path\n except RequestException as re:\n log.exception(re)\n time.sleep(3 ** try_num)\n raise ProcessingException(\"Could not convert to PDF.\")\n","sub_path":"ingestors/support/uno.py","file_name":"uno.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"231220448","text":"import tensorflow as tf\nimport os\nimport sys\nimport data_generation\nimport networks\nimport scipy.io as sio\nimport param\nimport util\nimport cv2\nimport truncated_vgg\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras.optimizers import Adam\nfrom PIL import Image\nimport numpy as np\nfrom html4vision import Col, imagetable\n\n\ndef test(model_name, gpu_id):\n params = param.get_general_params()\n\n network_dir = params['model_save_dir'] + '/' + model_name\n save_dir = params['save_img_dir'] + '/' + model_name\n\n if not os.path.isdir(network_dir):\n os.mkdir(network_dir)\n\n if not os.path.isdir(save_dir):\n os.mkdir(save_dir)\n\n train_feed = data_generation.create_feed_canon(params, params['data_dir'], 'train', do_augment = False)\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_id)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n set_session(tf.Session(config=config))\n\n vgg_model = truncated_vgg.vgg_norm()\n networks.make_trainable(vgg_model, False)\n response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')\n model = networks.network_posewarp(params)\n model.load_weights('/home/jl5/posewarp-cvpr2018/models/orig-train-set/18000.h5')\n #model.compile(optimizer=Adam(lr=1e-4), loss=[networks.vgg_loss(vgg_model, response_weights, 12)])\n\n model.summary()\n n_iters = params['n_training_iter']\n\n for step in range(50):\n x, y = next(train_feed)\n arr_out = model.predict_on_batch(x)\n for i in range(params['batch_size']):\n for j in range(11):\n cv2.imwrite(save_dir + '/' + str(step) + '_' + str(i) + \"_limb\" + str(j)+ \".png\", ((x[3][i][..., j] + 1)*128))\n cv2.imwrite(save_dir + '/' + str(step) + '_' + str(i) + '_warped' + str(j) + '.png', (arr_out[1][i][..., 3*j:3*j+3]+1)*128)\n\n cv2.imwrite(save_dir + '/' + str(step) + '_' + str(i) + 'generated.png', ((arr_out[0][i] + 1) * 128).astype('uint8'))\n cv2.imwrite(save_dir + '/' + str(step) + '_' + str(i) + 'source'+ '.png', ((x[0][i] + 1) * 128).astype('uint8'))\n cv2.imwrite(save_dir + '/' + str(step) + '_' + str(i) + 'target' +'.png', ((y[i] + 1) * 128).astype('uint8'))\n #cv2.imwrite(save_dir + '/' + str(step) + '_' + str(i) + 'bg_synth' +'.png', ((arr_out[1][i] + 1) * 128).astype('uint8'))\n #cv2.imwrite(save_dir + '/' + str(step) + '_' + str(i) + 'fg_synth' +'.png', ((arr_out[2][i] + 1) * 128).astype('uint8'))\n #cv2.imwrite(save_dir + '/' + str(step) + '_' + str(i) + 'bg_output' +'.png', ((y[i][2] + 1) * 128).astype('uint8'))\n #cv2.imwrite(save_dir + '/' + str(step) + '_' + str(i) + 'fg_output' +'.png', ((y[i][3] + 1) * 128).astype('uint8'))\n #cv2.imwrite(save_dir + '/' + str(step) + '_' + str(i) + 'src_mask' +'.png', ((y[i][3] + 1) * 128).astype('uint8'))\n cols = [\n Col('id1', 'ID'), # make a column of 1-based indices\n Col('img', 'Source', '../saved_imgs/' + model_name + '/*source.png'), # specify image content for column 2\n Col('img', 'Target', '../saved_imgs/' + model_name + '/*target.png'), # specify image content for column 3\n Col('img', 'Generated', '../saved_imgs/' + model_name + '/*generated.png'),\n #Col('img', 'Foreground Synth', '../saved_imgs/' + model_name + 
'/*fg_synth.png'),\n #Col('img', 'Background Synth', '../saved_imgs/' + model_name + '/*bg_synth.png'),\n ]\n\n cols1 = []\n for i in range(11):\n cols1 += [Col('img', 'limb ' + str(i), '../saved_imgs/' + model_name + '/*limb' + str(i) + \"*\")]\n cols2 = []\n for i in range(11):\n cols2 += [Col('img', 'Limb' + str(i), '../saved_imgs/' + model_name + '/*_warped' + str(i) + \"*\")]\n\n # html table generation\n imagetable(cols, outfile='../saved_results/' + model_name + '.html')\n imagetable(cols1, outfile='../saved_results/' + model_name + \"limbs\" + '.html')\n imagetable(cols2, outfile='../saved_results/' + model_name + \"warped_limbs\" + '.html')\n\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print(\"Need model name and gpu id as command line arguments.\")\n else:\n import pdb\n test(sys.argv[1], sys.argv[2])\n","sub_path":"code/posewarp_test.py","file_name":"posewarp_test.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"393588197","text":"\n\n#calss header\nclass _HERBAGE():\n\tdef __init__(self,): \n\t\tself.name = \"HERBAGE\"\n\t\tself.definitions = [u'herbaceous (= soft and leafy) plants: ', u'leaves and grass eaten by cows, sheep, etc.: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_herbage.py","file_name":"_herbage.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"285410898","text":"#!/usr/bin/env python3\nfrom aws_cdk import core, aws_s3_deployment, aws_s3, aws_eks\n\nfrom awscdk_81_patch import Patch\nPatch.apply()\n\nclass TestStack(core.Stack):\n\n def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n aws_s3_deployment.BucketDeployment(self, 'MyDeployment',\n destination_bucket=aws_s3.Bucket(self, 'MyBucket'),\n sources=[aws_s3_deployment.Source.asset('./asset')]\n )\n\n aws_eks.Cluster(self, 'MyCluster',\n version=aws_eks.KubernetesVersion.V1_18\n )\n\napp = core.App()\nTestStack(app, \"play-202012311223\")\n\napp.synth()\n","sub_path":"examples/python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"575560474","text":"import json\nimport time\n\nfrom mySelenium.page_main import PageMain\nmain=PageMain()\ntime.sleep(20)\ncookies=main.bese_get_cookie()\nprint(cookies)\nwith open(\"cookie.json\", \"w\") as f:\n json.dump(cookies,f)\nmain.base_quit()","sub_path":"mySelenium/get_cookie/get_cookie.py","file_name":"get_cookie.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"528364340","text":"import datetime\nimport io\nimport re\nimport json\nimport time\nimport asyncio\nimport discord\nimport logging\nimport traceback\n\nfrom collections import Counter, defaultdict\nfrom datetime import timezone\nfrom discord.ext import commands, tasks\n\nfrom utilities import utils\nfrom utilities import decorators\n\ncommand_logger = logging.getLogger(\"Snowbot\")\n\nEMOJI_REGEX = re.compile(r\"\")\nEMOJI_NAME_REGEX = re.compile(r\"[0-9a-zA-Z\\_]{2,32}\")\n\n\ndef setup(bot):\n 
bot.add_cog(Batch(bot))\n\n\nclass Batch(commands.Cog):\n \"\"\"\n Batch inserts all data\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n # Data holders\n self.avatar_batch = []\n self.command_batch = []\n self.edited_batch = []\n self.emoji_batch = defaultdict(Counter)\n self.invite_batch = []\n self.message_batch = []\n self.nicknames_batch = []\n self.roles_batch = defaultdict(dict)\n self.snipe_batch = []\n self.status_batch = defaultdict(dict)\n self.tracker_batch = {}\n self.usernames_batch = []\n\n self.batch_lock = asyncio.Lock(loop=bot.loop)\n self.queue = asyncio.Queue(loop=bot.loop)\n\n self.bulk_inserter.start()\n self.dispatch_avatars.start()\n self.invite_tracker.start()\n self.message_inserter.start()\n self.status_inserter.start()\n\n def cog_unload(self):\n self.background_task.stop()\n self.bulk_inserter.stop()\n self.dispatch_avatars.stop()\n self.message_inserter.stop()\n self.status_inserter.stop()\n self.invite_tracker.stop()\n\n @tasks.loop(minutes=1.0)\n async def invite_tracker(self):\n self.bot.invites = {\n guild.id: await guild.invites()\n for guild in self.bot.guilds\n if guild.me.guild_permissions.manage_guild\n }\n\n @tasks.loop(seconds=0.0)\n async def dispatch_avatars(self):\n while True:\n files = [await self.queue.get() for _ in range(10)]\n try:\n upload_batch = await self.bot.avatar_webhook.send(\n files=files, wait=True\n )\n for x in upload_batch.attachments:\n self.avatar_batch.append(\n {\n \"user_id\": int(x.filename.split(\".\")[0]),\n \"avatar_id\": x.id,\n }\n )\n except discord.HTTPException as e:\n # Here the combined files likely went over the 8mb file limit\n # Lets divide them up into 2 parts and send them separately.\n upload_batch_1 = await self.bot.avatar_webhook.send(\n files=files[:5], wait=True\n )\n upload_batch_2 = await self.bot.avatar_webhook.send(\n files=files[5:], wait=True\n )\n new_upload_batch = (\n upload_batch_1.attachments + upload_batch_2.attachments\n )\n for x in new_upload_batch:\n self.avatar_batch.append(\n {\n \"user_id\": int(x.filename.split(\".\")[0]),\n \"avatar_id\": x.id,\n }\n )\n try:\n await self.bot.logging_webhook.send(\n f\"{self.emote_dict['success']} **Information** `{datetime.utcnow()}`\\n\"\n f\"```prolog\\nQueue: Payload data limit resolved.```\",\n username=f\"{self.user.name} Logger\",\n avatar_url=self.bot.constants.avatars[\"green\"],\n )\n except Exception:\n pass\n except Exception as e:\n self.bot.dispatch(\"error\", \"queue_error\", tb=utils.traceback_maker(e))\n\n @tasks.loop(seconds=0.5)\n async def status_inserter(self):\n if self.status_batch: # Insert all status changes\n async with self.batch_lock:\n if self.status_batch[\"online\"]:\n query = \"\"\"\n INSERT INTO userstatus (user_id)\n SELECT x.user_id\n FROM JSONB_TO_RECORDSET($1::JSONB)\n AS x(user_id BIGINT, last_changed DOUBLE PRECISION)\n ON CONFLICT (user_id)\n DO UPDATE SET last_changed = EXCLUDED.last_changed,\n online = userstatus.online + (EXCLUDED.last_changed - userstatus.last_changed);\n \"\"\"\n data = json.dumps(\n [\n {\"user_id\": user_id, \"last_changed\": timestamp}\n for user_id, timestamp in self.status_batch[\n \"online\"\n ].items()\n ]\n )\n await self.bot.cxn.execute(query, data)\n self.status_batch[\"online\"].clear()\n if self.status_batch[\"idle\"]:\n query = \"\"\"\n INSERT INTO userstatus (user_id)\n SELECT x.user_id\n FROM JSONB_TO_RECORDSET($1::JSONB)\n AS x(user_id BIGINT, last_changed DOUBLE PRECISION)\n ON CONFLICT (user_id)\n DO UPDATE SET last_changed = EXCLUDED.last_changed,\n idle = 
userstatus.idle + (EXCLUDED.last_changed - userstatus.last_changed);\n \"\"\"\n data = json.dumps(\n [\n {\"user_id\": user_id, \"last_changed\": timestamp}\n for user_id, timestamp in self.status_batch[\"idle\"].items()\n ]\n )\n await self.bot.cxn.execute(query, data)\n self.status_batch[\"idle\"].clear()\n if self.status_batch[\"dnd\"]:\n query = \"\"\"\n INSERT INTO userstatus (user_id)\n SELECT x.user_id\n FROM JSONB_TO_RECORDSET($1::JSONB)\n AS x(user_id BIGINT, last_changed DOUBLE PRECISION)\n ON CONFLICT (user_id)\n DO UPDATE SET last_changed = EXCLUDED.last_changed,\n dnd = userstatus.dnd + (EXCLUDED.last_changed - userstatus.last_changed)\n \"\"\"\n data = json.dumps(\n [\n {\"user_id\": user_id, \"last_changed\": timestamp}\n for user_id, timestamp in self.status_batch[\"dnd\"].items()\n ]\n )\n await self.bot.cxn.execute(query, data)\n self.status_batch[\"dnd\"].clear()\n if self.status_batch[\"offline\"]:\n query = \"\"\"\n INSERT INTO userstatus (user_id)\n SELECT x.user_id\n FROM JSONB_TO_RECORDSET($1::JSONB)\n AS x(user_id BIGINT, last_changed DOUBLE PRECISION)\n ON CONFLICT (user_id)\n DO UPDATE SET last_changed = EXCLUDED.last_changed;\n \"\"\"\n data = json.dumps(\n [\n {\"user_id\": user_id, \"last_changed\": timestamp}\n for user_id, timestamp in self.status_batch[\n \"offline\"\n ].items()\n ]\n )\n await self.bot.cxn.execute(query, data)\n self.status_batch[\"offline\"].clear()\n\n @status_inserter.error\n async def loop_error(self, exc):\n self.bot.dispatch(\"error\", \"loop_error\", tb=utils.traceback_maker(exc))\n\n @tasks.loop(seconds=0.2)\n async def message_inserter(self):\n \"\"\"\n Main bulk message inserter\n \"\"\"\n\n if self.message_batch: # Insert every message into the db\n query = \"\"\"\n INSERT INTO messages (unix, timestamp, content,\n message_id, author_id, channel_id, server_id)\n SELECT x.unix, x.timestamp, x.content,\n x.message_id, x.author_id, x.channel_id, x.server_id\n FROM JSONB_TO_RECORDSET($1::JSONB)\n AS x(unix REAL, timestamp TIMESTAMP, content TEXT,\n message_id BIGINT, author_id BIGINT,\n channel_id BIGINT, server_id BIGINT)\n \"\"\"\n async with self.batch_lock:\n data = json.dumps(self.message_batch)\n await self.bot.cxn.execute(query, data)\n self.message_batch.clear()\n\n if self.snipe_batch: # Snipe command setup\n query = \"\"\"\n UPDATE messages\n SET deleted = True\n WHERE message_id = $1;\n \"\"\" # Updates already stored messages.\n async with self.batch_lock:\n await self.bot.cxn.executemany(query, ((x,) for x in self.snipe_batch))\n self.snipe_batch.clear()\n\n if self.edited_batch: # Edit snipe command setup\n query = \"\"\"\n UPDATE messages\n SET edited = True\n WHERE message_id = $1;\n \"\"\" # Updates already stored messages.\n async with self.batch_lock:\n await self.bot.cxn.executemany(query, ((x,) for x in self.edited_batch))\n self.edited_batch.clear()\n\n @message_inserter.error\n async def loop_error(self, exc):\n self.bot.dispatch(\"error\", \"loop_error\", tb=utils.traceback_maker(exc))\n\n @tasks.loop(seconds=2.0)\n async def bulk_inserter(self):\n self.bot.batch_inserts += 1\n if self.command_batch: # Insert all the commands executed.\n query = \"\"\"\n INSERT INTO commands (server_id, channel_id,\n author_id, timestamp, prefix, command, failed)\n SELECT x.server, x.channel, x.author,\n x.timestamp, x.prefix, x.command, x.failed\n FROM JSONB_TO_RECORDSET($1::JSONB)\n AS x(server BIGINT, channel BIGINT,\n author BIGINT, timestamp TIMESTAMP,\n prefix TEXT, command TEXT, failed BOOLEAN);\n \"\"\"\n async with 
self.batch_lock:\n data = json.dumps(self.command_batch)\n await self.bot.cxn.execute(query, data)\n\n # Command logger to ./data/logs/commands.log\n destination = None\n for x in self.command_batch:\n if x[\"server\"] is None:\n destination = \"Private Message\"\n else:\n destination = f\"#{self.bot.get_channel(x['channel'])} [{x['channel']}] ({self.bot.get_guild(x['server'])}) [{x['server']}]\"\n command_logger.info(\n f\"{self.bot.get_user(x['author'])} in {destination}: {x['content']}\"\n )\n self.command_batch.clear()\n\n # Emoji usage tracking\n if self.emoji_batch:\n query = \"\"\"\n INSERT INTO emojistats (server_id, emoji_id, total)\n SELECT x.server_id, x.emoji_id, x.added\n FROM JSONB_TO_RECORDSET($1::JSONB)\n AS x(server_id BIGINT, emoji_id BIGINT, added INT)\n ON CONFLICT (server_id, emoji_id) DO UPDATE\n SET total = emojistats.total + EXCLUDED.total;\n \"\"\"\n async with self.batch_lock:\n data = json.dumps(\n [\n {\"server_id\": server_id, \"emoji_id\": emoji_id, \"added\": count}\n for server_id, data in self.emoji_batch.items()\n for emoji_id, count in data.items()\n ]\n )\n await self.bot.cxn.execute(query, data)\n self.emoji_batch.clear()\n\n if self.tracker_batch: # Track user last seen times\n query = \"\"\"\n INSERT INTO tracker (user_id, unix, action)\n VALUES ($1, $2, $3)\n ON CONFLICT (user_id)\n DO UPDATE SET unix = $2, action = $3\n WHERE tracker.user_id = $1;\n \"\"\"\n async with self.batch_lock:\n await self.bot.cxn.executemany(\n query,\n (\n (entry[0], entry[1][0], entry[1][1])\n for entry in self.tracker_batch.items()\n ),\n )\n self.tracker_batch.clear()\n\n if self.avatar_batch: # Save user avatars\n query = \"\"\"\n INSERT INTO useravatars (user_id, avatar_id)\n SELECT x.user_id, x.avatar_id\n FROM JSONB_TO_RECORDSET($1::JSONB)\n AS x(user_id BIGINT, avatar_id BIGINT)\n \"\"\"\n async with self.batch_lock:\n data = json.dumps(self.avatar_batch)\n await self.bot.cxn.execute(query, data)\n self.avatar_batch.clear()\n\n if self.usernames_batch: # Save usernames\n query = \"\"\"\n INSERT INTO usernames (user_id, username)\n SELECT x.user_id, x.name\n FROM JSONB_TO_RECORDSET($1::JSONB)\n AS x(user_id BIGINT, name TEXT)\n \"\"\"\n async with self.batch_lock:\n data = json.dumps(self.usernames_batch)\n await self.bot.cxn.execute(query, data)\n self.usernames_batch.clear()\n\n if self.nicknames_batch: # Save user nicknames\n query = \"\"\"\n INSERT INTO usernicks (user_id, server_id, nickname)\n SELECT x.user_id, x.server_id, x.nickname\n FROM JSONB_TO_RECORDSET($1::JSONB)\n AS x(user_id BIGINT, server_id BIGINT, nickname TEXT)\n \"\"\"\n async with self.batch_lock:\n data = json.dumps(self.nicknames_batch)\n await self.bot.cxn.execute(query, data)\n self.nicknames_batch.clear()\n\n if self.roles_batch: # Insert roles to reassign later.\n query = \"\"\"\n INSERT INTO userroles (user_id, server_id, roles)\n SELECT x.user_id, x.server_id, x.roles\n FROM JSONB_TO_RECORDSET($1::JSONB)\n AS x(user_id BIGINT, server_id BIGINT, roles TEXT)\n ON CONFLICT (user_id, server_id)\n DO UPDATE SET roles = EXCLUDED.roles\n \"\"\"\n async with self.batch_lock:\n data = json.dumps(\n [\n {\"server_id\": server_id, \"user_id\": user_id, \"roles\": roles}\n for server_id, data in self.roles_batch.items()\n for user_id, roles in data.items()\n ]\n )\n await self.bot.cxn.execute(query, data)\n self.roles_batch.clear()\n\n if self.invite_batch: # Insert invite data for basic tracking\n query = \"\"\"\n INSERT INTO invites (invitee, inviter, server_id)\n SELECT x.invitee, x.inviter, 
x.server_id\n FROM JSONB_TO_RECORDSET($1::JSONB)\n AS x(invitee BIGINT, inviter BIGINT, server_id BIGINT)\n \"\"\"\n async with self.batch_lock:\n data = json.dumps(self.invite_batch)\n await self.bot.cxn.execute(query, data)\n self.invite_batch.clear()\n\n @bulk_inserter.error\n async def loop_error(self, exc):\n self.bot.dispatch(\"error\", \"loop_error\", tb=utils.traceback_maker(exc))\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n async def on_command(self, ctx):\n command = ctx.command.qualified_name\n self.bot.command_stats[command] += 1\n if ctx.guild:\n server_id = ctx.guild.id\n else:\n server_id = None\n async with self.batch_lock:\n self.command_batch.append(\n {\n \"server\": server_id,\n \"channel\": ctx.channel.id,\n \"author\": ctx.author.id,\n \"timestamp\": str(ctx.message.created_at.utcnow()),\n \"prefix\": ctx.prefix,\n \"command\": ctx.command.qualified_name,\n \"failed\": ctx.command_failed,\n \"content\": ctx.message.clean_content.replace(\"\\u0000\", \"\"),\n }\n )\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n async def on_raw_message_delete(self, payload):\n async with self.batch_lock:\n self.snipe_batch.append(payload.message_id)\n\n # Helper functions to detect changes\n @staticmethod\n async def status_changed(before, after):\n if before.status != after.status:\n return True\n\n try:\n if before.activity != after.activity:\n return True\n except KeyError:\n pass\n\n @staticmethod\n async def avatar_changed(before, after):\n if before.avatar_url != after.avatar_url:\n return True\n\n @staticmethod\n async def username_changed(before, after):\n if before.discriminator != after.discriminator:\n return True\n if before.name != after.name:\n return True\n\n @staticmethod\n async def nickname_changed(before, after):\n if before.display_name != after.display_name:\n return True\n\n @staticmethod\n async def roles_changed(before, after):\n if before.roles != after.roles:\n return True\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n @decorators.event_check(lambda s, b, a: not a.bot)\n async def on_member_update(self, before, after):\n\n if before.status != after.status:\n async with self.batch_lock:\n self.status_batch[str(before.status)][after.id] = time.time()\n\n if await self.status_changed(before, after):\n async with self.batch_lock:\n self.tracker_batch[before.id] = (time.time(), \"updating their status\")\n\n if await self.nickname_changed(before, after):\n async with self.batch_lock:\n self.nicknames_batch.append(\n {\n \"user_id\": after.id,\n \"server_id\": after.guild.id,\n \"nickname\": before.display_name.replace(\"\\u0000\", \"\"),\n }\n )\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n @decorators.event_check(lambda s, b, a: not a.bot)\n async def on_user_update(self, before, after):\n \"\"\"\n Here's where we get notified of avatar,\n username, and discriminator changes.\n \"\"\"\n if await self.avatar_changed(before, after):\n async with self.batch_lock:\n self.tracker_batch[before.id] = (time.time(), \"updating their avatar\")\n if self.bot.avatar_webhook: # Check if we have the webhook set up.\n try:\n avatar_url = str(after.avatar_url_as(format=\"png\", size=1024))\n resp = await self.bot.get((avatar_url), res_method=\"read\")\n data = io.BytesIO(resp)\n dfile = discord.File(data, filename=f\"{after.id}.png\")\n self.queue.put_nowait(dfile)\n except Exception as e:\n await self.bot.logging_webhook.send(f\"Error in avatar_batcher: {e}\")\n await self.bot.logging_webhook.send(\n 
\"```prolog\\n\" + str(traceback.format_exc()) + \"```\"\n )\n\n if await self.username_changed(before, after):\n async with self.batch_lock:\n self.usernames_batch.append(\n {\n \"user_id\": before.id,\n \"name\": str(before).replace(\"\\u0000\", \"\"),\n }\n )\n self.tracker_batch[before.id] = (time.time(), \"updating their username\")\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n @decorators.event_check(lambda s, m: m.guild and not m.author.bot)\n async def on_message(self, message):\n async with self.batch_lock:\n self.message_batch.append(\n {\n \"unix\": message.created_at.replace(tzinfo=timezone.utc).timestamp(),\n \"timestamp\": str(message.created_at.utcnow()),\n \"content\": message.clean_content.replace(\"\\u0000\", \"\"),\n \"message_id\": message.id,\n \"author_id\": message.author.id,\n \"channel_id\": message.channel.id,\n \"server_id\": message.guild.id,\n }\n )\n self.tracker_batch[message.author.id] = (time.time(), \"sending a message\")\n\n matches = EMOJI_REGEX.findall(message.content)\n if matches:\n async with self.batch_lock:\n self.emoji_batch[message.guild.id].update(map(int, matches))\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n @decorators.event_check(lambda s, c, u, w: not u.bot)\n async def on_typing(self, channel, user, when):\n async with self.batch_lock:\n self.tracker_batch[user.id] = (time.time(), \"typing\")\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n async def on_raw_message_edit(self, payload):\n self.edited_batch.append(payload.message_id)\n channel_obj = self.bot.get_channel(payload.channel_id)\n try:\n message = await channel_obj.fetch_message(payload.message_id)\n except (RuntimeError, RuntimeWarning):\n pass\n except Exception:\n return\n if message.author.bot:\n return\n async with self.batch_lock:\n self.tracker_batch[message.author.id] = (time.time(), \"editing a message\")\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n async def on_raw_reaction_add(self, payload):\n\n user = self.bot.get_user(payload.user_id)\n if not user:\n return\n if user.bot:\n return\n async with self.batch_lock:\n self.tracker_batch[payload.user_id] = (time.time(), \"reacting to a message\")\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n @decorators.event_check(lambda s, m, b, a: not m.bot)\n async def on_voice_state_update(self, member, before, after):\n async with self.batch_lock:\n self.tracker_batch[member.id] = (time.time(), \"changing their voice state\")\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n @decorators.event_check(lambda s, i: i.inviter and not i.inviter.bot)\n async def on_invite_create(self, invite):\n async with self.batch_lock:\n self.tracker_batch[invite.inviter.id] = (time.time(), \"creating an invite\")\n if not invite.guild.me.guild_permissions.manage_guild:\n return\n self.bot.invites[invite.guild.id] = await invite.guild.invites()\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n async def on_invite_delete(self, invite):\n if not invite.guild.me.guild_permissions.manage_guild:\n return\n self.bot.invites[invite.guild.id] = await invite.guild.invites()\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n @decorators.event_check(lambda s, m: not m.bot)\n async def on_member_join(self, member):\n async with self.batch_lock:\n self.tracker_batch[member.id] = (time.time(), \"joining a server\")\n\n await asyncio.sleep(2) # API rest.\n\n try:\n if not member.guild.me.guild_permissions.manage_guild:\n return\n except 
AttributeError: # Sometimes if we're getting kicked as they join...\n return\n async with self.batch_lock:\n old_invites = self.bot.invites[member.guild.id]\n new_invites = await member.guild.invites()\n for invite in old_invites:\n if not invite:\n self.bot.logging_webhook.send(f\"**Invite is NoneType**\")\n continue\n if not self.get_invite(new_invites, invite.code):\n self.bot.logging_webhook.send(f\"**Invite code was not matched**\")\n continue\n if invite.uses < self.get_invite(new_invites, invite.code).uses:\n self.invite_batch.append(\n {\n \"invitee\": member.id,\n \"inviter\": invite.inviter.id,\n \"server_id\": member.guild.id,\n }\n )\n self.bot.invites[member.guild.id] = new_invites\n\n def get_invite(self, invite_list, code):\n for invite in invite_list:\n if invite.code == code:\n return invite\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n @decorators.event_check(lambda s, m: not m.bot)\n async def on_member_remove(self, member):\n async with self.batch_lock:\n self.tracker_batch[member.id] = (time.time(), \"leaving a server\")\n roles = \",\".join([str(x.id) for x in member.roles if x.name != \"@everyone\"])\n self.roles_batch[member.guild.id].update({member.id: roles})\n\n if not member.guild.me.guild_permissions.manage_guild:\n return\n self.bot.invites[member.guild.id] = await member.guild.invites()\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n async def on_reaction_add(self, reaction, user):\n self.bot.dispatch(\"picklist_reaction\", reaction, user)\n\n @commands.Cog.listener()\n @decorators.wait_until_ready()\n async def on_reaction_remove(self, reaction, user):\n self.bot.dispatch(\"picklist_reaction\", reaction, user)\n\n async def last_observed(self, member):\n \"\"\"Lookup last_observed data.\"\"\"\n query = \"\"\"\n SELECT DISTINCT ON (unix) unix, action\n FROM tracker\n WHERE user_id = $1\n ORDER BY unix DESC;\n \"\"\"\n last_seen = await self.bot.cxn.fetchrow(query, member.id) or None\n\n query = \"\"\"\n SELECT MAX(unix)\n FROM messages\n WHERE author_id = $1;\n \"\"\"\n last_spoke = await self.bot.cxn.fetchval(query, member.id) or None\n server_last_spoke = None\n if hasattr(member, \"guild\"):\n query = \"\"\"\n SELECT MAX(unix)\n FROM messages\n WHERE author_id = $1\n AND server_id = $2;\n \"\"\"\n server_last_spoke = await self.bot.cxn.fetchval(\n query, member.id, member.guild.id\n )\n\n if last_seen:\n action = last_seen[1]\n last_seen = last_seen[0]\n last_seen = utils.time_between(int(last_seen), int(time.time()))\n else:\n action = None\n last_seen = None\n if last_spoke:\n last_spoke = utils.time_between(int(last_spoke), int(time.time()))\n else:\n last_spoke = None\n if server_last_spoke:\n server_last_spoke = utils.time_between(\n int(server_last_spoke), int(time.time())\n )\n else:\n server_last_spoke = None\n\n observed_data = {\n \"action\": action,\n \"last_seen\": last_seen,\n \"last_spoke\": last_spoke,\n \"server_last_spoke\": server_last_spoke,\n }\n return observed_data\n\n async def get_avs(self, user):\n \"\"\"\n Lookup all saved user avatars\n \"\"\"\n avatars = []\n query = \"\"\"\n SELECT ARRAY(\n SELECT avatar_id\n FROM useravatars\n WHERE user_id = $1\n ORDER BY insertion DESC\n ) as avatar_list;\n \"\"\"\n results = await self.bot.cxn.fetchval(query, user.id)\n avatars.extend(results)\n if avatars:\n avatars = [\n f\"https://cdn.discordapp.com/attachments/{self.bot.avatar_webhook.channel.id}/{x}/{user.id}.png\"\n for x in avatars\n ]\n return avatars\n\n async def get_names(self, user):\n \"\"\"\n 
Lookup all saved usernames\n \"\"\"\n usernames = [str(user)] # Tack on their current username\n query = \"\"\"\n SELECT ARRAY(\n SELECT username\n FROM usernames\n WHERE user_id = $1\n ORDER BY insertion DESC\n ) as name_list;\n \"\"\"\n results = await self.bot.cxn.fetchval(query, user.id)\n if results:\n usernames.extend(results)\n return usernames\n\n async def get_nicks(self, user):\n \"\"\"\n Lookup all saved nicknames\n \"\"\"\n if not hasattr(user, \"guild\"):\n return [] # Not a 'member' object\n nicknames = [user.display_name] # Tack on their current nickname\n query = \"\"\"\n SELECT ARRAY(\n SELECT nickname\n FROM usernicks\n WHERE user_id = $1\n AND server_id = $2\n ORDER BY insertion DESC\n ) as nick_list;\n \"\"\"\n results = await self.bot.cxn.fetchval(query, user.id, user.guild.id)\n if results:\n nicknames.extend(results)\n return nicknames\n","sub_path":"cogs/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":30592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"262634329","text":"import recursive_print\n\n\ndef get_down_connections(\n rows,\n key_col,\n downstream_col,\n length_col,\n mask_set=None,\n length_key=r\"length\",\n upstreams_key=r\"upstreams\",\n downstream_key=r\"downstream\",\n data_key=r\"data\",\n verbose=False,\n debuglevel=0,\n):\n # TODO: Consider moving debug and verbose prints to the calling function\n if debuglevel <= -100:\n breakpoint()\n if verbose:\n print(\"down connections ...\")\n\n connections = {\n row[key_col]: {\n downstream_key: row[downstream_col],\n length_key: row[length_col],\n data_key: list(row),\n }\n for row in rows\n if row[key_col] in mask_set\n }\n\n if debuglevel <= -1:\n print(f\"found {len(connections.keys())} segments\")\n if debuglevel <= -3:\n if verbose:\n print(f\"The complete 'connections' object is as follows:\")\n print(connections)\n if verbose:\n print(\"down_connections complete\")\n\n return connections\n\n\ndef get_waterbody_segments(\n connections=None,\n terminal_code=-999,\n waterbody_col=3,\n waterbody_null_code=0,\n data_key=r\"data\",\n downstream_key=r\"downstream\",\n upstreams_key=r\"upstreams\",\n verbose=False,\n debuglevel=0,\n):\n\n if verbose:\n print(\"level_pool_waterbody_set ...\")\n waterbody_dict = {}\n level_pool_waterbody_set = {con[data_key][waterbody_col] for key, con in connections.items()}\n level_pool_waterbody_set.discard(waterbody_null_code)\n waterbody_dict[\"level_pool\"] = level_pool_waterbody_set\n if debuglevel <= -1:\n print(f\"found {len(level_pool_waterbody_set)} waterbodies\")\n if debuglevel <= -3:\n print(level_pool_waterbody_set)\n if verbose:\n print(\"level_pool_waterbody_set complete\")\n\n if verbose:\n print(\"waterbody segments ...\")\n waterbody_segments = {\n key: con[data_key][waterbody_col]\n for key, con in connections.items()\n if not (con[data_key][waterbody_col] == waterbody_null_code)\n }\n if debuglevel <= -1:\n print(f\"found {len(waterbody_segments)} segments that are part of a waterbody\")\n if debuglevel <= -3:\n print(waterbody_segments)\n if verbose:\n print(\"waterbody_segments complete\")\n\n if verbose:\n print(\"waterbody_outlet_set ...\")\n waterbody_outlet_set = set()\n for waterbody_segment in waterbody_segments:\n if connections[waterbody_segment][downstream_key] not in waterbody_segments:\n waterbody_outlet_set.add(waterbody_segment)\n if debuglevel <= -1:\n print(\n f\"found {len(waterbody_outlet_set)} segments that are outlets of a waterbody\"\n )\n if debuglevel 
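A minimal sketch, not part of the cog above: the `on_member_join` handler matches old and new invites by calling `get_invite()` once per invite, a linear scan each time. The same diff through a dict keyed by invite code keeps each lookup constant-time. This assumes discord.py `Invite` objects, which carry `.code`, `.uses` and `.inviter`:

```python
def diff_invite_uses(old_invites, new_invites):
    """Yield (invite_code, inviter_id) for each invite whose use count grew."""
    # Index the fresh invites once instead of rescanning per old invite.
    new_by_code = {inv.code: inv for inv in new_invites if inv}
    for old in old_invites:
        if old is None:
            continue
        new = new_by_code.get(old.code)
        if new is not None and old.uses < new.uses:
            yield old.code, (old.inviter.id if old.inviter else None)
```

The batch handler could then append one row per yielded pair and keep the rest of its bookkeeping unchanged.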
<= -3:\n print(waterbody_outlet_set)\n if verbose:\n print(\"waterbody_outlet_set complete\")\n\n if verbose:\n print(\"waterbody_downstream_set ...\")\n waterbody_downstream_set = set()\n for outlet_segment in waterbody_outlet_set:\n waterbody_downstream_set.add(connections[outlet_segment][downstream_key])\n if debuglevel <= -1:\n print(\n f\"found {len(waterbody_downstream_set)} segments that are below outlets of a waterbody\"\n )\n if debuglevel <= -3:\n print(waterbody_downstream_set)\n if verbose:\n print(\"waterbody_downstream_set complete\")\n\n if verbose:\n print(\"waterbody_upstreams_set ...\")\n waterbody_upstreams_set = set()\n for waterbody_segment in waterbody_segments:\n for upstream in connections[waterbody_segment][upstreams_key]:\n if not upstream == terminal_code and not upstream in waterbody_segments:\n waterbody_upstreams_set.add(upstream)\n waterbody_upstreams_set.discard(\n terminal_code\n ) # TODO: Is this the best place for this filter -- check if ever used.\n if debuglevel <= -1:\n print(\n f\"found {len(waterbody_upstreams_set)} segments that are upstream of a waterbody\"\n )\n if debuglevel <= -3:\n print(waterbody_upstreams_set)\n if verbose:\n print(\"waterbody_upstreams_set complete\")\n\n return (\n waterbody_dict,\n waterbody_segments,\n waterbody_outlet_set,\n waterbody_upstreams_set,\n waterbody_downstream_set,\n )\n\n\ndef determine_keys(\n connections\n # , key_col, downstream_col\n ,\n terminal_code,\n upstreams_key=r\"upstreams\",\n downstream_key=r\"downstream\",\n verbose=False,\n debuglevel=0,\n):\n\n if verbose:\n print(\"ref_keys ...\")\n ref_keys = {con[downstream_key] for key, con in connections.items()}\n if debuglevel <= -1:\n print(f\"found {len(ref_keys)} ref_keys\")\n if debuglevel <= -3:\n print(ref_keys)\n if verbose:\n print(\"ref_keys complete\")\n\n if verbose:\n print(\"headwater_keys ...\")\n headwater_keys = {x for x in connections.keys() if x not in ref_keys}\n if debuglevel <= -1:\n print(f\"found {len(headwater_keys)} headwater segments\")\n if debuglevel <= -3:\n print(headwater_keys)\n if verbose:\n print(\"headwater_keys complete\")\n\n # Get the downstream terminating nodes\n if verbose:\n print(\"terminal_keys ...\")\n # Find the pointing-to keys not found in the key dataset.\n terminal_ref_keys = {x for x in ref_keys if x not in connections.keys()}\n\n # Then collect the keys associated with those 'pointing-tos'\n terminal_keys = set()\n for key, con in connections.items():\n curr_term_ref_key = con[downstream_key]\n if curr_term_ref_key in terminal_ref_keys:\n if curr_term_ref_key != terminal_code:\n if debuglevel <= -2:\n print(\n f\"Non-standard terminal key {con[downstream_key]} found in segment {key}\"\n )\n elif curr_term_ref_key == terminal_code:\n if debuglevel <= -3:\n print(\n f\"Standard terminal key {con[downstream_key]} found in segment {key}\"\n )\n terminal_keys.add(key)\n if debuglevel <= -1:\n print(f\"found {len(terminal_keys)} terminal segments\")\n if debuglevel <= -1:\n print(\n f\"of those, {len([x for x in terminal_ref_keys if x != terminal_code])} had non-standard terminal keys\"\n )\n if debuglevel <= -3:\n print(terminal_keys)\n if verbose:\n print(\"terminal_keys complete\")\n\n if verbose:\n print(\"circular_keys ...\")\n circular_keys = set()\n for key, value in connections.items():\n try:\n # TODO: benchmark try/except vs. nested if statment on 'in' to handle terminal keys\n # e.g., \"if key not in terminal_keys: ... 
etc.\n if connections[connections[key][downstream_key]][downstream_key] == key:\n circular_keys.add(key)\n elif (\n connections[\n connections[connections[key][downstream_key]][downstream_key]\n ][downstream_key]\n == key\n ):\n circular_keys.add(key)\n elif (\n connections[\n connections[\n connections[connections[key][downstream_key]][downstream_key]\n ][downstream_key]\n ][downstream_key]\n == key\n ):\n circular_keys.add(key)\n elif (\n connections[\n connections[\n connections[\n connections[connections[key][downstream_key]][\n downstream_key\n ]\n ][downstream_key]\n ][downstream_key]\n ][downstream_key]\n == key\n ):\n circular_keys.add(key)\n except:\n pass\n\n if debuglevel <= -1:\n print(\n f\"identified at least {len(circular_keys)} segments with circular references testing to the fourth level\"\n )\n if debuglevel <= -3:\n print(circular_keys)\n if verbose:\n print(\"circular_keys complete\")\n\n return (\n connections.keys(),\n ref_keys,\n headwater_keys,\n terminal_keys,\n terminal_ref_keys,\n circular_keys,\n )\n\n\ndef get_up_connections(\n connections,\n terminal_code,\n headwater_keys,\n terminal_keys,\n upstreams_key=r\"upstreams\",\n downstream_key=r\"downstream\",\n verbose=False,\n debuglevel=0,\n):\n\n # Create inverse of connections looking upstream\n if verbose:\n print(\"identifying upstream connections and junctions ...\")\n\n # Using Sets for Junction and Visited keys is REALLY, REALLY, REALLY, FAST!!!\n junction_keys = set()\n visited_keys = set()\n visited_terminal_keys = set()\n junction_count = 0\n for hkey in headwater_keys:\n # TODO: Create a dictionary key identifying relationship to the terminal segment.\n\n # Start with the headwater keys and label the upstream connections\n # with the terminal_code...\n connections[hkey].update({upstreams_key: {terminal_code}})\n visited_keys.add(hkey)\n # Then iterate through the list and search for the other values\n ukey = hkey\n # print(ukey, hkey)\n # print(visited_keys)\n # print(ukey not in terminal_keys)\n # print(ukey not in junction_keys)\n while True:\n dkey = connections[ukey][downstream_key]\n if (ukey in terminal_keys) or (ukey in junction_keys):\n # If we have hit the bottom (a terminal_key) or if\n # we have joined into an already explored branch, STOP.\n if ukey in terminal_keys:\n visited_terminal_keys.add(ukey)\n break\n if (\n upstreams_key not in connections[dkey]\n ): # Check for key in dictionary https://able.bio/rhett/check-if-a-key-exists-in-a-python-dictionary--73iajoz\n connections[dkey].update({upstreams_key: set()})\n connections[dkey][upstreams_key].add(ukey)\n visited_keys.add(dkey)\n else:\n if terminal_code in connections[dkey][upstreams_key]:\n # If the downstream node here is labeled as a headwater (because it\n # has an upstream set with the terminal code), it means\n # that the network had a break and that the traversal has\n # spanned the gap and the headwater is not actually not a terminating node.\n # In that case, reset the node to be a blank list (or set, if using\n # that method), then proceed downstream.\n # TODO: THIS IS A DANGEROUS/FRAGILE STEP AND DESERVES ADDITIONAL REVIEW\n # TODO: TO MAKE SURE IT IS DOING WHAT WE INTEND AS DESCRIBED ABOVE\n # TODO: RESERVOIRS: For instance, this will probably break for subnetworks containing reservoirs\n\n connections[dkey].update({upstreams_key: set()})\n\n connections[dkey][upstreams_key].add(ukey)\n visited_keys.add(dkey)\n # print(dkey, connections[dkey][upstreams_key], visited_keys)\n if len(connections[dkey][upstreams_key]) == 
2:\n if dkey not in junction_keys:\n junction_keys.add(dkey)\n junction_count += 1\n if debuglevel <= -2:\n print(\n f\"Junction found above/into Segment {dkey} with upstream Segments {connections[dkey][upstreams_key]}\"\n )\n elif len(connections[dkey][upstreams_key]) > 2:\n if dkey not in junction_keys:\n # At this point, the logic does not allow for this to be a non-junction\n # TODO: raise/handle error/warning\n print(\n \"key error -- junction analysis has an undetermined anomaly!\"\n )\n # print(dkey in visited_keys)\n # for temp_ukey in connections[dkey][upstreams_key]:\n # print(temp_ukey, temp_ukey in visited_keys)\n if debuglevel <= -2:\n print(\n f\"revisited Junction above/into Segment {dkey} now with upstream Segments {connections[dkey][upstreams_key]}\"\n )\n junction_count += 1\n ukey = dkey\n\n if debuglevel <= -1:\n print(f\"visited {len(visited_keys)} segments\")\n if debuglevel <= -1:\n print(\n f\"found {junction_count} junctions in {len(junction_keys)} junction nodes\"\n )\n if debuglevel <= -3:\n print(junction_keys)\n if debuglevel <= -4:\n print(connections)\n if verbose:\n print(\"up_connections complete\")\n if verbose:\n print(\"\")\n\n if verbose:\n print(\"confluence segments ...\")\n confluence_segment_set = {\n seg for seg, con in connections.items() if con[downstream_key] in junction_keys\n }\n if debuglevel <= -1:\n print(f\"found {len(confluence_segment_set)} confluence segments\")\n if debuglevel <= -3:\n print(confluence_segment_set)\n if verbose:\n print(\"confluence_segment_set complete\")\n\n return (\n junction_keys,\n confluence_segment_set,\n visited_keys,\n visited_terminal_keys,\n junction_count,\n )\n\n\ndef main():\n \"\"\"##TEST\"\"\"\n print(\"\")\n print(\"Executing Test\")\n # Test data\n debuglevel = -3\n verbose = True\n test_rows = [\n [50, 178, 51, 0],\n [51, 178, 50, 0],\n [60, 178, 61, 0],\n [61, 178, 62, 0],\n [62, 178, 60, 0],\n [70, 178, 71, 0],\n [71, 178, 72, 0],\n [72, 178, 73, 0],\n [73, 178, 70, 0],\n [80, 178, 81, 0],\n [81, 178, 82, 0],\n [82, 178, 83, 0],\n [83, 178, 84, 0],\n [84, 178, 80, 0],\n [0, 456, -999, 0],\n [1, 178, 4, 0],\n [2, 394, 0, 0],\n [3, 301, 2, 0],\n [4, 798, 0, 0],\n [5, 679, 4, 0],\n [6, 523, 0, 0],\n [7, 815, 2, 0],\n [8, 841, -999, 0],\n [9, 514, 8, 0],\n [10, 458, 9, 0],\n [11, 832, 10, 0],\n [12, 543, 11, 0],\n [13, 240, 12, 0],\n [14, 548, 13, 0],\n [15, 920, 14, 0],\n [16, 920, 15, 401],\n [17, 514, 16, 401],\n [18, 458, 17, 0],\n [180, 458, 17, 0],\n [181, 458, 180, 0],\n [19, 832, 18, 0],\n [20, 543, 19, 0],\n [21, 240, 16, 401],\n [22, 548, 21, 0],\n [23, 920, 22, 0],\n [24, 240, 23, 0],\n [25, 548, 12, 0],\n [26, 920, 25, 0],\n [27, 920, 26, 0],\n [28, 920, 27, 0],\n ]\n\n test_key_col = 0\n test_downstream_col = 2\n test_length_col = 1\n test_terminal_code = -999\n test_waterbody_col = 3\n test_waterbody_null_code = 0\n\n (test_connections) = get_down_connections(\n rows=test_rows,\n key_col=test_key_col,\n mask_set={row[test_key_col] for row in test_rows},\n downstream_col=test_downstream_col,\n length_col=test_length_col,\n verbose=verbose,\n debuglevel=debuglevel,\n )\n\n (\n test_all_keys,\n test_ref_keys,\n test_headwater_keys,\n test_terminal_keys,\n test_terminal_ref_keys,\n test_circular_keys,\n ) = determine_keys(\n connections=test_connections,\n terminal_code=test_terminal_code,\n verbose=verbose,\n debuglevel=debuglevel,\n )\n\n (\n test_junction_keys,\n test_confluence_segment_set,\n test_visited_keys,\n test_visited_terminal_keys,\n test_junction_count,\n ) = 
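`determine_keys()` above tests for circular references only down to the fourth level, with explicitly nested lookups and a TODO. A hedged sketch of the unbounded version over the same `connections` dict (key names as in this module): walk downstream pointers until the chain leaves the dataset or revisits a node, and treat a walk that returns to its start as a cycle.

```python
def find_circular_keys(connections, downstream_key="downstream"):
    circular = set()
    for start in connections:
        seen = set()
        key = start
        # Follow downstream pointers; stop at a terminal or a repeat.
        while key in connections and key not in seen:
            seen.add(key)
            key = connections[key][downstream_key]
        if key == start:  # the walk came back to its origin: a cycle
            circular.add(start)
    return circular
```

Unlike the nested version, this flags a cycle of any length, at the cost of an O(V) walk per key in the worst case.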
get_up_connections(\n connections=test_connections,\n terminal_code=test_terminal_code,\n headwater_keys=test_headwater_keys,\n terminal_keys=test_terminal_keys,\n verbose=verbose,\n debuglevel=debuglevel,\n )\n\n # TODO: Set/pass/identify a proper flag value\n if test_waterbody_col is not None:\n (\n test_waterbody_dict,\n test_waterbody_segments,\n test_waterbody_outlet_set,\n test_waterbody_upstreams_set,\n test_waterbody_downstream_set,\n ) = get_waterbody_segments(\n connections=test_connections,\n terminal_code=test_terminal_code,\n waterbody_col=test_waterbody_col,\n waterbody_null_code=test_waterbody_null_code,\n verbose=verbose,\n debuglevel=debuglevel,\n )\n\n recursive_print.print_connections(\n headwater_keys=test_headwater_keys,\n down_connections=test_connections,\n up_connections=test_connections,\n terminal_code=test_terminal_code,\n terminal_keys=test_terminal_keys,\n terminal_ref_keys=test_terminal_ref_keys,\n debuglevel=debuglevel,\n )\n\n recursive_print.print_basic_network_info(\n connections=test_connections,\n headwater_keys=test_headwater_keys,\n junction_keys=test_junction_keys,\n terminal_keys=test_terminal_keys,\n terminal_code=test_terminal_code,\n verbose=verbose,\n debuglevel=debuglevel,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/python_framework_v01/networkbuilder.py","file_name":"networkbuilder.py","file_ext":"py","file_size_in_byte":17529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"230521835","text":"#!/usr/bin/python3\n\ndef solve(line):\n ans = 0\n for i in range(len(line) - 1, -1, -1):\n if line[i] == \"-\":\n ans += 1\n for j in range(0, i + 1):\n if (line[j] == '-'):\n line[j] = '+'\n else:\n line[j] = '-'\n # print(line)\n return ans\n\ndef main():\n T = int(input())\n for i in range(1, T + 1):\n print(\"Case #{}: {}\".format(i, solve(list(input().strip()))))\n \n\nmain()\n","sub_path":"solutions_5634697451274240_0/Python/cdkrot/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"30041615","text":"import spotipy\nimport spotipy.util as util\nimport pandas as pd\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\nfrom sklearn import decomposition\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn import metrics\nimport numpy as np\nimport seaborn as sn\n\n#Get Songs\ntoken = util.prompt_for_user_token('1256293535','playlist-modify-public',client_id='2dceb191a60046449db76d84e7d424c1',client_secret='55303fe8baf84690a75afa5f37ef311a',redirect_uri='https://www.google.com/')\n\nspotify = spotipy.Spotify(token)\n\ndef create(items, playlist):\n total = []\n for song in items:\n temp = []\n if song.get('track') != None:\n id = song.get('track').get('id')\n features = spotify.audio_features([id])[0]\n if features != None:\n temp.append(song.get('track').get('artists')[0].get('name').encode('ascii', 'ignore'))\n temp.append(song.get('track').get('name').encode('ascii', 'ignore'))\n temp.append(id)\n temp.append(playlist)\n for y in ['energy', 'liveness', 'tempo', 'speechiness', 'acousticness', 'instrumentalness', 'danceability', 'key', 'duration_ms', 'loudness', 'valence', 'mode']:\n temp.append(features.get(y))\n 
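The `create()` helper in the Spotify script above issues one `audio_features()` request per track. spotipy accepts up to 100 track ids per call, so the lookups can be batched; a sketch, assuming an authenticated `spotipy.Spotify` client named `spotify` as in this script:

```python
def batched_audio_features(spotify, track_ids, batch_size=100):
    """Fetch audio features in chunks of up to 100 ids per request."""
    features_by_id = {}
    for i in range(0, len(track_ids), batch_size):
        chunk = track_ids[i:i + batch_size]
        # Unresolvable tracks come back as None and are skipped.
        for track_id, feats in zip(chunk, spotify.audio_features(chunk)):
            if feats is not None:
                features_by_id[track_id] = feats
    return features_by_id
```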
total.append(temp)\n print(temp)\n return total\n\ndef get_playlist_tracks(username,playlist_id, playlist):\n results = spotify.user_playlist_tracks(username,playlist_id)\n songs = create(results.get('items'), playlist)\n while results['next']:\n results = spotify.next(results)\n for x in create(results.get('items'), playlist):\n songs.append(x)\n return songs\n\n#My Songs\ndef get_my_songs():\n vybes = get_playlist_tracks('1256293535', '4QklqO91zFyLHaZGug6AQT', 'vybes')\n chillz = get_playlist_tracks('1256293535', '2bDb7JITikmUGM5QooCTfB', 'chillz')\n gibberish = get_playlist_tracks('1256293535', '4IRBP431dMVa04bnjG88VY', 'gibberish')\n wack = get_playlist_tracks('1256293535', '56iN0HPEq6nl2lLYQlSch5', 'wack')\n singed = get_playlist_tracks('1256293535', '3QmptCYgBwtHYbcxtvXJco', 'singed')\n litt = get_playlist_tracks('1256293535', '3nF16flWwFdkDU8yUtbXQp', 'litt')\n bars = get_playlist_tracks('1256293535', '6wM0xkWBbSA9DIhXXNN6JB', 'bars')\n kms = get_playlist_tracks('1256293535', '3bOmC7EfH5I0ESVztnKgTZ', 'kms')\n total = vybes + chillz + gibberish + wack + singed + litt + bars + kms\n data = pd.DataFrame(total,\n columns=['Artist', 'Name', 'ID', 'Playlist', 'Energy', 'Liveness', 'Tempo', 'Speechiness',\n 'Acousticness', 'Instrumentalness', 'Danceability', 'Key', 'Duration', 'Loudness',\n 'Valence', 'Mode'])\n data = data.drop_duplicates(subset=['Artist', 'Name'], keep='first')\n data.to_csv('/Users/matthewli/Documents/GitHub/SpotifyProject/Data/Spotify.csv')\n\n#Extra Data\ndef get_top_songs():\n mytop = []\n my2016 = get_playlist_tracks('1256293535', '37i9dQZF1CyWPoeKJeEUem', 'my2016')\n mytop = mytop + my2016\n my2016 = pd.DataFrame(my2016, columns=['Artist', 'Name', 'ID', 'Playlist', 'Energy', 'Liveness', 'Tempo', 'Speechiness', 'Acousticness', 'Instrumentalness', 'Danceability', 'Key', 'Duration', 'Loudness', 'Valence', 'Mode'])\n my2016.to_csv('/Users/matthewli/Documents/GitHub/SpotifyProject/Data/My2016.csv')\n\n my2017 = get_playlist_tracks('1256293535', '37i9dQZF1E9QuqoF4pvCjO', 'my2017')\n mytop = mytop + my2017\n my2017 = pd.DataFrame(my2017, columns=['Artist', 'Name', 'ID', 'Playlist', 'Energy', 'Liveness', 'Tempo', 'Speechiness', 'Acousticness', 'Instrumentalness', 'Danceability', 'Key', 'Duration', 'Loudness', 'Valence', 'Mode'])\n my2017.to_csv('/Users/matthewli/Documents/GitHub/SpotifyProject/Data/My2017.csv')\n\n my2018 = get_playlist_tracks('1256293535', '37i9dQZF1EjofvKv8uxeuM', 'my2018')\n mytop = mytop + my2018\n my2018 = pd.DataFrame(my2018, columns=['Artist', 'Name', 'ID', 'Playlist', 'Energy', 'Liveness', 'Tempo', 'Speechiness', 'Acousticness', 'Instrumentalness', 'Danceability', 'Key', 'Duration', 'Loudness', 'Valence', 'Mode'])\n my2018.to_csv('/Users/matthewli/Documents/GitHub/SpotifyProject/Data/My2018.csv')\n\n my2019 = get_playlist_tracks('1256293535', '37i9dQZF1Et8A31ON5uKGj', 'my2019')\n mytop = mytop + my2019\n my2019 = pd.DataFrame(my2019, columns=['Artist', 'Name', 'ID', 'Playlist', 'Energy', 'Liveness', 'Tempo', 'Speechiness', 'Acousticness', 'Instrumentalness', 'Danceability', 'Key', 'Duration', 'Loudness', 'Valence', 'Mode'])\n my2019.to_csv('/Users/matthewli/Documents/GitHub/SpotifyProject/Data/My2019.csv')\n\n mytop = pd.DataFrame(mytop, columns=['Artist', 'Name', 'ID', 'Playlist', 'Energy', 'Liveness', 'Tempo', 'Speechiness', 'Acousticness', 'Instrumentalness', 'Danceability', 'Key', 'Duration', 'Loudness', 'Valence', 'Mode'])\n mytop = mytop.drop_duplicates(subset=['Artist', 'Name'], keep='first')\n 
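`get_top_songs()` repeats the same DataFrame/`to_csv` block once per year. A sketch collapsing it into a loop; the playlist ids are the ones used above, `get_playlist_tracks` is the function defined earlier in this script, and `out_dir` stands in for the hardcoded absolute paths:

```python
import pandas as pd

# Column schema used throughout this script for track rows.
COLUMNS = ['Artist', 'Name', 'ID', 'Playlist', 'Energy', 'Liveness', 'Tempo',
           'Speechiness', 'Acousticness', 'Instrumentalness', 'Danceability',
           'Key', 'Duration', 'Loudness', 'Valence', 'Mode']

# Yearly "Top Songs" playlists, as hardcoded in get_top_songs() above.
YEARLY_PLAYLISTS = {
    'my2016': '37i9dQZF1CyWPoeKJeEUem',
    'my2017': '37i9dQZF1E9QuqoF4pvCjO',
    'my2018': '37i9dQZF1EjofvKv8uxeuM',
    'my2019': '37i9dQZF1Et8A31ON5uKGj',
}

def get_top_songs_compact(out_dir):
    mytop = []
    for name, playlist_id in YEARLY_PLAYLISTS.items():
        rows = get_playlist_tracks('1256293535', playlist_id, name)
        pd.DataFrame(rows, columns=COLUMNS).to_csv(f'{out_dir}/{name.capitalize()}.csv')
        mytop.extend(rows)
    top = pd.DataFrame(mytop, columns=COLUMNS)
    top = top.drop_duplicates(subset=['Artist', 'Name'], keep='first')
    top.to_csv(f'{out_dir}/MyTop.csv')
```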
mytop.to_csv('/Users/matthewli/Documents/GitHub/SpotifyProject/Data/MyTop.csv')\n\n#Read Data\ndef read_data(file):\n dataset = pd.read_csv(file)\n dataset = dataset.drop(dataset.columns[0], axis=1)\n transform = dataset.drop(dataset.columns[0:4], axis=1)\n for x in ['Duration', 'Tempo', 'Key', 'Loudness', 'Instrumentalness', 'Mode', 'Liveness']:\n del transform[x]\n transform = transform.astype('float')\n return (dataset, transform)\n\n#Inertia\ndef get_inertia(data):\n clusters = [1,2,3,4,5,6,7,8,9,10]\n inertia = []\n for x in clusters:\n kmeans = KMeans(n_clusters=x)\n kmeans.fit(data[1])\n inertia.append(kmeans.inertia_)\n plt.scatter(clusters, inertia)\n plt.savefig('/Users/matthewli/Documents/GitHub/SpotifyProject/Media/inertia.png')\n plt.clf()\n\n#PCA\ndef pca(data):\n transform = preprocessing.scale(data[1], with_std=False)\n pca = decomposition.PCA(n_components = 3)\n pca.fit(transform)\n pcatransform = pca.transform(transform)\n kmeans = KMeans(n_clusters=3)\n kmeans.fit(pcatransform)\n pcacenters = kmeans.cluster_centers_\n pcalabels = kmeans.labels_\n print(pca.explained_variance_ratio_)\n pcay_kmeans = kmeans.predict(pcatransform)\n plt.scatter(pcatransform[:,0],pcatransform[:,1], c=pcay_kmeans, alpha=0.25)\n plt.scatter(pcacenters[:, 0], pcacenters[:, 1], c='black', s=200, alpha=0.5);\n plt.xlabel(\"Principal Component 1\")\n plt.ylabel(\"Principal Component 2\")\n plt.savefig('/Users/matthewli/Documents/GitHub/SpotifyProject/Media/pca.png')\n data[0]['Clusters'] = pcalabels\n data[0].to_csv('/Users/matthewli/Documents/GitHub/SpotifyProject/Data/Spotify_PCA.csv')\n plt.clf()\n\n#Predict\ndef knn(data):\n x = data[1].iloc[:, :-1]\n print(x)\n y = data[1].iloc[:, 5]\n print(y)\n x = x.astype('float')\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20)\n scaler = StandardScaler()\n scaler.fit(x_train)\n\n x_train = scaler.transform(x_train)\n x_test = scaler.transform(x_test)\n\n #Best K value\n k_range = range(1,30)\n error = []\n for k in k_range:\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(x_train, y_train)\n\n y_pred = knn.predict(x_test)\n\n error.append(np.mean(y_pred != y_test))\n plt.scatter(k_range, error)\n plt.plot(k_range, error)\n plt.savefig('/Users/matthewli/Documents/GitHub/SpotifyProject/Media/KNN_Error.png')\n\n plt.clf()\n\n #K=15\n knn = KNeighborsClassifier(n_neighbors=15)\n knn.fit(x_train, y_train)\n\n y_pred = knn.predict(x_test)\n confusion = confusion_matrix(y_test, y_pred)\n print(confusion)\n sn.heatmap(confusion)\n plt.savefig('/Users/matthewli/Documents/GitHub/SpotifyProject/Media/Confusion_Matrix.png')\n print(classification_report(y_test, y_pred))\n\n #Predicting\n knn.fit(x, y)\n predictdata = get_playlist_tracks('1256293535', '4ttw16DvJncoO4aY4c7wmU', '')\n predictdata = pd.DataFrame(predictdata, columns=['Artist', 'Name', 'ID', 'Playlist', 'Energy', 'Liveness', 'Tempo',\n 'Speechiness', 'Acousticness', 'Instrumentalness', 'Danceability',\n 'Key', 'Duration', 'Loudness', 'Valence', 'Mode'])\n if len(predictdata) > 0:\n predictvalues = predictdata.drop(predictdata.columns[0:4], axis=1)\n for x in ['Duration', 'Tempo', 'Key', 'Loudness', 'Instrumentalness', 'Mode', 'Liveness']:\n del predictvalues[x]\n print(predictvalues)\n print(knn.predict(predictvalues.values.tolist()))\n predictdata['Predict'] = knn.predict(predictvalues.values.tolist())\n predictdata.to_csv('/Users/matthewli/Documents/GitHub/SpotifyProject/Data/Predict.csv')\n\n#Create Playlist\ndef create_playlist():\n data = 
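`pca()` above centers the features, projects with PCA and clusters with KMeans as separate manual steps. The same chain fits in a scikit-learn `Pipeline`, so identical preprocessing is reapplied to any new data automatically. Note one deliberate variation: the original only centers (`with_std=False`), while this sketch standardizes fully:

```python
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

def cluster_features(features, n_components=3, n_clusters=3):
    """Scale -> project -> cluster as one estimator; returns it plus labels."""
    pipeline = Pipeline([
        ('scale', StandardScaler()),
        ('pca', PCA(n_components=n_components)),
        ('kmeans', KMeans(n_clusters=n_clusters)),
    ])
    labels = pipeline.fit_predict(features)
    return pipeline, labels
```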
pd.read_csv('/Users/matthewli/Documents/GitHub/SpotifyProject/Data/Spotify_PCA.csv')\n for x in data.loc[data['Clusters'] == 0]['ID']:\n print(x)\n spotify.user_playlist_add_tracks('1256293535', '00W1ZozOzYbVMVYvmE8CL5', [x])\n for x in data.loc[data['Clusters'] == 1]['ID']:\n print(x)\n spotify.user_playlist_add_tracks('1256293535', '2nqkTLR8cmOj2FL3JJtOz0', [x])\n for x in data.loc[data['Clusters'] == 2]['ID']:\n print(x)\n spotify.user_playlist_add_tracks('1256293535', '3vFarZjPRZsgtXUVE9ELty', [x])\n\n#Add to Playlist\ndef add_to_playlist():\n predictdata = pd.read_csv('/Users/matthewli/Documents/GitHub/SpotifyProject/Data/Predict.csv')\n if len(predictdata) > 0:\n for x in predictdata.loc[predictdata['Predict'] == 0]['ID']:\n print(x)\n spotify.user_playlist_add_tracks('1256293535', '00W1ZozOzYbVMVYvmE8CL5', [x])\n spotify.user_playlist_remove_all_occurrences_of_tracks('1256293535', '4ttw16DvJncoO4aY4c7wmU', [x])\n for x in predictdata.loc[predictdata['Predict'] == 1]['ID']:\n print(x)\n spotify.user_playlist_add_tracks('1256293535', '2nqkTLR8cmOj2FL3JJtOz0', [x])\n spotify.user_playlist_remove_all_occurrences_of_tracks('1256293535', '4ttw16DvJncoO4aY4c7wmU', [x])\n for x in predictdata.loc[predictdata['Predict'] == 2]['ID']:\n print(x)\n spotify.user_playlist_add_tracks('1256293535', '3vFarZjPRZsgtXUVE9ELty', [x])\n spotify.user_playlist_remove_all_occurrences_of_tracks('1256293535', '4ttw16DvJncoO4aY4c7wmU', [x])\n\nget_my_songs()\npcadata = read_data('/Users/matthewli/Documents/GitHub/SpotifyProject/Data/Spotify.csv')\npca(pcadata)\nknn(pcadata)\nadd_to_playlist()","sub_path":"spotifyProject.py","file_name":"spotifyProject.py","file_ext":"py","file_size_in_byte":10747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"326580385","text":"# -*- coding:utf-8 -*-\n\nimport sqlite3\n\nclass SQL():\n def __init__(self):\n self.conn = None\n self.cursor = None\n self._init()\n\n def _init(self):\n self.conn = sqlite3.connect(\"test.db\")\n self.cursor = self.conn.cursor()\n self.cursor.execute(\"\"\"create table if not exists dev_info (name varchar primary key not null, info text);\"\"\")\n\n def insert(self, name, info):\n # 先判断记录是否存在\n if len(self.query(name)) == 0:\n self.cursor.execute(\"insert into dev_info (name, info) values(?, ?)\", (name, info))\n self.conn.commit()\n return True\n else:\n print(\"insert stop, [{0}] already exist\".format(name))\n return True\n\n def query(self, name):\n if name != \"all\":\n c = self.cursor.execute(\"select * from dev_info where name = '{0}'\".format(name))\n else:\n c = self.cursor.execute(\"select * from dev_info\")\n\n ret = c.fetchall()\n if not len(ret) == 0:\n print(\"query success. [{0}] exist\".format(name))\n else:\n print(\"query success. 
[{0}] not exist\".format(name))\n return ret\n\nif __name__ == \"__main__\":\n sql = SQL()\n sql.insert(\"as\", \"111111111111111111111111111111\")\n sql.insert(\"gg\", \"222222222222222222222222222222\")\n\n content = \"as\"\n result = sql.query(content)\n \n content = \"asasdfasdfas\"\n result = sql.query(content)","sub_path":"sql_utils/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"539580080","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom django.contrib.auth.decorators import login_required\nfrom robokassa.forms import RobokassaForm\nfrom django.shortcuts import render,render_to_response,redirect,get_object_or_404\nfrom django.http.response import HttpResponse\nfrom django.template.loader import get_template\nfrom django.template import Context,Template\nfrom django.contrib import auth\nfrom django.contrib.auth.models import User\nfrom django.template.context_processors import csrf\nfrom bots.models import *\nfrom botup.models import Message_Model\nimport threading\nimport itertools\nimport telebot\nimport base64\nimport random\nimport re\nfrom crypting import encrypt,decrypt\nfrom password import *\nimport pickle\n\ndef reset_money(user):\n for i in Bots.objects.filter(user=user):\n print(i,'==================')\n i.bot_money=0\n i.save()\n print(Bots.objects.filter(user=user))\n\ndef count_money(bots):\n money=0\n for i in bots:\n money+=float(i.bot_money)\n if money == 0:\n money=int(money)\n else:\n money=float(money)\n return money\n\ndef tokens():\n tokens=[]\n for i in Bots.objects.all():\n tokens.append(i.bot_token)\n return tokens\n\ndef check_token(user,token):\n with open('filename.pickle', 'rb') as handle:\n x = pickle.load(handle)\n handle.close()\n users=[]\n tk_us=dict(x[i:i+2] for i in range(0, len(x), 2))\n for id_u,token_u in tk_us.items():\n if id_u == user:\n users.append(token_u)\n return users\n\ndef profile(request):\n if (not auth.get_user(request).is_authenticated()) or (not auth.get_user(request).is_active):\n return redirect('/auth/login')\n else:\n args={}\n args['username']=auth.get_user(request).username\n args['email']=auth.get_user(request).email\n args['count_bots']=len(Bots.objects.filter(user=auth.get_user(request).username))\n if len(Bots.objects.filter(user=auth.get_user(request).username))==0:\n args['bots']=None\n args['all_money']=0\n return render_to_response('profile.html',args)\n else:\n args['bots']=Bots.objects.filter(user=auth.get_user(request).username)\n args['all_money']=count_money(Bots.objects.filter(user=auth.get_user(request).username))\n return render_to_response('profile.html',args)\n\ndef add_bot(request):\n if (not auth.get_user(request).is_authenticated()) or (not auth.get_user(request).is_active):\n return redirect('/auth/login')\n elif len(Bots.objects.filter(user=auth.get_user(request).username))==3:\n return redirect('/profile')\n else:\n args={}\n args.update(csrf(request))\n args['username']=auth.get_user(request).username\n if request.POST:\n token = request.POST.get('token','')\n if bool(re.match(r'\\d+:.+', token)) and len(Bots.objects.filter(user=auth.get_user(request).username)) <=2 and len(Bots.objects.filter(bot_token=token)) == 0:\n ok=True\n\n try:\n telebot.TeleBot(token).get_me()\n except:\n ok=False\n\n if ok == False:\n args['error']='Неверный токен.'\n return render_to_response('add_bot.html',args)\n elif ok == True:\n name=telebot.TeleBot(token).get_me().first_name\n 
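`add_bot()` above constructs a fresh `telebot.TeleBot(token)` and calls `get_me()` three separate times: once to probe inside try/except, then again for the name and the path. One probe can return everything; a sketch using pyTelegramBotAPI, as imported in this module:

```python
import telebot

def validate_token(token):
    """Return (first_name, username) for a valid bot token, else None."""
    try:
        # get_me() raises on an invalid token, so this doubles as the
        # validity check that add_bot() derives from its `ok` flag.
        me = telebot.TeleBot(token).get_me()
    except Exception:
        return None
    return me.first_name, me.username
```

`add_bot()` would then call this once and treat `None` as the invalid-token branch.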
path=telebot.TeleBot(token).get_me().username\n new_bot=Bots(user=auth.get_user(request).username,bot_name=name,bot_path=path,bot_token=token,bot_money=0)\n new_bot.save()\n args['ready']=token\n return render_to_response('add_bot.html',args)\n\n else:\n if not bool(re.match(r'\\d+:.+', token)):\n args['error']='Неверный токен'\n elif len(Bots.objects.filter(bot_token=token)) != 0:\n args['error']='Данный бот уже добавлен'\n return render_to_response('add_bot.html',args)\n else:\n return render_to_response('add_bot.html',args)\n\ndef add_user(request,token,user):\n if token in tokens() and encrypt(password,token).decode('utf-8') not in check_token(user,token):\n with open('filename.pickle', 'rb') as handle:\n b = pickle.load(handle)\n b.append(user)\n b.append(encrypt(password,token).decode(\"utf-8\"))\n with open('filename.pickle', 'wb') as handle:\n pickle.dump(b, handle)\n else:\n return redirect('/profile')\n\ndef string_good(string,types):\n string=str(string)\n last=string[-1]\n text=''\n if last == '1':\n if types == 0:\n text=string+' рубль'\n elif types == 1:\n text=string+' бот'\n elif types == 2:\n text=string+' пользователь'\n elif last in ['2','3','4']:\n if types == 0:\n text=string+' рубля'\n elif types == 1:\n text=string+' бота'\n elif types == 2:\n text=string+' пользователя'\n elif last in ['5','6','7','8','9','0']:\n if types == 0:\n text=string+' рублей'\n elif types == 1:\n text=string+' ботов'\n elif types == 2:\n text=string+' пользователей'\n return text\n\n\ndef main(request):\n args={}\n args['username']=auth.get_user(request).username\n args['paid']=string_good(open('paid.txt','r').readlines()[0],0)\n args['bots']=string_good(str(len(Bots.objects.all())),1)\n args['users']=string_good(str(len(User.objects.all())),2)\n\n return render_to_response('index.html',args)\n\ndef ad(request):\n if (not auth.get_user(request).is_authenticated()) or (not auth.get_user(request).is_active):\n return redirect('/auth/login')\n else:\n args={}\n args.update(csrf(request))\n args['username']=auth.get_user(request).username\n with open('filename.pickle', 'rb') as handle:\n b = pickle.load(handle)\n handle.close()\n args['users_id']=str(int(len(b)/2))\n\n if request.POST:\n message=request.POST.get('message','')\n users=int(request.POST.get('slider',''))\n inv_id=random.randint(1,99999)\n money=float(users*0.7)\n if message and len(message) <= 275 and len(message) > 0 and users >= 50 and users <= int(len(b)/2):\n form = RobokassaForm(initial={\n 'MrchLogin': 'botup.ru',\n 'OutSum': str(money).replace(',','.'),\n 'InvId': str(inv_id),\n 'Desc': 'Оплата рекламы',\n 'Email': auth.get_user(request).email,\n 'IncCurrLabel': '',\n 'Culture': 'ru',\n })\n #lel\n new_model=Message_Model()\n new_model.username=auth.get_user(request).username\n new_model.message=message\n new_model.user_id=str(inv_id)\n new_model.users=int(users)\n new_model.save()\n return render(request, 'pay_with_robokassa.html', {'form': form,'money':str(money),'text':message,'messages':str(users)})\n else:\n if len(message) >= 275 or len(message) < 0:\n args['error']='Слишком длинное сообщение'\n elif not message:\n args['error']='Сообщение отсутствует'\n elif users <= 50 or users >= int(len(b)/2):\n args['error']='Неверное количество сообщений'\n return render_to_response('pay.html',args)\n\n\ndef withdaw(request):\n if (not auth.get_user(request).is_authenticated()) or (not auth.get_user(request).is_active):\n return redirect('/auth/login')\n else:\n args={}\n args.update(csrf(request))\n 
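`add_user()` and `check_token()` above keep `filename.pickle` as a flat `[user, token, user, token, ...]` list and rebuild a dict from slices on every read. A sketch of the same store as a plain dict of user to encrypted tokens; `encrypt` and `password` come from this module's own imports, and the existing flat file would need a one-off migration:

```python
import pickle

def load_store(path='filename.pickle'):
    # A missing file simply means an empty store.
    try:
        with open(path, 'rb') as handle:
            return pickle.load(handle)
    except FileNotFoundError:
        return {}

def add_user_token(user, token, path='filename.pickle'):
    store = load_store(path)
    tokens = store.setdefault(user, [])
    enc = encrypt(password, token).decode('utf-8')  # helpers from crypting/password
    if enc not in tokens:
        tokens.append(enc)
        with open(path, 'wb') as handle:
            pickle.dump(store, handle)

def tokens_for(user, path='filename.pickle'):
    return load_store(path).get(user, [])
```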
args['username']=auth.get_user(request).username\n summa=count_money(Bots.objects.filter(user=auth.get_user(request).username))\n args['sum']=summa\n if request.POST:\n method=request.POST.get('biller','')\n bill=request.POST.get('wallet','')\n if summa >= 10 and method in ['wm','yandex','qiwi']:\n t = threading.Thread(target=reset_money, args = (auth.get_user(request).username,))\n t.daemon = True\n t.start()\n all_sum=float(open('paid.txt','r').read())\n f=open('paid.txt','w').write(str(all_sum+summa))\n text='Имя пользователя: '+auth.get_user(request).username+'\\nМетод: '+method+'\\nСумма: '+str(summa)+'\\nНомёр счета: '+str(bill)\n telebot.TeleBot(\"290713044:AAGo-p7H56yCOxP01KdcmqKZ9AMjsalO88E\").send_message(215666415,text)\n args['sum']='0'\n args['error']='Заявка принята'\n else:\n if summa <= 9:\n args['error']='Минимальная сумма для вывода 10 рублей'\n elif method not in ['wm','yandex','qiwi']:\n args['error']='Неверная платежная система'\n return render_to_response('withdraw.html',args)\n","sub_path":"botup/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"239785254","text":"\"\"\"numero = 0\n\nfor numero in range(100):\n print(numero)\n if numero == 50:\n break\"\"\"\n\n\"\"\"\nnombres = ['Iker', 'Frida', 'Samuel']\nnombre = 'Hola Mundo'\nfor x in nombre:\n print(x)\n\"\"\"\n\n\nfor x in range (1,11):\n multiplicacion = x * 5\n print(x, \"x\", \"5\", \"=\", multiplicacion)\n\n\n\"\"\"\negresados = 15\nbotella = 0\n\nfor x in range(egresados):\n botella = botella + 100\n print(botella)\n\"\"\"\n\n\"\"\"frase = input('Ingresa una palabra o frase: ')\ncantidad = 0\n\nfor x in frase:\n if x in 'aeiou':\n cantidad = cantidad + 1\n print ('Cantidad de vocales: ',cantidad )\"\"\"\n\n\n\n\n","sub_path":"Ciclo FOR.py","file_name":"Ciclo FOR.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"249434748","text":"\"\"\"\nFunctions used to handle mystery clue release.\n\"\"\"\n\nfrom django.utils import timezone\nfrom datetime import datetime, timedelta\nfrom django.conf import settings\n\n\ndef get_current_release():\n \"\"\"\n Returns the current release number, otherwise returns zero if\n START_DATETIME not yet reached.\n\n Note:\n - START_DATETIME and RELEASE_INTERVAL are set in the main settings file\n - times are in the timezone TIME_ZONE, set in the main settings file\n\n Test:\n - before start date (pass)\n - at start date (pass)\n - after start date but before start date + interval (pass)\n - at start date + interval (pass)\n - after start date + interval (pass)\n\n :return: int\n \"\"\"\n release = 0\n\n interval = timedelta(days=int(settings.RELEASE_INTERVAL))\n\n start = timezone.make_aware(datetime.strptime(settings.START_DATETIME,\n settings.DATETIME_FORMAT), timezone.get_default_timezone())\n\n current = timezone.localtime(timezone.now(),\n timezone.get_default_timezone())\n\n while start + release*interval <= current:\n release += 1\n\n return release\n","sub_path":"src/vm-django/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"548327101","text":"#!/usr/bin/python\n\nfrom mininet.topo import Topo\n# Node class\nfrom mininet.node import Node\nfrom mininet.link import Link\n#Host/Switch classes\nfrom mininet.net import 
Mininet\nfrom mininet.util import dumpNodeConnections\nfrom mininet.log import setLogLevel\n\nclass FatTreeTopo(Topo):\n def __init__(self, **opts):\n Topo.__init__(self, **opts)\n\n#Topo Params\n ncores = 4\n nedges = 2*ncores\n nhosts = 2*nedges\n# nports = 16 #36 ?\n# nmaxhosts = nports*nedges/2\n \n #CORE\n #Core Creation\n for c in range(ncores):\n self.addSwitch('core%s' % c)\n\n #AGGREGATION\n #Aggregation Creation\n for a in range(nedges):\n s = self.addSwitch('agg%s' % a)\n #Core-Aggreg Linking\n if (a%2 == 0):\n self.addLink(s, \"core0\")\n self.addLink(s, \"core1\")\n else:\n self.addLink(s, \"core2\")\n self.addLink(s, \"core3\")\n\n #EDGE\n #Edge Creation\n for e in range(nedges):\n s = self.addSwitch('edge%s' % e)\n #Aggreg-Edge Linking\n if (e%2 == 0):\n self.addLink(s, \"agg%s\" % e)\n self.addLink(s, \"agg%s\" % (e+1))\n else:\n self.addLink(s, \"agg%s\" % (e-1))\n self.addLink(s, \"agg%s\" % e)\n\n #HOST\n #Host Creation\n for h in range(nhosts):\n s = self.addHost('host%s' % h)\n #Host-Edge Linking\n self.addLink(s, \"edge%s\" % (h/2))\n\n\ndef test():\n topo = FatTreeTopo()\n net = Mininet(topo)\n net.start()\n dumpNodeConnections(net.hosts)\n\n net.pingAll()\n net.stop()\n\nif __name__ == '__main__':\n setLogLevel('info')\n test()\n\ntopos = { 'fatTree' : ( lambda: FatTreeTopo() ) }\n","sub_path":"mininet/python/SimpleFatTree.py","file_name":"SimpleFatTree.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"171215489","text":"# TO-DO: complete the helper function below to merge 2 sorted arrays\ndef merge(arrA, arrB):\n # elements = len(arrA) + len(arrB)\n # merged_arr = [0] * elements\n\n # Your code here\n alist = []\n if arrA == [] and arrB == []:\n return alist\n if arrA != [] and arrB == []:\n return alist + arrA\n if arrA == [] and arrB != []:\n return alist + arrB\n if arrA != [] and arrB != []:\n if arrA[0] <= arrB[0]:\n alist.append(arrA[0])\n alist = alist + merge(arrA[1:], arrB)\n if arrA[0] > arrB[0]:\n alist.append(arrB[0])\n alist = alist + merge(arrA, arrB[1:])\n return alist\n\n # return merged_arr\n\n# TO-DO: implement the Merge Sort function below recursively\ndef merge_sort(alist):\n if len(alist)>1:\n mid = len(alist)//2\n lefthalf = alist[:mid]\n righthalf = alist[mid:]\n\n merge_sort(lefthalf)\n merge_sort(righthalf)\n\n i=0\n j=0\n k=0\n while i < len(lefthalf) and j < len(righthalf):\n if lefthalf[i] <= righthalf[j]:\n alist[k]=lefthalf[i]\n i=i+1\n else:\n alist[k]=righthalf[j]\n j=j+1\n k=k+1\n\n while i < len(lefthalf):\n alist[k]=lefthalf[i]\n i=i+1\n k=k+1\n\n while j < len(righthalf):\n alist[k]=righthalf[j]\n j=j+1\n k=k+1\n \n\n\n return alist\n\narr1 = [1, 5, 8, 4, 2, 9, 6, 0, 3, 7]\nmerge_sort(arr1)\n\n# STRETCH: implement the recursive logic for merge sort in a way that doesn't \n# utilize any extra memory\n# In other words, your implementation should not allocate any additional lists \n# or data structures; it can only re-use the memory it was given as input\n# def merge_in_place(arr, start, mid, end):\n# # Your code here\n\n\n# def merge_sort_in_place(arr, l, r):\n# # Your code here\n\n# def partition(arr):\n# pivot = arr[1]\n# left = []\n# right = []\n# for element in arr[1:]:\n# if element < pivot:\n# left.append(element)\n# if element >= pivot:\n# right.append(element)\n# return left, right, pivot\n\n# def quicksort(arr):\n# if len(arr) <= 1:\n# return arr\n# left, right, pivot = partition(arr)\n# sorted_left = quicksort(left)\n# 
sorted_right = quicksort(right)\n# sorted = sorted_left + pivot + sorted_right\n# return sorted\n\n\n","sub_path":"src/sorting/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"210543721","text":"#Importamos librerias para acceso a datos en archivos \n\nimport json\n\nimport string #Para asignar numeros a variables\n#Importamos librerias para hacer print de colores\nfrom colorama import Fore, Back, Style\n#https://pypi.org/project/colorama/\n\n#Importamos librerias para hacer tablas (imprimir ticket parqueo)\nfrom tabulate import tabulate\n#https://pypi.org/project/tabulate/\n\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom dateutil.relativedelta import relativedelta\n\n#Definir rutas de los archivos\nruta_usuarios = \"base_datos_usuarios.json\"#ruta de usuarios\nruta_pisos = \"ocupacion_pisos.json\"#ruta de la ocupacion de pisos \nruta_parqueados = \"usuarios_parqueados.json\"#ruta de los usuarios parqueados\nruta_profeParqueados = \"profe_parqueaderos.json\"#ruta pisos con tipo de vehiculo\nruta_reportes_tipo_usuarios = \"reportesusuarios.txt\"\nruta_reportes_tipo_vehiculos = \"reportesusuarios.txt\"\n\n\n##############################################################################\n########################## FUNCIONES ##################################\n##############################################################################\n\n#Funcion mensaje de bienvenida al sistema (6 sistema de administracion)\ndef bienvenida():\n #print(\"\\033[2J\\033[1;1f\") #Comando borrar pantalla\n #get_ipython().magic('clear')\n #no funciona \n print(\"\\n######################################################\")\n print(\"Bienvenido al Sistema de Parqueo Universidad Javeriana\")\n print(\"######################################################\")\n print(\"\\n1. Registrar usuario\",\n \"\\n2. Alterar / Borrar usuario\",\n \"\\n3. Usar Servicio\",\n \"\\n4. Reporte\",\n \"\\n5. Salir\",\n \"\\n6. 
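The sorting record above leaves `merge_in_place` and `merge_sort_in_place` as commented STRETCH stubs. One hedged way to fill them is a rotation-based merge, which builds no helper list of elements (the slice assignment still makes a short-lived copy in CPython; an explicit shift loop would avoid even that):

```python
def merge_in_place(arr, start, mid, end):
    # Merge sorted runs arr[start:mid] and arr[mid:end] by rotating each
    # out-of-place right-run element into position. O(n^2) moves worst case
    # is the usual price for using no auxiliary array.
    while start < mid and mid < end:
        if arr[start] <= arr[mid]:
            start += 1
        else:
            value = arr[mid]
            arr[start + 1:mid + 1] = arr[start:mid]  # shift left run right by one
            arr[start] = value
            start += 1
            mid += 1

def merge_sort_in_place(arr, l, r):
    # Sorts arr[l:r] in place; call as merge_sort_in_place(arr, 0, len(arr)).
    if r - l <= 1:
        return
    mid = (l + r) // 2
    merge_sort_in_place(arr, l, mid)
    merge_sort_in_place(arr, mid, r)
    merge_in_place(arr, l, mid, r)
```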
Administrador\")\n opcion = eval(input(\"\\nElija una opción: \"))\n \n if (opcion == 1):\n registro()\n \n elif(opcion == 2):\n alteraciones()\n \n elif(opcion == 3):\n usar_servicio()\n \n elif(opcion == 4):\n print(\"Digite tipo de reporte\",\n \"1.Cantidad de vehiculos estacionados segun el tipo de usuario\"\n \"2.Cantidad de vehiculos estacionados segun el tipo de vehiculo \"\n \"3.porcentajes\" )\n reporte()\n \n elif(opcion == 5):\n salir = input(\"Desea Salir (Si / No)?\\n\").lower() \n if(salir==\"si\"):\n salir = True\n return salir\n \n elif(opcion == 6):\n administrador()\n else:\n print(\"Valor no valido\")\n bienvenida()\n \n##############################################################################\n\n#Funcion Registro: llama a la funcion de lectura de base de datos, luego\n#Llama a la funcion de solicitud de datos donde compara si usuario ya existe\n#Finalmente llama la funcion de escribir base de datos\ndef registro():\n #print(\"\\033[2J\\033[1;1f\") #Comando borrar pantalla\n print(\"\\n\\n####################################\")\n print(\"Bienvenido al sistema de Registro\")\n print(\"####################################\")\n \n datos = archivo_usuarios_r()\n #False representa que el ID no fue va a ser usado para una alteracion\n ingreso = solicitud_datos(False) \n \n #Verificacion de usuario ya registrado \n for x in range(len(datos['usuarios'])):\n\n if(datos['usuarios'][x][1]==ingreso[1]):\n\n print(\"\\n\\n######################################\",\n \"\\nIdentificacion del usuario ya esta en la base de datos\",\n \"\\nPuede usar la opcion de alterar o borra\")\n return\n \n datos['usuarios'].append(ingreso)\n archivo_usuarios_w(datos)\n print(\"\\n\\nUsuario registrado con exito!\")\n return\n \n##############################################################################\n\n#Funcion alteraciones: Modificar o eliminar registro\n#Modificar: cargar archivo -> pedir ID -> localizar usuario -> pedir datos nuevamente -> escribir base de datos\n#Eliminar: cargar archivo -> pedir ID -> localizar usuario -> eliminar posicion -> escribir base de datos\ndef alteraciones():\n #print(\"\\033[2J\\033[1;1f\") #Comando borrar pantalla\n print(\"\\n\\n####################################\")\n print(\"Bienvenido al sistema de Alteraciones\")\n print(\"####################################\")\n print(\"\\n1. Alterar usuario\",\n \"\\n2. Borrar usuario\"\n \"\\n3. Volver\")\n seleccion = eval(input(\"\\nElija una opción: \")) \n \n #variable para identificar si usuario esta en la base de datos\n #Identificacion realizada al final de esta funcion\n encontrado = False\n \n \n #ALTERAR\n if(seleccion == 1):\n datos = archivo_usuarios_r()\n identificacion = input(\"Digite la Identificacion del usuario a ser alterado: \").strip()\n \n #La identificacion no puede ser alterada\n for x in range (len(datos['usuarios'])):\n if(datos['usuarios'][x][1]==identificacion):\n print(\"Desea alterar al usuario:\", datos['usuarios'][x][0],\"?\")\n print(\"1. Si\",\n \"2. 
No\")\n seleccion = int(eval(input(\"\\nElija una opción: \"))) \n if(seleccion == 1):\n #Condicion para no pedir reingreso de documento ya que no cambia, devuelve 0 de la funcion\n ingreso = solicitud_datos(True)\n #Sobreescribe el 0 ya que el documento no cambia\n ingreso[1] = identificacion\n datos['usuarios'][x]=ingreso\n archivo_usuarios_w(datos)\n print(\"Usuario \\\"\"+ ingreso[0]+\"\\\" actualizado\")\n encontrado = True\n else:\n return\n #BORRAR \n elif(seleccion == 2):\n datos = archivo_usuarios_r() \n identificacion = input(\"Digite la Identificacion del usuario a ser borrado: \").strip()\n \n for x in range (len(datos['usuarios'])):\n if(datos['usuarios'][x][1]==identificacion):\n print(\"Desea borrar al usuario:\", datos['usuarios'][x][0],\"?\")\n print(\"1. Si\",\n \"2. No\")\n seleccion = int(eval(input(\"\\nElija una opción: \"))) \n if(seleccion==1):\n delete = datos['usuarios'].pop(x)\n archivo_usuarios_w(datos) \n print(\"Usuario borrado:\", delete[0])\n encontrado = True\n else:\n return\n\n else:\n return\n \n #Si usuario no fue encontrado variable continua en False y entra en el IF y hace print \n if(encontrado==False):\n print(\"Identificacion\", identificacion ,\"no encontrada\")\n return \n##############################################################################\n\n#Funcion usar servicio: parquear o retiarar vehiculo \ndef usar_servicio():\n #print(\"\\033[2J\\033[1;1f\") #Comando borrar pantalla\n print(\"\\n\\n####################################\")\n print(\"Bienvenido al sistema de Parqueadero\")\n print(\"####################################\")\n \n #Primero verificamos si el usuario ya esta registrado\n encontrado = False\n datos = archivo_usuarios_r()\n identificacion = int(input(\"Digite la Identificacion del usuario: \"))\n\n \n for x in range(len(datos['usuarios'])):\n \n if(datos['usuarios'][x][1]==identificacion):\n encontrado = True\n print(encontrado)\n print(\"\\n------------------------------------------\",\n \"\\nBienvenido:\",datos['usuarios'][x][0],\"!!!\",\n \"\\n------------------------------------------\")\n break #Salimos del for con la infomacion del usuario \n \n if(encontrado==False):\n print(Back.RED,\"Usuario no encontrado\",Back.RESET)\n print(\"Desea ingresar como visitante\",###para hacer lo del visitante/datos necesarios\n \"\\n1. si\",\n \"\\n2. no\",)\n \n seleccion = int(input(\"Opcion: \"))\n if (seleccion == 1):\n \n #False representa que el ID no fue va a ser usado para una alteracion\n ingreso = solicitud_datos(True) \n print()\n print(\"Es requerido que usted realice un pago diario!!!\")\n ingreso[1] = identificacion\n \n datos['usuarios'].append(ingreso)\n archivo_usuarios_w(datos)\n print(\"\\n\\nUsuario registrado con exito!\")\n return\n \n else:\n return\n \n print(\"\\n1. Parquear vehiculo\",\n \"\\n2. Retirar vehiculo\",\n \"\\n3. 
Volver\")\n seleccion = eval(input(\"\\nElija una opción: \")) \n \n if(seleccion == 1):\n #VERIFICAR PARQUEADOS: leermos archivo de parqueados y buscamos en la posicion 0 [ID, HORA, POSICION]\n archivo = archivo_parqueados_r(ruta_parqueados)\n \n for y in range(len(archivo['Parqueados'])):\n if(archivo['Parqueados'][y][0]==identificacion):\n print()\n print(\"Usuario aparece con su carro ya en el parqueado!!!\")\n return\n \n parquear_carro(datos['usuarios'][x])\n \n elif(seleccion ==2):\n \n archivo = archivo_parqueados_r(ruta_parqueados)\n placa = str(input(\"Ingrese su placa: \"))\n \n for z in range(len(archivo['Parqueados'])):\n if(archivo['Parqueados'][z][0]==identificacion and archivo['Parqueados'][z][1]==placa):\n print(\"\\n\"+\n \"Su carro se encuentra en el parqueadero!!!\")\n \n retirar_carro(datos['usuarios'][x]) \n \n if(seleccion ==3):\n return \n \n return\n \n##############################################################################\n \n#Funcion administrador: funcion con contraseña para restarurar / borrar bases de datos\ndef administrador():\n #print(\"\\033[2J\\033[1;1f\") #Comando borrar pantalla\n print(\"\\n\\n####################################\")\n print(\"Bienvenido al sistema de administracion\")\n print(\"####################################\")\n login = input(\"Digite su usuario: \").strip()\n password = input(\"Digite su contraseña: \").strip()\n if(login == \"Admin\" and password == \"Admin\"): \n print(\"\\n1. Cargar Base Datos usuarios\",\n \"\\n2. cargar Registros de estacionamiento\",\n \"\\n3. Cargar Reportes\",\n \"\\n4. Back\",\n \"\\n5. Salir\")\n seleccion = eval(input(\"\\nElija una opción: \")) \n \n if(seleccion == 1):\n datos = archivo_usuarios_r()\n for x in range(len(datos['usuarios'])):\n print(datos['usuarios'][x],end='\\n')\n \n \n elif(seleccion == 2):\n pisos = archivo_pisos_r()\n for num_pisos in range(1,7):\n a = pisos[f'Piso{num_pisos}']\n print(f'\\n######### Piso {num_pisos} ########\\n')\n if(num_pisos!=6):\n for y in range(0,10):\n b = a[y]\n print(b,end='\\n')\n elif(num_pisos==6):\n for y in range(0,5):\n b = a[y]\n print(b,end='\\n')\n \n \n elif(seleccion == 3):\n print(\"Digite el tipo de reporte: \")\n confirma = input(\"Operacion no reversible.\\nDesea cargar configuracion sin carros parqueados (Si/No)?:\")\n if(confirma.strip()=='Si'):\n ruta_pisos = \"BaseDatos/ocupacion_pisos-all_0.json\" #Incluye distribucion de parqueadero\n with open (ruta_pisos,'r') as file:\n pisos = json.load(file)\n ruta_pisos = \"BaseDatos/ocupacion_pisos.json\"\n with open(ruta_pisos, 'w') as file:\n json.dump(pisos, file)\n else:\n print(\"El uso actual del parqueadero no fue alterado\")\n \n elif(seleccion == 4):\n return\n\n else:\n bienvenida()\n bienvenida()\n\n##############################################################################\n \ndef parquear_carro(usuario):\n \n ###lista donde se almacenan los contadores de vaciops y ocupados por piso\n vacios=[]\n ocupados=[]\n \n espacios_segun_estudiante = []\n espacios_segun_administrativo = []\n espacios_segun_profesor = []\n \n #lee el archivo \n pisos = archivo_pisos_r()\n distribucion = archivo_profeParqueados_r()\n #distribucion_pisos_r()\n #comparar dos archivos \n \n \n print(\"\\n\\n\\n\", \"Informacion de control:\",\n \"\\n Nombre: \", usuario[0],\n \"\\n Identificacion:\", usuario[1],\n \"\\n Tipo Usuario:\", usuario[2],\"\\n\", \n \"Placa:\", usuario[3],\n \"\\n Tipo Vehiculo:\", usuario[4],\n \"\\n Plan:\", usuario[5],\"\\n\\n\")\n \n if usuario[4]==\"Automóvil\":\n 
usuario[4]=1\n elif usuario[4]==\"Automóvil Eléctrico\":\n usuario[4]=2\n elif usuario[4]==\"Motocicleta\":\n usuario[4]=3\n elif usuario[4]==\"Discapacitado\":\n usuario[4]=4\n \n if usuario[2]==\"Estudiante\":\n usuario[2]=1\n elif usuario[2]==\"Profesor\":\n usuario[2]=2\n elif usuario[2]==\"Personal Administrativo\":\n usuario[2]=3\n \n #contar posiciones ocupadas y vacias\n for p in range(1,7): #para cada numero de pisos establecemos contadores\n contador_vacios = 0\n contador_ocupados = 0\n \n #contadores para el archivo txt\n contador_estudiante = 0\n contador_administrativo = 0\n contador_profesor = 0\n \n #p para pisos\n if(p==6):\n limit_columna = 10\n limit_fila = 5\n \n else:\n limit_columna = 10\n limit_fila = 10\n\n #Todos los pisos tienen 10 filas\n #todos los pisos tienen 10 columnas exceto piso 6\n \n \n for fila in range(limit_fila):\n for columna in range(limit_columna):\n #forma para recorrer piso 7 y 8. En estos pisos estan las distribucion de puestos \n #Luego saber que posiciones podrian estar disponibles con un contador\n \n if(distribucion[f'Piso{p}'][fila][columna] == int(usuario[4]) and p!=6):\n if(pisos[f'Piso{p}'][fila][columna]==0):\n contador_vacios += 1\n\n \n elif(pisos[f'Piso{p}'][fila][columna]==\"x\"):\n contador_ocupados += 1\n \n if(usuario[2]==1):\n contador_estudiante += 1\n\n elif(usuario[2]==2):\n contador_profesor += 1\n \n elif(usuario[2]==3):\n contador_administrativo += 1\n \n elif(p==6 and distribucion['Piso6'][fila][columna] == int(usuario[4])):\n \n if(pisos['Piso6'][fila][columna]==0):\n contador_vacios += 1\n \n elif(pisos['Piso6'][fila][columna]==\"x\"):\n contador_ocupados += 1\n \n if(usuario[2]==1):\n contador_estudiante += 1\n\n elif(usuario[2]==2):\n contador_profesor += 1\n \n elif(usuario[2]==3):\n contador_administrativo += 1\n \n \n vacios.append(contador_vacios)\n ocupados.append(contador_ocupados)\n \n \n #Error a corregir \n espacios_segun_estudiante.append(contador_estudiante)\n espacios_segun_profesor.append(contador_profesor)\n espacios_segun_administrativo.append(contador_administrativo)\n \n espacio_estudiantes = str(espacios_segun_estudiante[0]) \n espacio_profesores = str(espacios_segun_profesor[0])\n espacio_administrativos = str(espacios_segun_administrativo[0]) \n \n \n texto_estudiantes = \"Cantidad de vehiculos estacionados para Estudiantes: \"\n archivo_reportes_tipo_usuarios_w(texto_estudiantes, espacio_estudiantes)\n \n texto_profesores = \"Cantidad de vehiculos estacionados para Profesores: \"\n archivo_reportes_tipo_usuarios_w(texto_profesores, espacio_profesores)\n \n texto_administrativo = \"Cantidad de vehiculos estacionados para Administrativos: \"\n archivo_reportes_tipo_usuarios_w(texto_administrativo, espacio_administrativos)\n \n \n print(\"\\n########################################################\",\n \"\\nPara el tipo de vehiculo que el usuario posee, tenemos: \\n\"\n \"\\nPiso 1:\",\n \"\\nEspacios Ocupados: \",ocupados[0], \" - Espacios restantes: \",vacios[0],\n \"\\n\\nPiso 2:\",\n \"\\nEspacios Ocupados: \",ocupados[1], \" - Espacios restantes: \",vacios[1],\n \"\\n\\nPiso 3:\",\n \"\\nEspacios Ocupados: \",ocupados[2], \" - Espacios restantes: \",vacios[2],\n \"\\n\\nPiso 4:\",\n \"\\nEspacios Ocupados: \",ocupados[3], \" - Espacios restantes: \",vacios[3],\n \"\\n\\nPiso 5:\",\n \"\\nEspacios Ocupados: \",ocupados[4], \" - Espacios restantes: \",vacios[4],\n \"\\n\\nPiso 6:\",\n \"\\nEspacios Ocupados: \",ocupados[5], \" - Espacios restantes: \",vacios[5],\n 
\"\\n########################################################\")\n \n piso_estacionar = eval(input(\"En que piso desea estacionar?: \"))\n \n \n matriz_a = [['A1 ','B1',' C1',' D1',' E1',' F1',' G1',' H1',' I1',' J1'],\n ['A2 ','B2',' C2',' D2',' E2',' F2',' G2',' H1',' I2',' J2'],\n ['A3 ','B3',' C3',' D3',' E3',' F3',' G3',' H1',' I3',' J3'],\n ['A4 ','B4',' C4',' D4',' E4',' F4',' G4',' H1',' I4',' J4'],\n ['A5 ','B5',' C5',' D5',' E5',' F5',' G5',' H1',' I5',' J5'],\n ['A6 ','B6',' C6',' D6',' E6',' F6',' G6',' H1',' I6',' J6'],\n ['A7 ','B7',' C7',' D7',' E7',' F7',' G7',' H1',' I7',' J7'],\n ['A8 ','B8',' C8',' D8',' E8',' F8',' G8',' H1',' I8',' J8'],\n ['A9 ','B9',' C9',' D9',' E9',' F9',' G9',' H1',' I9',' J9'],\n ['A10','B10','C10','D10','E10','F10','G10','H10','I10','J10']]\n \n matriz_b = [['A1 ','B1',' C1',' D1',' E1',' F1',' G1',' H1',' I1',' J1'],\n ['A2 ','B2',' C2',' D2',' E2',' F2',' G2',' H1',' I2',' J2'],\n ['A3 ','B3',' C3',' D3',' E3',' F3',' G3',' H1',' I3',' J3'],\n ['A4 ','B4',' C4',' D4',' E4',' F4',' G4',' H1',' I4',' J4'],\n ['A5 ','B5',' C5',' D5',' E5',' F5',' G5',' H1',' I5',' J5']]\n \n #MISMA LOGICA DE CONTAR ESPACIOS VACIOS Y LLENOS (ANTERIOR)\n if(piso_estacionar==6):\n limit_columna = 10\n limit_fila = 5\n \n for fila in range(limit_fila):\n print()\n for columna in range(limit_columna):\n\n #DOS SIGUIENTES IF PARA IMPRIMIR LETRA VERDE SI \n if(distribucion['Piso6'][fila][columna]==int(usuario[4]) and pisos[f'Piso{piso_estacionar}'][fila][columna]==0):\n print(Fore.GREEN, matriz_b[fila][columna], end='')\n \n else:\n matriz_b[fila][columna]=\" X \"\n print(Fore.RED, matriz_b[fila][columna], end='')\n print(Style.RESET_ALL)\n \n if(piso_estacionar!=6):\n limit_columna = 10\n limit_fila = 10\n\n for fila in range(limit_fila):\n print()\n for columna in range(limit_columna):\n\n #DOS SIGUIENTES IF PARA IMPRIMIR LETRA VERDE SI \n if(distribucion[f'Piso{piso_estacionar}'][fila][columna]==int(usuario[4]) and pisos[f'Piso{piso_estacionar}'][fila][columna]==0):\n print(Fore.GREEN, matriz_a[fila][columna], end='')\n \n else:\n matriz_a[fila][columna]=\" X \"\n print(Fore.RED, matriz_a[fila][columna], end='')\n print(Style.RESET_ALL)\n \n #LUGAR DONDE ESTACIONAR\n lugar_estacionar = [] \n columna_estacionar = input(\"En que columna desea estacionar?: \").lower()\n fila_estacionar = eval(input(\"En que fila desea estacionar?: \"))\n\n lugar_estacionar.append(columna_estacionar)\n lugar_estacionar.append(fila_estacionar)\n\n \n #Convertir letras minusculas en numeros FUENTE: https://stackoverflow.com/questions/3246262/python-how-do-i-assign-values-to-letters\n values = dict()\n for index, letter in enumerate(string.ascii_lowercase):\n values[letter] = index + 1\n \n columna = int(values[lugar_estacionar[0]])\n fila = int(lugar_estacionar[1])\n \n #Sobreescribir diccionario: piso elegido con la posicon elegida. 
 Se resta 1 porque la posicion [0][0] de la matriz corresponde al puesto A1\n    \n    pisos[f'Piso{piso_estacionar}'][fila-1][columna-1]=\"x\"\n    archivo_pisos_w(pisos)\n    \n    #PUEDE SER LA HORA AUTOMATICA PERO PARA FACILIDAD DE CALCULOS Y SIMULAR PERIODOS DE TIEMPO LARGOS\n    ahora = datetime.now()\n\n    #registro = [ID, PLACA, HORA Y FECHA, PISO, LUGAR, PLAN, TIPO USUARIO]\n    registro = [usuario[1], usuario[3], str(ahora), piso_estacionar, lugar_estacionar, usuario[5], usuario[2]]\n    \n    \n    #leer archivo\n    archivo = archivo_parqueados_r(ruta_parqueados)\n    archivo['Parqueados'].append(registro)\n    #grabar archivo\n    archivo_parqueados_w(ruta_parqueados, archivo)\n    \n    print(\"\\n\") #USO DE TABULATE: https://stackoverflow.com/questions/41140647/python-printing-lists-with-tabulate/43706325\n    print(\"################### IMPRESION DE TICKET ########################\")\n    headers = [\"Identificacion\", \"Fecha Hora\", \"Lugar Estacionamiento\"]\n    print(tabulate([[registro[0],registro[1],registro[2]]], headers, tablefmt=\"grid\"))\n    \n    return\n\n##############################################################################\n\n#Funcion para retirar Carro\ndef retirar_carro(usuario):\n    \n    #LEE PISOS Y ARCHIVO PARQUEADOS\n    archivo = archivo_parqueados_r(ruta_parqueados)\n    pisos = archivo_pisos_r()\n    \n    #RECORRE ARCHIVO PARQUEADOS\n    for x in range (len(archivo['Parqueados'])):\n        if usuario[1]==archivo['Parqueados'][x][0]:\n            \n            print(\"\\n\\n\\n\",Back.GREEN,\"Informacion de control:\", Back.RESET,\n                  \"\\n Nombre: \",Back.BLUE, usuario[0], Back.RESET,\n                  \"\\n Identificacion:\", usuario[1],\n                  \"\\n Tipo Usuario:\", usuario[2],\"\\n\", \n                  \"Placa:\", usuario[3],\n                  \"\\n Tipo Vehiculo:\", usuario[4],\n                  \"\\n Plan:\", usuario[5],\n                  \"\\n Piso:\", archivo['Parqueados'][x][3],\n                  \"\\n Lugar de estacionamiento:\", archivo['Parqueados'][x][4],\n                  \"\\n Fecha de llegada (DD/MM/AAAA HH:MM-> 24H):\", archivo['Parqueados'][x][2],\n                  \"\\n\\n\") \n            \n            #los valores deben coincidir con los que guarda solicitud_datos (con tildes)\n            if usuario[4]==\"Automóvil\":\n                usuario[4]=1\n            elif usuario[4]==\"Automóvil Eléctrico\":\n                usuario[4]=2\n            elif usuario[4]==\"Motocicleta\":\n                usuario[4]=3\n            elif usuario[4]==\"Discapacitado\":\n                usuario[4]=4\n            \n            if usuario[2]==\"Estudiante\":\n                usuario[2]=1\n            elif usuario[2]==\"Profesor\":\n                usuario[2]=2\n            elif usuario[2]==\"Personal Administrativo\":\n                usuario[2]=3\n            \n            if usuario[5]==\"Mensualidad\":\n                usuario[5]=1\n            elif usuario[5]==\"Diario\":\n                usuario[5]=2\n            \n\n            #Si el Usuario tiene una mensualidad \n            if usuario[5]==1:\n                print()\n                print(\"No debe realizar ningun pago\")\n\n                delete_parqueados = archivo['Parqueados'].pop(x)\n                archivo_parqueados_w(ruta_parqueados, archivo)\n                \n                \n                piso = delete_parqueados[3]\n                fila = delete_parqueados[4][1]\n                \n                values = dict()\n                for index, letter in enumerate(string.ascii_lowercase):\n                    values[letter] = index + 1\n                \n                columna = int(values[delete_parqueados[4][0]])\n                \n                \n                pisos[f'Piso{piso}'][fila-1][columna-1]=0\n                archivo_pisos_w(pisos)\n                \n                print()\n                print(\"Procedimiento completado!!!\",\n                      \"\\n\",\n                      )\n                \n                return\n            \n            #Si no tiene mensualidad\n            elif usuario[5]==2:\n                print(\"Debe realizar el pago\")\n                \n                delete_parqueados = archivo['Parqueados'].pop(x)\n                archivo_parqueados_w(ruta_parqueados, archivo)\n                \n                \n                piso = delete_parqueados[3]\n                fila = delete_parqueados[4][1]\n                \n                values = dict()\n                for index, letter in enumerate(string.ascii_lowercase):\n                    values[letter] = index + 1\n                \n                columna = int(values[delete_parqueados[4][0]])\n                \n                \n                pisos[f'Piso{piso}'][fila-1][columna-1]=0\n                archivo_pisos_w(pisos)\n                \n                print()\n                \n                #Se procede a realizar el pago\n                #USO DE:
 timedelta \n                minutes = int(input(\"Digite el numero de minutos que estuvo en el parqueadero: \"))\n                \n                #los contadores se inicializan antes del bucle para acumular el total de minutos\n                pago_visitantes = 3000\n                pago_minutos = 0\n                pago_tipo_usuario = 0\n                \n                for x in range(minutes):\n                    if(delete_parqueados[6] == 1):\n                        pago_minutos = pago_minutos + 1\n                        pago_tipo_usuario = 1000\n                        \n                    elif(delete_parqueados[6] == 2):\n                        pago_minutos = pago_minutos + 1\n                        pago_tipo_usuario = 2000\n                    \n                    elif(delete_parqueados[6] == 3):\n                        pago_minutos = pago_minutos + 1\n                        pago_tipo_usuario = 1500\n                \n                total = pago_minutos * (pago_tipo_usuario + pago_visitantes)\n                \n                print()\n                print(\"El total a pagar es de\", total, \"$\" )\n                \n                times = datetime.now()\n                print(\"Fecha de salida: \", times)\n                \n                \n                print()\n                print(\"Procedimiento completado!!!\",\n                      \"\\n\",\n                      \"\\nSu usuario quedara guardado en la base de datos, puede usar la opcion Alterar/Borrar Usuario\") \n                return\n            \n            else:\n                return\n    \n    \n##############################################################################\n\n#Solicitud de datos usuario: devuelve la lista \ndef solicitud_datos(alteracion):\n    nombre = input(\"Nombre completo: \").strip()\n    \n    #Necesario para funcion de alterar usuario buscado por identificacion\n    if(alteracion==False):\n        identificacion = input(\"Identificacion: \").strip()\n    elif(alteracion==True):\n        identificacion = 0\n    \n    tipo_u = int(input(\"Tipo de usuario \\n1. Estudiante \\n2. Profesor \\n3. Personal Administrativo \\nOpcion: \"))\n    if (tipo_u == 1):\n        tipo_u = \"Estudiante\"\n    elif (tipo_u == 2):\n        tipo_u = \"Profesor\"\n    elif (tipo_u == 3):\n        tipo_u = \"Personal Administrativo\"\n    \n    placa = input(\"Placa: \").strip()\n    \n    tipo_v = int(input(\"Tipo de vehículo \\n1. Automóvil \\n2. Automóvil Eléctrico \\n3. Motocicleta \\n4. Discapacitado \\nOpcion escrita: \"))\n    if (tipo_v == 1):\n        tipo_v = \"Automóvil\"\n    elif (tipo_v == 2):\n        tipo_v = \"Automóvil Eléctrico\"\n    elif (tipo_v == 3):\n        tipo_v = \"Motocicleta\"\n    elif (tipo_v == 4):\n        tipo_v = \"Discapacitado\"\n\n    if(alteracion==False): \n        plan = int(input(\"Plan: \\n1. Mensualidad\\n2.
 Diario \\nOpcion escrita: \"))\n        if (plan==1):\n            plan = \"Mensualidad\"\n        elif (plan==2):\n            plan = \"Diario\"\n    else:\n        plan=\"Diario\"\n    \n    ingreso = [nombre, identificacion, tipo_u, placa, tipo_v, plan]\n    return ingreso\n    \n##############################################################################\n####################   PROCESANDO ARCHIVOS   #################################\n##############################################################################\n    \n#Lectura del archivo JSON usuarios\n#verificacion si archivo existe \n#Si el archivo está vacio o no existe, hacer un load generaria un error\n#En caso de estar vacio, devuelve datos vacios\n#En caso de tener datos, carga los datos y los devuelve para adicionar nuevo registro (append)\ndef archivo_usuarios_r():\n    #Inicializacion de diccionarios\n    datos = {}\n    datos['usuarios'] = []\n\n    \n    try:\n        with open (ruta_usuarios,'r', encoding=\"utf-8\") as file:\n            datos = json.load(file)\n            return datos\n    except:\n        print(f\"la Base de datos {ruta_usuarios} no ha sido encontrada\")\n        return datos #si no hay datos significa que no hay nada es decir retorna el diccionario vacio\n\n#Escritura del archivo JSON\ndef archivo_usuarios_w(datos):\n    try:\n        with open(ruta_usuarios, 'w', encoding=\"utf-8\") as file:\n            json.dump(datos, file)\n            return\n    except:\n        print(f\"la Base de datos {ruta_usuarios} no ha sido encontrada\")\n\n#Lectura del archivo JSON PISOS \ndef archivo_pisos_r():\n    #se inicializa vacio para poder devolverlo si la lectura falla\n    pisos = {}\n    try:\n        with open (ruta_pisos,'r', encoding=\"utf-8\") as file:\n            pisos = json.load(file)\n            return pisos\n    except:\n        print(f\"la Base de datos {ruta_pisos} no ha sido encontrada\")\n        return pisos\n    \ndef archivo_pisos_w(pisos):\n    try:\n        with open(ruta_pisos, 'w', encoding=\"utf-8\") as file:\n            json.dump(pisos, file)\n    except:\n        print(f'\\nLa Base de datos {ruta_pisos} no ha sido encontrada')\n    \n#FUNCIONES PARA ESCRITURA Y LECTURA DE CUALQUIER OTRO ARCHIVO\n\ndef archivo_parqueados_w(ruta, archivo):\n    \n    try:\n        with open(ruta, 'w', encoding=\"utf-8\") as file:\n            json.dump(archivo, file)\n    except:\n        print(f'\\nLa Base de datos {ruta} no ha sido encontrada')\n    \ndef archivo_parqueados_r(ruta):\n    archivo = {}\n    archivo['Parqueados'] = []\n    \n\n    with open (ruta,'r', encoding=\"utf-8\") as file:\n        archivo = json.load(file)\n        return archivo\n\n    \ndef archivo_profeParqueados_r():\n    try:\n        with open(ruta_profeParqueados, 'r', encoding=\"utf-8\") as file:\n            distribucion = json.load(file)\n            return distribucion\n    except:\n        print(f'\\nla Base de datos {ruta_profeParqueados} no ha sido encontrada')\n    \n\ndef archivo_reportes_tipo_vehiculos_r():\n    try: \n        archivo = open(ruta_reportes_tipo_vehiculos, 'r')\n        for linea in archivo:\n            print(linea)\n        archivo.close()\n    except:\n        print(f'\\nLa Base de datos {ruta_reportes_tipo_vehiculos} no ha sido encontrada')\n    \ndef archivo_reportes_tipo_vehiculos_w(texto):\n    #recibe el texto a escribir; antes se intentaba escribir el propio objeto archivo\n    try:\n        archivo = open(ruta_reportes_tipo_vehiculos, 'w')\n        archivo.write(texto)\n        archivo.close()\n    except:\n        print(f'\\nLa Base de datos {ruta_reportes_tipo_vehiculos} no ha sido encontrada')\n    \ndef archivo_reportes_tipo_usuarios_r():\n    try: \n        archivo = open(ruta_reportes_tipo_usuarios, 'r')\n        for linea in archivo:\n            print(linea)\n        archivo.close()\n    except:\n        print(f'\\nLa Base de datos {ruta_reportes_tipo_usuarios} no ha sido encontrada')\n    \ndef archivo_reportes_tipo_usuarios_w(texto, tipo_usuario):\n    try:\n        with open(ruta_reportes_tipo_usuarios, 'w') as fichero:\n            fichero.writelines(texto)\n            fichero.writelines(tipo_usuario)\n    except:\n        print(f'\\nLa Base de datos {ruta_reportes_tipo_usuarios} no ha sido encontrada')\n    \n
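#Nota: las funciones anteriores centralizan la lectura (_r) y la escritura (_w) de los archivos JSON y TXT que usa el sistema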
\n##############################################################################\n############################## MAIN ##################################\n##############################################################################\nsalir = \"no\"\n\nwhile(salir==\"no\"):\n chao = bienvenida()\n if(chao==True):\n break\n else:\n salir=\"no\"\n \nprint(Back.GREEN, \"Gracias por utilizar el sistema\", Back.RESET)\n\n","sub_path":"proyecto - Copy.py","file_name":"proyecto - Copy.py","file_ext":"py","file_size_in_byte":32833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"490479376","text":"import tkinter\r\n\r\nwindow=tkinter.Tk()\r\nwindow.title(\"PYTHON\")\r\nwindow.geometry(\"640x400+100+100\")\r\nwindow.resizable(False, False)\r\n\r\ncount=0\r\n\r\ndef countUP():\r\n global count\r\n count +=1\r\n label.config(text=str(count))\r\n\r\nlabel = tkinter.Label(window, text=\"0\")\r\nlabel.pack()\r\n\r\nbutton = tkinter.Button(window, overrelief=\"solid\", width=15, command=countUP, repeatdelay=1000,\r\n repeatinterval=100)\r\nbutton.pack()\r\n\r\nwindow.mainloop()\r\n","sub_path":"tkinterface/Python-tkinter3.py","file_name":"Python-tkinter3.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"518665718","text":"#! /usr/bin/env python\n#! -*- coding:utf-8 -*-\n\n# Copyright (c) 2007, PediaPress GmbH\n# See README.txt for additional licensing information.\n\nfrom __future__ import division\nimport re\nimport weakref\n\nfrom mwlib.advtree import Center\nfrom mwlib.htmlcolornames import colorname2rgb_map\n\ndef _colorFromStr(colorStr):\n\n def hex2rgb(r, g, b):\n try:\n return (int(r, 16) / 255, int(g, 16) / 255, int(b, 16) / 255)\n except ValueError:\n return None\n def hexshort2rgb(r, g, b):\n try:\n return (int(2*r, 16) / 255, int(2*g, 16) / 255, int(2*b, 16) / 255)\n except:\n return None \n def rgb2rgb(r, g, b):\n try:\n return (int(r) / 255, int(g) / 255, int(b) / 255)\n except ValueError:\n return None\n def colorname2rgb(colorStr):\n rgb = colorname2rgb_map.get(colorStr.lower(), None)\n if rgb:\n return tuple(channel/255 for channel in rgb)\n else:\n return None\n \n try:\n colorStr = str(colorStr)\n except:\n return None\n rgbval = re.search('rgb\\( *(\\d{1,}) *, *(\\d{1,3}) *, *(\\d{1,3}) *\\)', colorStr) \n hexval = re.search('#?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})', colorStr)\n hexvalshort = re.search('#([0-9a-f])([0-9a-f])([0-9a-f])', colorStr)\n if rgbval:\n return rgb2rgb(rgbval.group(1), rgbval.group(2), rgbval.group(3))\n elif hexval:\n return hex2rgb(hexval.group(1), hexval.group(2), hexval.group(3))\n elif hexvalshort:\n return hexshort2rgb(hexvalshort.group(1), hexvalshort.group(2), hexvalshort.group(3))\n else:\n return colorname2rgb(colorStr)\n return None\n\n\ndef _rgb2GreyScale(rgb_triple, darknessLimit=1):\n grey = min(1, max(darknessLimit, 0.3*rgb_triple[0] + 0.59*rgb_triple[1] + 0.11*rgb_triple[2] ))\n return (grey, grey, grey)\n\ndef rgbBgColorFromNode(node, greyScale=False, darknessLimit=0):\n \"\"\"Extract background color from node attributes/style. Result is a rgb triple w/ individual values between [0...1]\n\n The darknessLimit parameter is only used when greyScale is requested. 
This is for b/w output formats that do not\n    switch text-color.\n    \"\"\"\n\n    colorStr = node.attributes.get('bgcolor', None) or \\\n               node.style.get('background') or \\\n               node.style.get('background-color')\n    \n    color = None\n    if colorStr:\n        color = _colorFromStr(colorStr.lower())\n    if greyScale and color:\n        return _rgb2GreyScale(color, darknessLimit)\n    return color\n\ndef rgbColorFromNode(node, greyScale=False, darknessLimit=0):\n    \"\"\"Extract text color from node attributes/style. Result is a rgb triple w/ individual values between [0...1]\"\"\"\n\n    colorStr = node.style.get('color', None) or \\\n               node.attributes.get('color', None)\n    color = None\n    if colorStr:\n        color = _colorFromStr(colorStr.lower())\n    if greyScale and color:\n        return _rgb2GreyScale(color, darknessLimit)\n    return color\n\ndef alignmentFromNode(node):\n    align = node.style.get('text-align', '').lower()\n    if align in ['right', 'left', 'center', 'justify']:\n        return align\n    else: # special handling for nodes inside a center tag\n        try:\n            if node.getParentNodesByClass(Center):\n                return 'center'\n        except weakref.ReferenceError: # FIXME: this is only a workaround for a bug related to mwlib.rl table reformatting\n            return None\n    return None\n\n","sub_path":"utils/mwlib/styleutils.py","file_name":"styleutils.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"310194230","text":"#Elaborado por:Andrés Jiménez Mora y Juan Vásquez Castro\r\n#Fecha de creación:11/09/2017,3:30pm\r\n#Última modificación:11/09/2017,4:22pm\r\n#Versión de python:3.6.2\r\n\r\n#Funciones\r\n\r\n\"\"\"\r\nFunción:algoritmo que determina el número de la suerte(suma del dia y el año)\r\nEntradas:recibe una variable fecha.\r\nSalidas:imprime el número de la suerte.\r\nRestricciones:no tiene.\r\n\"\"\"\r\ndef numSuerte(fecha):\r\n    n=len(fecha)\r\n    i=0\r\n    suma=0\r\n    while i!=n:\r\n        pos=fecha[i]\r\n        x=pos.isdigit()\r\n        if x==True:\r\n            suma+=int(pos)\r\n            i+=1\r\n        else:\r\n            i+=1\r\n    return \"Juegue el número: \"+str(suma)+\".\"\r\n\r\n\"\"\"\r\nFunción:algoritmo que determina la cantidad de vocales y consonantes de una palabra.\r\nEntradas:recibe una palabra.\r\nSalidas:imprime la cantidad de vocales o consonantes de una palabra.\r\nRestricciones:no tiene.\r\n\"\"\"\r\ndef ContarConsonantesVocales(frase):\r\n    vocales = list(\"aeiou\")\r\n    consonantes = list(\"bcdfghjklmnpqrstvxz\")\r\n    numeroConsonantes = sum(frase.lower().count(i) for i in consonantes)\r\n    numeroVocales = sum(frase.lower().count(i) for i in vocales)\r\n    return \"El número de consonantes en la frase es de: \"+str(numeroConsonantes)+\".\\n\"\"El número de vocales en la frase es de: \"+str(numeroVocales)+\".\"\r\n    \r\n\r\n\"\"\"\r\nFunción:algoritmo que determina si una palabra es palíndromo.\r\nEntradas:recibe una palabra o número incluso.\r\nSalidas:imprime si la palabra ingresada es palíndromo.\r\nRestricciones:no tiene.\r\n\"\"\"\r\ndef palindrome(word):\r\n    n=len(word)\r\n    m=n-1\r\n    x=0\r\n    while x<m:\r\n        pos1=word[x]\r\n        pos2=word[m]\r\n        if pos1==pos2:\r\n            x+=1\r\n            m-=1\r\n        else:\r\n            return 'La palabra ingresada no es palíndromo.'\r\n    return 'La palabra ingresada es palíndromo.'\r\n\r\n","sub_path":"FuncionesLabStrings.py","file_name":"FuncionesLabStrings.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"515702596","text":"\"\"\"Drag program.\n\nThis simulates zero-lift/parasite drag.
 That is, all drag is induced\nparallel to flight.\n\nThis is heavily based on the Drag Coefficient Prediction paper.\n\"\"\"\n\nfrom sympy import *\nfrom math import atan, log10, asin\nfrom math import pi as PI, e\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom math import exp, sqrt\n\n\nclass Rocket:\n    def __init__(self):\n        \"\"\"\n        Initialize variables for simulation.\n\n        Variables are lbs, ft, ...\n        \"\"\"\n        self.nose_cone_length = 1.333 # Length of nose cone\n        self.body_tube_length = 5.333 # Length of body tube\n        self.length = self.nose_cone_length + self.body_tube_length\n\n        self.radius = 0.1666 # Maximum radius of rocket\n        self.boat_tail_radius = .1666 # Minimum boat tail radius\n        self.diameter = 2 * self.radius \n\n        self.K = 0.0004 # Coefficient of \"paint\"...\n\n        self.number_of_fins = 4\n        self.fin_length = .333\n        self.fin_height_long = 1 # Fin root chord\n        self.fin_height_short = 0.333 # Fin tip chord\n        self.fin_thickness = .0833\n        self.fin_lambda = self.fin_height_short/self.fin_height_long\n        self.x = self.fin_length / 8 # Distance from fin leading edge to maximum thickness\n\n        self.K_f = 1.04 # mutual interference factor, used to simulate interference drag\n\n        self.a = (self.radius**2 +\n                  self.nose_cone_length**2)/(2*self.radius)\n\n        # Total wetted surface area of rocket (approximation)\n        self.S_B = (\n            2 * PI * (\n                (self.radius - self.a) *\n                asin(\n                    self.nose_cone_length / self.a\n                ) + self.nose_cone_length\n            )\n        )\n\nif __name__ == \"__main__\":\n    pass\n","sub_path":"src/simulation/Rocket.py","file_name":"Rocket.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"296435311","text":"def verse(n):\n    gift_list = gifts(n)\n    if len(gift_list) > 1:\n        gift_list[-1] = 'and ' + gift_list[-1]\n    gift_string = ', '.join(gift_list)\n    template = \"On the {0} day of Christmas my true love gave to me, {1}.\\n\"\n    return template.format(day(n), gift_string)\n\ndef verses(start, end):\n    song = [verse(i) for i in range(start, end+1)]\n    return \"\\n\".join(song) + \"\\n\"\n\ndef sing():\n    return verses(1, 12)\n\n### private ###\n\nDAYS = [ 'first', 'second', 'third', 'fourth',\n         'fifth', 'sixth', 'seventh', 'eighth',\n         'ninth', 'tenth', 'eleventh', 'twelfth' ]\n\nGIFTS = [\n    'a Partridge in a Pear Tree',\n    'two Turtle Doves',\n    'three French Hens',\n    'four Calling Birds',\n    'five Gold Rings',\n    'six Geese-a-Laying',\n    'seven Swans-a-Swimming',\n    'eight Maids-a-Milking',\n    'nine Ladies Dancing',\n    'ten Lords-a-Leaping',\n    'eleven Pipers Piping',\n    'twelve Drummers Drumming',\n    ]\n\ndef day(n):\n    return DAYS[n-1]\n\ndef gifts(n):\n    if n == 0:\n        return []\n    else:\n        return [GIFTS[n-1]] + gifts(n-1)\n","sub_path":"all_data/exercism_data/python/twelve-days/00b1d1e7bcb0475c9ef4a295bd91f950.py","file_name":"00b1d1e7bcb0475c9ef4a295bd91f950.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"36005663","text":"from django.urls import path\nfrom .views import register,login_view,logout_view\n\nurlpatterns =[\n\n\tpath('signup/',register,name='signup'),\n\tpath('login/',login_view,name='login'),\n\tpath('logout/',logout_view,name='logout'),\n\t\n\n]","sub_path":"DJANGO/MyBlog/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"303361228","text":"# file_write.py\n\n\n# 此示例示意写文本文件\ntry:\n # 1. 打开文件\n # f = open(\"/myfile.txt\", 'w') # 失败\n f = open(\"myfile.txt\", 'w')\n print(\"打开文件成功\")\n\n # 2. 写文件\n f.write(\"这是第一行文字\")\n f.write(\"\\n\\n\")\n f.write(\"ABCDEFG\")\n f.writelines([\"aaaaaaaaaa\",\n \"bbbbbb\",\n \"cccccc\"])\n\n print(\"写文件成功\")\n\n # 3. 关闭文件\n f.close()\n print(\"文件已关闭\")\nexcept OSError:\n print(\"打开文件失败\")","sub_path":"练习专用/day16/code/file_write.py","file_name":"file_write.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"490973866","text":"from django.conf.urls.defaults import patterns, url\nfrom finance.views import TransactionReadUpdateDeleteView, TransactionListCreateView, EmailListView\n\nurlpatterns = patterns(\"\",\n # /api/transactions\n url(r'^transactions/$', \n TransactionListCreateView.as_view(), \n name='transaction_api'),\n\n # /api/transactions/123\n url(r'^transactions/(?P\\d+)/$', \n TransactionReadUpdateDeleteView.as_view(),\n name='transaction_api'),\n\n url(r'^alerts/$', \n EmailListView.as_view(),\n name='alert_api'),\n\n\n\n )\n","sub_path":"core/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"35213215","text":"import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nfrom RunPrep import *\nfrom RunModel import *\n\n\nnp.set_printoptions(threshold=np.nan)\n\n\n\n# deterministic\nth=12;\nflagD=1;\n[dataS, dataG] = RunPrep()\nSTIM = np.zeros(shape = (775));\nSTIM [84-1] = 0.00385\n[t, xoutG, xoutS] = RunModel(flagD, th, STIM, [], [], dataS, dataG, dataG.kTCleak, dataG.kTCmaxs)\nnp.savetxt('t_deterministic.csv', t, delimiter=',')\nnp.savetxt('xoutG_deterministic.csv', xoutG, delimiter=',')\nnp.savetxt('xoutS_deterministic.csv', xoutS, delimiter=',')\n\n\n\n\n# stochastic\nth=12;\nflagD=0\nSTIM = np.zeros(shape = (775));\nSTIM[156-1:162]=[3.3,100,100,100,100,100,1721]\n[dataS, dataG] = RunPrep()\n[t, xoutG, xoutS] = RunModel(flagD, th, STIM, [], [], dataS, dataG, dataG.kTCleak, dataG.kTCmaxs)\nnp.savetxt('t_stochastic.csv', t, delimiter=',')\nnp.savetxt('xoutG_stochastic.csv', xoutG, delimiter=',')\nnp.savetxt('xoutS_stochastic.csv', xoutS, delimiter=',')\n\n\n\n\n\n\n\n\nprint(\"done \")\n","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"205244698","text":"#!/usr/bin/python\n# -*- coding = utf-8 -*-\n\n\ndef main():\n for i in range(1,10):\n for j in range(10):\n for k in range(10):\n if i * 100 + j * 10 + k == i ** 3 + j ** 3 + k ** 3:\n print(i * 100 + j * 10 + k)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"605274887","text":"from pathlib import Path\nfrom tempfile import TemporaryDirectory\n\nimport matplotlib.pyplot as plt\n\nfrom pdfstream.utils.jupyter import savefig_factory, FigExporter\n\n\ndef test_savefig_factory():\n plt.figure()\n with TemporaryDirectory() as d:\n figure_forlder = Path(d) / \"figures\"\n savefig = savefig_factory(figure_dir=figure_forlder)\n fname = 'test.png'\n savefig(fname)\n figure_path = figure_forlder / fname\n assert figure_path.exists()\n plt.clf()\n\n\ndef 
test_FigExporter():\n plt.figure()\n with TemporaryDirectory() as d:\n figure_dir = Path(d) / \"test\"\n exporter = FigExporter(str(figure_dir))\n exporter.update(dpi=40)\n exporter(\"test.svg\")\n exporter.latex()\n target = figure_dir / \"test.svg\"\n assert target.is_file()\n plt.clf()\n","sub_path":"tests/utils/test_jupyter.py","file_name":"test_jupyter.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"67065261","text":"import numpy as np\nimport theano\nimport gzip\nimport cPickle\nimport theano.tensor as T\nfrom random import shuffle\nfrom random import randint\n\n\ndef load_mnist(mnist_path):\n f = gzip.open(mnist_path)\n train_set, valid_set, test_set = cPickle.load(f)\n\n return train_set, valid_set, test_set\n\ndef expandData(X,y):\n newX = []\n labels = []\n null = [1]*230\n for line,label in zip(X,y):\n for i in xrange(30):\n shuffle(line)\n labels.append(label)\n newline = []\n for i in range(20):\n if i < len(line):\n newline.extend(line[i])\n else:\n newline.extend(line[randint(0,len(line) - 1)])\n newX.append(newline)\n return (newX,labels)\n\n\ndef shared_dataset(data_xy, borrow=True):\n \"\"\" Function that loads the dataset into shared variables\n\n The reason we store our dataset in shared variables is to allow\n Theano to copy it into the GPU memory (when code is run on GPU).\n Since copying data into the GPU is slow, copying a minibatch everytime\n is needed (the default behaviour if the data is not in a shared\n variable) would lead to a large decrease in performance.\n \"\"\"\n data_x, data_y = data_xy\n shared_x = theano.shared(np.asarray(data_x,\n dtype=theano.config.floatX),\n borrow=borrow)\n shared_y = theano.shared(np.asarray(data_y,\n dtype=theano.config.floatX),\n borrow=borrow)\n # When storing data on the GPU it has to be stored as floats\n # therefore we will store the labels as ``floatX`` as well\n # (``shared_y`` does exactly that). But during our computations\n # we need them as ints (we use labels as index, and if they are\n # floats it doesn't make sense) therefore instead of returning\n # ``shared_y`` we will have to cast it to int. 
This little hack\n    # lets us get around this issue\n    return shared_x, T.cast(shared_y, 'int32')\n\n\ndef init_diagnal(size, name=None, scale=1):\n    init_w = np.asarray(\n        np.identity(size) * scale,\n        dtype=theano.config.floatX\n    )\n    return theano.shared(value=init_w, name=name, borrow=True)\n\n\ndef init_uniform(size, name=None, low=-0.01, high=0.01):\n    init_w = np.asarray(\n        np.random.uniform(\n            size=size,\n            low=low,\n            high=high,\n        ),\n        dtype=theano.config.floatX\n    )\n    return theano.shared(value=init_w, name=name, borrow=True)\n\n\ndef init_constant(size, name=None, value=0):\n    init_v = np.asarray(\n        np.ones(size) * value,\n        dtype=theano.config.floatX\n    )\n    return theano.shared(value=init_v, name=name, borrow=True)\n","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"55618895","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 18 12:31:16 2019\n\n@author: brgupta\n\"\"\"\n\n# Problem statement\n# https://www.hackerrank.com/challenges/text-wrap/problem\n\nimport textwrap\n\ndef wrap(string, max_width):\n    txwr = textwrap.TextWrapper(width=max_width)\n    txt = txwr.wrap(text=string)\n    txt = '\\n'.join(txt)\n    return txt\n","sub_path":"Python/text_wrap.py","file_name":"text_wrap.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"86047865","text":"'''\r\nimport simplegui\r\ndef key_handler(key):\r\n    …\r\nframe = simplegui.create_frame('Testing', 100, 100)\r\nframe.set_keydown_handler(key_handler)\r\nframe.start()\r\n\r\n# frame.set_keydown_handler(key_handler)\r\n# frame.set_keyup_handler(key_handler)\r\n\r\n# Opens frame with active keydown handler\r\n\r\nThese add keyboard event handlers waiting for keydown, \r\nand keyup events, respectively. \r\n\r\nWhen any key is pressed, the keydown handler is called once. \r\nWhen any key is released, the keyup handler is called once.\r\n\r\nThe handler for each should be defined with one parameter, \r\nas in the above example. 
This parameter will receive an \r\ninteger representing a keyboard character.\r\n'''\r\n\r\n\r\n# Keyboard echo\r\n\r\nimport simpleguitk as simplegui\r\n\r\n# initialize state\r\ncurrent_key = ' '\r\n\r\n# event handlers\r\ndef keydown(key):\r\n global current_key\r\n current_key = chr(key)\r\n\r\ndef keyup(key):\r\n global current_key\r\n current_key = ' '\r\n \r\ndef draw(canvas):\r\n canvas.draw_text(current_key, [10, 45], 20, \"Red\")\r\n \r\n# create frame\r\nframe = simplegui.create_frame(\"Echo\", 35, 35)\r\n\r\n# register event handlers\r\nframe.set_keydown_handler(keydown)\r\nframe.set_keyup_handler(keyup)\r\nframe.set_draw_handler(draw)\r\n\r\n# start frame\r\nframe.start()\r\n","sub_path":"Applications/Pong/Resources/simpleguiKeyboard.py","file_name":"simpleguiKeyboard.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"651434786","text":"from django import forms\r\nfrom aplicaciones.encuestas.models import Eleccion, Preguntas\r\n\r\nclass PreguntaForm(forms.ModelForm):\r\n class Meta:\r\n model = Preguntas\r\n fields = '__all__'\r\n def __init__(self, *args, **kwargs):\r\n super(PreguntaForm, self).__init__(*args, **kwargs)\r\n for field in self.fields:\r\n self.fields[field].widget.attrs.update({'class': 'form-control'})\r\nclass EleccionForm(forms.ModelForm):\r\n class Meta:\r\n model = Eleccion\r\n fields = ('pregunta', 'eleccion_text', 'votes')\r\n def __init__(self, *args, **kwargs):\r\n super(EleccionForm, self).__init__(*args, **kwargs)\r\n for field in self.fields:\r\n self.fields[field].widget.attrs.update({'class': 'form-control'})","sub_path":"aplicaciones/encuestas/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"626534307","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport jsonfield.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('name', models.CharField(max_length=100, db_index=True)),\n ('props', jsonfield.fields.JSONField(verbose_name=b'Questions', blank=True)),\n ('begin', models.DateTimeField()),\n ('end', models.DateTimeField()),\n ('is_public', models.BooleanField(default=False, db_index=True)),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Survey',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('props', jsonfield.fields.JSONField()),\n ('is_approved', models.BooleanField(default=False, db_index=True)),\n ('event', models.ForeignKey(to='survey.Event')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('auth_type', 
models.CharField(default=b'facebook', max_length=10, db_index=True, choices=[(b'facebook', b'facebook')])),\n                ('uid', models.CharField(max_length=20, db_index=True)),\n                ('name', models.CharField(max_length=100, db_index=True)),\n                ('profile_image_url', models.URLField(null=True, blank=True)),\n                ('email', models.EmailField(max_length=75, null=True, blank=True)),\n            ],\n            options={\n                'abstract': False,\n            },\n            bases=(models.Model,),\n        ),\n        migrations.AddField(\n            model_name='survey',\n            name='user',\n            field=models.ForeignKey(to='survey.User'),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"festi/survey/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"537881445","text":"#!/usr/bin/env python\n# -*- coding: iso-8859-1 -*-\n# Copyright (C) 2013-18 Dr. Ralf Schlatterbeck Open Source Consulting.\n# Reichergasse 131, A-3411 Weidling.\n# Web: http://www.runtux.com Email: office@runtux.com\n# All rights reserved\n# #*** ************************************************************#\n# This module is licensed under the terms of the BSD 3-Clause License\n# .\n# #*** ***********************************************************#\n# ****************************************************************************\n\ntry :\n    from ff_spider.Version import VERSION\nexcept :\n    VERSION = None\nfrom distutils.core import setup\n\ndescription = []\nf = open ('README.rst')\nfor line in f :\n    description.append (line)\n\nlicense = 'BSD License'\nrq = '>=2.7, <3'\nsetup \\\n    ( name = \"ff-spider\"\n    , version = VERSION\n    , description = \"Spider for wireless community network\"\n    , long_description = ''.join (description)\n    , license = license\n    , author = \"Ralf Schlatterbeck\"\n    , author_email = \"rsc@runtux.com\"\n    , packages = ['ff_spider']\n    , platforms = 'Any'\n    , scripts = ['bin/ff_spider_parser', 'bin/ff_spiderpool']\n    , python_requires = rq\n    , classifiers = \\\n        [ 'Development Status :: 5 - Production/Stable'\n        , 'License :: OSI Approved :: ' + license\n        , 'Operating System :: OS Independent'\n        , 'Programming Language :: Python'\n        , 'Intended Audience :: Developers'\n        , 'Programming Language :: Python :: 2'\n        , 'Programming Language :: Python :: 2.7'\n        ]\n    )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"40049744","text":"# encoding=utf-8\nimport unittest\nimport random\nfrom HTMLTestRunner import HTMLTestRunner\n\n\nclass SuitTestSequenceFnctions(unittest.TestCase):\n    def setUp(self):\n        #初始化一个递增序列\n        self.seq=range(10)\n        print('test start')\n    def runTest(self):\n        #从序列seq中随机选取一个元素\n        element=random.choice(self.seq)\n        #验证随机元素确实属于列表中\n        self.assertTrue(element in self.seq,'ok')\nclass SuitTestDictValueFormatFubctions(unittest.TestCase):\n    def setUp(self):\n        self.seq=range(10)\n    def test_shuyffle(self):\n        #随机打乱原seq的顺序\n        random.shuffle(self.seq)\n        self.seq.sort()\n        self.assertEqual(self.seq,range(10))\n        #验证执行函数时抛出了TypError异常\n        self.assertRaises(TypeError,random.shuffle,(1,2,3))\n    def tearDown(self):\n        print('test end')\nif __name__ == '__main__':\n    # unittest.main()\n    suite1=unittest.TestLoader().loadTestsFromTestCase(SuitTestDictValueFormatFubctions)\n    suite2=unittest.TestLoader().loadTestsFromTestCase(SuitTestSequenceFnctions)\n    suite=unittest.TestSuite([suite1,suite2])\n    filme='C:\\\\Users\\\\黎涛\\\\Desktop\\\\test0.html'\n
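    # open the report file in binary write mode so the HTML report can be written to it\n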
    fp=open(filme,'wb')\n    runner=HTMLTestRunner(stream=fp,title='Report_title',description='Report_description')\n    runner.run(suite)\n    fp.close()\n\n\n","sub_path":"2017_10_27—测试/Dome/单元测试框架.py","file_name":"单元测试框架.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"587447276","text":"from torch import random\nfrom torch.utils import data\nfrom BFT.dataloaders.dataloader import DataloaderGenerator\nimport torch\n\nfrom DatasetManager.nes.nes_dataset import END_SYMBOL, PAD_SYMBOL, SimpleNESDataset\n\n\nclass NESDataloader(DataloaderGenerator):\n    def __init__(self, sequences_size, voices=(0, 1, 2, 3)):\n        dataset = SimpleNESDataset(voices=voices,\n                                   sequences_size=sequences_size)\n\n        self.sequences_size = sequences_size \n        self.features = ['pitch', 'velocity', 'duration', 'time_shift']\n        self.num_channels = 4\n        super(NESDataloader, self).__init__(dataset=dataset)\n    \n    # alias\n    @property\n    def sequence_size(self):\n        return self.sequences_size\n    \n    def get_feature_index(self, feature_name):\n        return self.features.index(feature_name)\n    \n    def write(self, x, path):\n        tensor_dict = {feature: x[:, feature_index] for feature_index, feature in\n                       enumerate(self.features)}\n        score = self.dataset.generate_midi(tensor_dict)\n        score.write(f'{path}.mid')\n        print(f'File {path}.mid written')\n\n    def get_elapsed_time(self, x):\n        \"\"\"\n        x is (batch_size, num_events, num_channels)\n        \"\"\"\n        assert 'time_shift' in self.features\n\n        timeshift_indices = x[:, :, self.features.index('time_shift')]\n        # convert timeshift indices to their actual duration:\n        y = self.dataset.timeshift_indices_to_elapsed_time(\n            timeshift_indices, smallest_time_shift=0.02)\n        return y.cumsum(dim=-1)\n\n    def dataloaders(self,\n                    batch_size,\n                    num_workers=0,\n                    shuffle_train=True,\n                    shuffle_val=False):\n        dataloaders = self.dataset.data_loaders(batch_size,\n                                                shuffle_train=shuffle_train,\n                                                shuffle_val=shuffle_val,\n                                                num_workers=num_workers)\n\n        def _build_dataloader(dataloader):\n            for data in dataloader:\n                x = torch.stack([data[e] for e in self.features], dim=-1)\n                ret = {'x': x}\n                yield ret\n\n        dataloaders = [\n            _build_dataloader(dataloaders[split])\n            for split in ['train', 'val', 'test']\n        ]\n        return dataloaders","sub_path":"BFT/dataloaders/nes_dataloader.py","file_name":"nes_dataloader.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"66500866","text":"# # LSTM demonstration\n# An LSTM (Long Short-Term Memory) network is a special kind of recurrent neural network that attempts to address many\n# of the problems with the basic model.
 See [here](https://colah.github.io/posts/2015-08-Understanding-LSTMs/) for an\n# excellent introduction and step-by-step analysis.\n#\n# ## Libraries\n# We use [gensim](https://radimrehurek.com/gensim/index.html) to load the pre-trained word-embeddings; we use\n# [torch](https://pytorch.org/), the PyTorch library, to implement the LSTM network; we use\n# [pandas](https://pandas.pydata.org/pandas-docs/stable/index.html) to load the tsv file containing the dataset; we use\n# [numpy](https://numpy.org/) to handle arrays; we use [tqdm](https://tqdm.github.io/) to display a graphic gauge of the\n# progress as we train our network over the epochs; finally, we use\n# [sklearn](https://scikit-learn.org/stable/index.html), SciKit-Learn, to compute the performance metrics.\n\nimport torch\nimport gensim as gensim\nfrom tqdm import tqdm\nfrom src.model import EnsembleModel\nfrom src.util import TweetDataset, test, evaluateModel\n\n# ## Pre-run initialisation\n# We instantiate an object containing the pre-trained word embeddings and an object representing our classification\n# model.\n\nembeddings = \"model.bin\"\nembeddings = gensim.models.KeyedVectors.load_word2vec_format(embeddings, binary=True)\nembeddings_tensor = torch.FloatTensor(embeddings.vectors)\nmodel = EnsembleModel(embeddings_tensor=embeddings_tensor)\n\n# We split our dataset into training, validation and test fractions\n\ntraining_set = TweetDataset(ds_loc=\"data/covid_training.tsv\", embeddings_model=embeddings)\ntr = len(training_set)\nvd = int(tr * 0.10)\ntr = tr - vd\ntraining_set, validation_set = torch.utils.data.random_split(training_set, [tr, vd])\ntest_set = TweetDataset(ds_loc=\"data/covid_test_public.tsv\", embeddings_model=embeddings)\n\n# We set the configuration of the hyperparameters involved in the training run. For the training run, we will use\n# [Adam](https://machinelearningmastery.com/adam-optimization-algorithm-for-deep-learning/) as our optimiser, which is a\n# variant of Stochastic Gradient Descent, and Cross-Entropy as our cost function.\n\nn_epochs = 15\nlearning_rate = 1e-4\noptimiser = torch.optim.AdamW(model.parameters(), lr=learning_rate)\ncriterion = torch.nn.CrossEntropyLoss()\n\n# `DataLoader` is a class that takes a `Dataset` object and returns a generator able to produce data batches. In this\n# case, we are using the default batch size of 1\n\ntrain_dl = torch.utils.data.DataLoader(training_set)\nvalid_dl = torch.utils.data.DataLoader(validation_set)\ntest_dl = torch.utils.data.DataLoader(test_set)\n\n# ## Training run\n# We train for a number of epochs equal to `n_epochs`, calculating the loss at each step, computing the gradient\n# associated to each operation in the `EnsembleModel` object, and updating all model parameters. At each step, we store\n# the values of the prediction and the target in order to compute the performance metrics at the end of each epoch.
 We\n# perform a validation run at the end of each epoch in order to get an idea of how our model behaves on data it has\n# never encountered and tune our hyperparameters accordingly.\n\nfor epoch in tqdm(range(1, n_epochs + 1), desc='Epochs', leave=True):\n    tr_pred = []\n    tr_target = []\n    vd_pred = []\n    vd_target = []\n    for datum in tqdm(train_dl, desc='Training set', leave=False):\n        input = datum[0]\n        target = datum[1].squeeze(0)\n        model.train()\n        optimiser.zero_grad()\n        prediction = model(input)\n        loss = criterion(prediction, target)\n        loss.backward()\n        optimiser.step()\n\n        tr_pred.append(prediction.squeeze())\n        tr_target.append(target)\n    prec, rec, f1 = test(tr_pred, tr_target)\n    # print(\"Epoch:\", epoch)\n    # print(\"TRAINING: Precision: {0:.3g}\\tRecall: {1:.3g}\\tF1: {2:.3g}\".format(prec, rec, f1))\n    with torch.no_grad():\n        for datum in tqdm(valid_dl, desc='Validation set', leave=False):\n            input = datum[0]\n            target = datum[1].squeeze(0)\n            model.eval()\n            optimiser.zero_grad()\n            prediction = model(input)\n\n            vd_pred.append(prediction.squeeze())\n            vd_target.append(target)\n    prec, rec, f1 = test(vd_pred, vd_target)\n    # print(\"VALIDATION: Precision: {0:.3g}\\tRecall: {1:.3g}\\tF1: {2:.3g}\".format(prec, rec, f1))\n\n# ## Model evaluation\n# Using the test fraction of our dataset, we evaluate our fully-trained, fully-tuned LSTM model. We use list\n# comprehension to make things faster, selecting only the first element of each datapoint in the test set dataloader\n# to make the prediction; recall each datapoint consists of a tuple containing the tweet and the annotation.\n\nmodel.eval()\nwith torch.no_grad():\n    test_prediction = [model(d[0]).squeeze() for d in tqdm(test_dl, desc=\"Test set\")]\n    test_target = [d[1] for d in test_dl]\n    prec, rec, f1 = test(test_prediction, test_target, dataset=test_set, out=True)\n    evaluateModel(model, test_prediction, test_target)\n    print(\"TEST: Precision: {0:.4}\\tRecall: {1:.4}\\tF1: {2:.4}\".format(prec, rec, f1))\n","sub_path":"lstm-for-covid-disinformation-master/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"619280537","text":"import pickle\n\n# Load model dari file\nfilename = \"olah-data-predik.data\"\nwith open(filename, 'rb') as file:\n    model = pickle.load(file)\n\n#contoh default data\ndefault = [[ 0.087, 0.030, 0.024, 10.756 ]]\nprint(\"contoh data default pendapatan\")\nprint(default)\n\n#baca input\nx1 = input(\"Entry tahun 2010: \")\nx2 = input(\"Entry tahun 2011: \")\nx3 = input(\"Entry tahun 2012: \")\nx4 = input(\"Entry tahun 2013: \")\n\nXuji = [[ float(x1), float(x2), float(x3), float(x4) ]]\n\n#prediksi dari model\nprint(model.predict(Xuji))","sub_path":"tugas3/prediksi.py","file_name":"prediksi.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"156949500","text":"# %%\ndef parser(filename):\n    with open(filename) as f:\n        deck1, deck2 = f.read().split(\"\\n\\n\")\n    deck1 = [int(i) for i in deck1.split('\\n')[1:]]\n    deck2 = [int(i) for i in deck2.split('\\n')[1:]]\n    return deck1, deck2\n\n# %%\ndef deck_score(deck):\n    n = len(deck)\n    return sum((n - i) * card for i, card in enumerate(deck))\n\n# %%\ndef play_combat(deck1, deck2, recursive=True):\n    p, q = list(deck1), list(deck2)\n    mem = set()\n\n    while len(p) != 0 and len(q) != 0: \n        idx = f\"{deck_score(p)}-{deck_score(q)}\"\n        if idx in mem:\n            # This round
 has already been played\n            return 0, []\n        mem.add(idx)\n        \n        a, b = p.pop(0), q.pop(0)\n        if recursive and len(p) >= a and len(q) >= b:\n            # Play recursive game\n            winner, _ = play_combat(p[:a], q[:b])\n            \n        else:\n            # Not enough cards in their decks (or version 1 w/o recursion)\n            winner = 0 if a > b else 1\n        \n        if winner == 0:\n            p.extend([a, b])\n        else:\n            q.extend([b, a])\n\n    return (0, p) if p else (1, q)\n\n#%%\nif __name__ == \"__main__\":\n    from os.path import dirname, join, realpath\n    folder = join(dirname(dirname(realpath(__file__))), \"data\")\n    p, q = parser(f\"{folder}/day22.txt\")\n\n    print(\"Part 1 —\", deck_score(play_combat(p, q, recursive=False)[1]))\n    print(\"Part 2 —\", deck_score(play_combat(p, q, recursive=True)[1]))\n\n","sub_path":"src/day22.py","file_name":"day22.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"342685321","text":"from typing import Optional, Sequence, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms.functional as F\n\n\nfrom torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \\\n    CenterCrop\n\n\nclass ResizeMaxSize(nn.Module):\n\n    def __init__(self, max_size, interpolation=InterpolationMode.BICUBIC, fn='max', fill=0):\n        super().__init__()\n        if not isinstance(max_size, int):\n            raise TypeError(f\"Size should be int. Got {type(max_size)}\")\n        self.max_size = max_size\n        self.interpolation = interpolation\n        self.fn = min if fn == 'min' else max\n        self.fill = fill\n\n    def forward(self, img):\n        if isinstance(img, torch.Tensor):\n            height, width = img.shape[:2]\n        else:\n            width, height = img.size\n        scale = self.max_size / float(max(height, width))\n        if scale != 1.0:\n            new_size = tuple(round(dim * scale) for dim in (height, width))\n            img = F.resize(img, new_size, self.interpolation)\n            pad_h = self.max_size - new_size[0]\n            pad_w = self.max_size - new_size[1]\n            img = F.pad(img, padding=[pad_w//2, pad_h//2, pad_w - pad_w//2, pad_h - pad_h//2], fill=self.fill)\n        return img\n\n\ndef _convert_to_rgb(image):\n    return image.convert('RGB')\n\n\ndef image_transform(\n        image_size: int,\n        is_train: bool,\n        mean: Optional[Tuple[float, ...]] = None,\n        std: Optional[Tuple[float, ...]] = None,\n        resize_longest_max: bool = False,\n        fill_color: int = 0,\n):\n    mean = mean or (0.48145466, 0.4578275, 0.40821073)  # OpenAI dataset mean\n    std = std or (0.26862954, 0.26130258, 0.27577711)  # OpenAI dataset std\n    if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:\n        # for square size, pass size as int so that Resize() uses aspect preserving shortest edge\n        image_size = image_size[0]\n\n    normalize = Normalize(mean=mean, std=std)\n    if is_train:\n        return Compose([\n            RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),\n            _convert_to_rgb,\n            ToTensor(),\n            normalize,\n        ])\n    else:\n        if resize_longest_max:\n            transforms = [\n                ResizeMaxSize(image_size, fill=fill_color)\n            ]\n        else:\n            transforms = [\n                Resize(image_size, interpolation=InterpolationMode.BICUBIC),\n                CenterCrop(image_size),\n            ]\n        transforms.extend([\n            _convert_to_rgb,\n            ToTensor(),\n            normalize,\n        ])\n        return Compose(transforms)\n","sub_path":"kosmos-2/open_clip/src/open_clip/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"429734766","text":"#\n# Copyright 2012 eNovance
\n# Copyright 2012 Red Hat, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom ceilometer.compute import pollsters\nfrom ceilometer.compute.pollsters import util\nfrom ceilometer.compute.virt import inspector as virt_inspector\n# from ceilometer.i18n import _, _LW, _LE\nfrom ceilometer.i18n import _\nfrom ceilometer.openstack.common import log\nfrom ceilometer import sample\n\nLOG = log.getLogger(__name__)\n\n\nclass InstancePollster(pollsters.BaseComputePollster):\n\n @staticmethod\n def get_samples(manager, cache, resources):\n for instance in resources:\n yield util.make_sample_from_instance(\n instance,\n name='instance',\n type=sample.TYPE_GAUGE,\n unit='instance',\n volume=1,\n )\n\n\nclass InstanceFlavorPollster(pollsters.BaseComputePollster):\n\n @staticmethod\n def get_samples(manager, cache, resources):\n for instance in resources:\n yield util.make_sample_from_instance(\n instance,\n # Use the \"meter name + variable\" syntax\n name='instance:%s' %\n instance.flavor['name'],\n type=sample.TYPE_GAUGE,\n unit='instance',\n volume=1,\n )\n\n\nclass InstanceSystemInfoPollster(pollsters.BaseComputePollster):\n\n def get_samples(self, manager, cache, resources):\n for instance in resources:\n LOG.debug(_('Checking system info for instance %s'),\n instance.id)\n try:\n sys_info = self.inspector.inspect_system_info(instance)\n if sys_info is None:\n raise NotImplementedError\n sys_info = str(sys_info)\n sys_meta = {'system_info': sys_info}\n LOG.debug(_(\"SYSTEM INFO: %(instance)s %(sys_info)s\"),\n ({'instance': instance.__dict__,\n 'sys_info': sys_info}))\n\n yield util.make_sample_from_instance(\n instance,\n name='instance.system.info',\n type=sample.TYPE_GAUGE,\n unit='instance',\n volume=1,\n additional_metadata=sys_meta,\n )\n except virt_inspector.InstanceNoQGAException as err:\n # Instance was deleted while getting samples. Ignore it.\n LOG.debug(_('QEMU-GUEST-AGENT is not installed or'\n ' started in %s'), instance.id)\n except virt_inspector.InstanceNotFoundException as err:\n # Instance was deleted while getting samples. 
Ignore it.\n LOG.debug(_('Exception while getting samples %s'), err)\n except NotImplementedError:\n # Selected inspector does not implement this pollster.\n LOG.debug(_('Cannot get System Info for instance %s, '\n 'maybe it is not implemented in qemu-guest-agent'\n ), instance.id)\n except Exception as err:\n LOG.exception(_('Could not get System info for '\n '%(id)s: %(e)s'), {'id': instance.id,\n 'e': err})\n\n\nclass InstanceOOMStatusPollster(pollsters.BaseComputePollster):\n\n def get_samples(self, manager, cache, resources):\n for instance in resources:\n LOG.debug(_('Checking oom status for instance %s'),\n instance.id)\n try:\n oom_info = self.inspector.inspect_oom_status(instance)\n if oom_info is None:\n raise NotImplementedError\n if oom_info is True:\n oom_status = 1\n else:\n oom_status = 0\n\n LOG.debug(_(\"OOM STATUS: %(instance)s %(oom_status)d\"),\n ({'instance': instance.__dict__,\n 'oom_status': oom_status}))\n\n yield util.make_sample_from_instance(\n instance,\n name='instance.oom.status',\n type=sample.TYPE_GAUGE,\n unit='instance',\n volume=oom_status,\n )\n except virt_inspector.InstanceNoQGAException as err:\n # Instance was deleted while getting samples. Ignore it.\n LOG.debug(_('QEMU-GUEST-AGENT is not installed or'\n ' started in %s'), instance.id)\n except virt_inspector.InstanceNotFoundException as err:\n # Instance was deleted while getting samples. Ignore it.\n LOG.debug(_('Exception while getting samples %s'), err)\n except NotImplementedError:\n # Selected inspector does not implement this pollster.\n LOG.debug(_('Cannot get OOM Status for instance %s, '\n 'maybe it is not implemented in qemu-guest-agent'\n ), instance.id)\n except Exception as err:\n LOG.exception(_('Could not get OOM Status for '\n '%(id)s: %(e)s'), {'id': instance.id,\n 'e': err})\n\n\nclass InstanceAppStatsPollster(pollsters.BaseComputePollster):\n\n def get_samples(self, manager, cache, resources):\n for instance in resources:\n LOG.debug(_('Checking app stats for instance %s'),\n instance.id)\n try:\n app_info = self.inspector.inspect_app_stats(instance)\n if app_info is None:\n raise NotImplementedError\n app_stats = str(app_info)\n app_meta = {'app_stats': app_stats}\n\n LOG.debug(_(\"APP STATS: %(instance)s %(app_stats)s\"),\n ({'instance': instance.__dict__,\n 'app_stats': app_stats}))\n\n yield util.make_sample_from_instance(\n instance,\n name='instance.app.stats',\n type=sample.TYPE_GAUGE,\n unit='instance',\n volume=1,\n additional_metadata=app_meta,\n )\n except virt_inspector.InstanceNoQGAException as err:\n # Instance was deleted while getting samples. Ignore it.\n LOG.debug(_('QEMU-GUEST-AGENT is not installed or'\n ' started in %s'), instance.id)\n except virt_inspector.InstanceNotFoundException as err:\n # Instance was deleted while getting samples. 
Ignore it.\n LOG.debug(_('Exception while getting samples %s'), err)\n except NotImplementedError:\n # Selected inspector does not implement this pollster.\n LOG.debug(_('Cannot get APP Stats for instance %s, '\n 'maybe it is not implemented in qemu-guest-agent'\n ), instance.id)\n except Exception as err:\n LOG.exception(_('Could not get APP Stats for '\n '%(id)s: %(e)s'), {'id': instance.id,\n 'e': err})\n\n\nclass InstancePingDelayPollster(pollsters.BaseComputePollster):\n\n def get_samples(self, manager, cache, resources):\n for instance in resources:\n LOG.debug(_('Checking ping delay for instance %s'),\n instance.id)\n try:\n delay = self.inspector.inspect_ping_delay(instance)\n if delay is None:\n raise NotImplementedError\n if delay == '':\n # set timeout = 999\n delay = 999\n else:\n try:\n delay = float(delay)\n except Exception:\n delay = 999\n\n LOG.debug(_(\"PING DELAY: %(instance)s %(delay)f\"),\n ({'instance': instance.__dict__,\n 'delay': delay}))\n\n yield util.make_sample_from_instance(\n instance,\n name='instance.ping.delay',\n type=sample.TYPE_GAUGE,\n unit='instance',\n volume=delay,\n )\n except virt_inspector.InstanceNoQGAException as err:\n # Instance was deleted while getting samples. Ignore it.\n LOG.debug(_('QEMU-GUEST-AGENT is not installed or'\n ' started in %s'), instance.id)\n except virt_inspector.InstanceNotFoundException as err:\n # Instance was deleted while getting samples. Ignore it.\n LOG.debug(_('Exception while getting samples %s'), err)\n except NotImplementedError:\n # Selected inspector does not implement this pollster.\n LOG.debug(_('Cannot get Ping Delay for instance %s, '\n 'maybe it is not implemented in qemu-guest-agent'\n ), instance.id)\n except Exception as err:\n LOG.exception(_('Could not get Ping Delay for '\n '%(id)s: %(e)s'), {'id': instance.id,\n 'e': err})\n\n\nclass InstanceUserCheckPollster(pollsters.BaseComputePollster):\n\n def get_samples(self, manager, cache, resources):\n # to be implements\n yield\n","sub_path":"compute/pollsters/instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":10220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"513183643","text":"#! 
/usr/bin/env python\n# _*_ coding:utf8 _*_\n# @author:ren\n# @date:2018/5/8.13:09\n\nfrom models import *\nfrom flask_restful import Resource\nfrom flask_restful import fields, marshal_with, reqparse, abort\n\ntags_fields = {\n    'id': fields.String,\n    'name': fields.String,\n    'create_at': fields.DateTime,\n    'tag_pic': fields.String\n}\n\nclass TagsAPI(Resource):\n    def __init__(self):\n        # 参数解析\n        self.reqparse = reqparse.RequestParser()\n        super(TagsAPI, self).__init__()\n","sub_path":"resources/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"356602498","text":"# -*- coding: utf-8 -*-\n\"\"\"Functions to call when running the function.\n\nThis module should contain a function called `run_module` that is executed\nwhen the module is run with `python -m MODULE_NAME`.\n\"\"\"\n\nimport datetime\nimport logging\n\nimport pandas as pd\n\nfrom delphi_utils import (\n    read_params,\n    S3ArchiveDiffer,\n    add_prefix,\n    create_export_csv\n)\n\nfrom .data_tools import format_for_export\nfrom .pull_api import GoogleHealthTrends, get_counts_states, get_counts_dma\nfrom .map_values import derived_counts_from_dma\nfrom .constants import (SIGNALS, RAW, SMOOTHED,\n                        MSA, HRR, STATE, DMA,\n                        PULL_START_DATE)\n\n\ndef run_module():\n    \"\"\"Main function run when calling the module.\n\n    Inputs parameters from the file 'params.json' and produces output data in\n    the directory defined by the `export_dir` (should be \"receiving\" except for\n    testing purposes).\n    \"\"\"\n\n    # read parameters\n    params = read_params()\n    ght_key = params[\"ght_key\"]\n    start_date = params[\"start_date\"]\n    end_date = params[\"end_date\"]\n    static_dir = params[\"static_file_dir\"]\n    export_dir = params[\"export_dir\"]\n    data_dir = params[\"data_dir\"]\n    wip_signal = params[\"wip_signal\"]\n    cache_dir = params[\"cache_dir\"]\n\n    arch_diff = S3ArchiveDiffer(\n        cache_dir, export_dir,\n        params[\"bucket_name\"], \"ght\",\n        params[\"aws_credentials\"])\n    arch_diff.update_cache()\n    print(arch_diff)\n    # if missing start_date, set to today (GMT) minus 5 days\n    if start_date == \"\":\n        now = datetime.datetime.now(datetime.timezone.utc)\n        start_date = (now - datetime.timedelta(days=4)).strftime(\"%Y-%m-%d\")\n\n    # if missing end_date, set to today (GMT) minus 5 days\n    if end_date == \"\":\n        now = datetime.datetime.now(datetime.timezone.utc)\n        end_date = (now - datetime.timedelta(days=4)).strftime(\"%Y-%m-%d\")\n\n    # Turn on basic logging messages (level INFO)\n    logging.basicConfig(format=\"%(levelname)s:%(message)s\", level=logging.INFO)\n    logging.info(\"Creating data from %s through %s.\", start_date, end_date)\n\n    # Dictionary mapping geo resolution to the data corresponding to that resolution.\n    df_by_geo_res = {}\n\n    if not params[\"test\"]:\n        # setup class to handle API calls\n        ght = GoogleHealthTrends(ght_key=ght_key)\n\n        # read data frame version of the data\n        df_by_geo_res[STATE] = get_counts_states(\n            ght, PULL_START_DATE, end_date, static_dir=static_dir, data_dir=data_dir\n        )\n        df_by_geo_res[DMA] = get_counts_dma(\n            ght, PULL_START_DATE, end_date, static_dir=static_dir, data_dir=data_dir\n        )\n    else:\n        df_by_geo_res[STATE] = pd.read_csv(params[\"test_data_dir\"].format(geo_res=\"state\"))\n        df_by_geo_res[DMA] = 
pd.read_csv(params[\"test_data_dir\"].format(geo_res=\"dma\"))\n\n df_by_geo_res[HRR], df_by_geo_res[MSA] = derived_counts_from_dma(df_by_geo_res[DMA],\n static_dir=static_dir)\n\n signal_names = add_prefix(SIGNALS, wip_signal, prefix=\"wip_\")\n\n for signal in signal_names:\n is_smoothed = signal.endswith(SMOOTHED)\n for geo_res, df in df_by_geo_res.items():\n create_export_csv(format_for_export(df, is_smoothed),\n geo_res=geo_res,\n sensor=signal,\n start_date=start_date,\n export_dir=export_dir)\n\n if not params[\"test\"]:\n # Diff exports, and make incremental versions\n _, common_diffs, new_files = arch_diff.diff_exports()\n\n # Archive changed and new files only\n to_archive = [f for f, diff in common_diffs.items() if diff is not None]\n to_archive += new_files\n _, fails = arch_diff.archive_exports(to_archive)\n\n # Filter existing exports to exclude those that failed to archive\n succ_common_diffs = {f: diff for f, diff in common_diffs.items() if f not in fails}\n arch_diff.filter_exports(succ_common_diffs)\n\n # Report failures: someone should probably look at them\n for exported_file in fails:\n print(f\"Failed to archive '{exported_file}'\")\n","sub_path":"google_health/delphi_google_health/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"278734592","text":"import wx\n\nfrom PDFMerger.Controls import Controls\nfrom PDFMerger.Frame import Frame\nfrom PDFMerger.Table import Table2 as Table\n\n\nclass App(wx.App):\n \"\"\"Application Class\"\"\"\n def OnInit(self):\n \"\"\"OnInit: Gets auto called on object construction\"\"\"\n \n self.frame = Frame()\n self.controls = Controls()\n \n self._x = 20\n self._y = 60\n self._panelSize = wx.Size(430,120)\n self._startPosition = wx.Point(20,20)\n \n self._controls = wx.Panel(self.frame, id=wx.ID_ANY, \n pos=wx.Point(0,0),\n size=(-1,-1),\n style=wx.SIMPLE_BORDER, name=wx.PanelNameStr)\n self.createMenuBar(self.frame)\n self.createControls(self._controls)\n self.createTable(self._controls,wx.Point(20,120))\n\n self.frame.Show()\n self.SetTopWindow(self.frame)\n return True\n\n def createMenuBar(self,parent):\n menubar = wx.MenuBar()\n fileMenu = wx.Menu()\n fileItem = fileMenu.Append(wx.ID_EXIT, 'Quit', 'Quit application')\n menubar.Append(fileMenu, '&File')\n parent.SetMenuBar(menubar)\n parent.Bind(wx.EVT_MENU, self.OnQuit, fileItem)\n \n def OnQuit(self, event):\n self.frame.Close()\n \n def createControls(self,parent):\n \"\"\"add more controls\"\"\"\n\n # - Add Button\n self._addBtn = wx.Button(self._controls, id=wx.ID_ANY,\n label=\"Add\", pos=wx.Point(self._x, self._y + 20),\n size=wx.DefaultSize, style=0,\n validator=wx.DefaultValidator,\n name=wx.ButtonNameStr)\n self._addBtn.Bind(wx.EVT_BUTTON, self.controls._onAdd)\n \n # - Up Button\n self._upBtn = wx.Button(self._controls, id=wx.ID_ANY,\n label=\"Up\", pos=wx.Point(self._x + 100, self._y + 20),\n size=wx.DefaultSize, style=0,\n validator=wx.DefaultValidator,\n name=wx.ButtonNameStr)\n self._upBtn.Bind(wx.EVT_BUTTON, self.controls._onUp)\n \n # - Down Button\n self._downBtn = wx.Button(self._controls, id=wx.ID_ANY,\n label=\"Down\", pos=wx.Point(self._x + 2*100, self._y + 20),\n size=wx.DefaultSize, style=0,\n validator=wx.DefaultValidator,\n name=wx.ButtonNameStr)\n self._downBtn.Bind(wx.EVT_BUTTON, self.controls._onDown)\n \n # - Remove Button\n self._removeBtn = wx.Button(self._controls, id=wx.ID_ANY,\n label=\"Remove\", pos=wx.Point(self._x + 
3*100, self._y + 20),\n size=wx.DefaultSize, style=0,\n validator=wx.DefaultValidator,\n name=wx.ButtonNameStr)\n\n self._removeBtn.Bind(wx.EVT_BUTTON, self.controls._onRemove)\n return self._panelSize\n \n def createTable(self,parent, position):\n self._table = Table(parent,position)\n ","sub_path":"PDF_Merger/PDFMerger/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"223467478","text":"\n\"\"\"\nsummary_MOC-EXT.py\n\nProduce a plot of change in average latitude of QP ice extent/overturning cell divide under perturbation experiment.\n\nAnna FitzMaurice\n2015\n\n\"\"\"\n#Import libraries\nimport netCDF4 as nc\nimport os,sys\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom matplotlib import gridspec\n\n############# things to set #############\n\nsuite1 = 'IceExport_Jan2015'\ndate = '2016_spring'\npert_set = ('control','pert1','pert2','pert3','pert4','pert4b','pert5','pert5b','pert6')\nparam_vec = [0.0, 0.1, 0.15, 0.2, 0.25, 0.3]\nparam_label = 'Perturbation to wind stress on ice (Pa)'\n\n########################################\n\npath2save_fig = '/work/Anna.FitzMaurice/figures/'+date+'/poster_figures/'\nif not os.path.lexists(path2save_fig):\n\tos.makedirs(path2save_fig)\n\nfigure_name = os.path.join(path2save_fig,'summary_EXT_20yr.png')\n\n########################################\n\n#Code for calculating control 20yr integrated temperatures\n\"\"\"\nfirst_year_set = ('3001','3021','3041','3061','3081')\nlast_year_set = ('3020','3040','3060','3080','3100')\n\nn = 4\nfirst_year = first_year_set[n]\nlast_year = last_year_set[n]\nperiod_length = '20'\nperiod_length_no = 20\n\nprint first_year+'-'+last_year\n\nmax_lat = 0.\nprint 'max lat: ', max_lat\n\nocean_data_static = nc.Dataset('/archive/apf/fre/siena_201204/CM2G_LM3/IceExport_Jan2015/CM2G_LM3_IceExport_Jan2015_control/gfdl.ncrc2-default-prod-openmp/pp/ocean/ocean.static.nc')\nocean_data = nc.Dataset('/archive/apf/fre/siena_201204/CM2G_LM3/IceExport_Jan2015/CM2G_LM3_IceExport_Jan2015_control/gfdl.ncrc2-default-prod-openmp/pp/ocean/av/annual_'+period_length+'yr/ocean.'+first_year+'-'+last_year+'.ann.nc')\n\nyh = ocean_data.variables['yh'][:]\nymax = np.where(yh > max_lat)[0][0] + 1\n\nSST = ocean_data.variables['SST'][0,0:ymax,:]\nxt = ocean_data.variables['xh'][:]\nyt = ocean_data.variables['yh'][0:ymax]\nwet = ocean_data_static.variables['wet'][0:ymax,:]\n\nSST = np.ma.masked_where(wet==0.,SST)\nSST_mean = np.ma.mean(SST)\n\natmos_data = nc.Dataset('/archive/apf/fre/siena_201204/CM2G_LM3/IceExport_Jan2015/CM2G_LM3_IceExport_Jan2015_control/gfdl.ncrc2-default-prod-openmp/pp/atmos/av/annual_'+period_length+'yr/atmos.'+first_year+'-'+last_year+'.ann.nc')\n\nlat = atmos_data.variables['lat'][:]\nymax_atmos = np.where(lat > max_lat)[0][0] + 1\n\nSAT = atmos_data.variables['t_surf'][0,0:ymax_atmos,:]\nland_mask = atmos_data.variables['land_mask'][0:ymax_atmos,:]\n\n#SAT = np.ma.masked_where(land_mask==1.,SAT)\nSAT_mean = np.ma.mean(SAT)-273.15\n\nprint 'mean SST: ', SST_mean\nprint 'mean SAT: ', SAT_mean\n\nSH_SST = [15.12,15.34,15.07,15.24,15.18]\nSH_SAT = [4.04,4.43,3.88,4.31,4.08]\n\"\"\"\n####################################################\n\noverturning50 = [-63.2,-63.2,-63.4,-63.0,-63.1,-63.4]\noverturning250 = [-61.4,-61.1,-61.2,-60.2,-60.1,-60.3]\n\nice75 = [-70.0,-69.8,-69.2,-69.0,-68.9,-68.9]\nice25 = [-64.3,-64.1,-62.8,-62.3,-62.2,-62.4]\nice10 = [-62.7,-62.6,-61.3,-60.8,-60.8,-60.9]\n\n#20yr 
aves\nvc = [0.,0.,0.,0.,0.]\noverturning_apex_control = [-63.8,-63.4,-62.6,-62.9,-63.8]\noverturning_100m_control = [-63.1,-63.1,-62.3,-62.6,-63.2]\noverturning_200m_control = [-62.2,-62.2,-61.2,-62.1,-62.1]\nice25_control = [-63.8,-65.4,-63.4,-65.2,-63.8]\n\nv4 = [0.1,0.1,0.1]\noverturning_apex_pert4 = [-64.1,-64.4,-63.1]\noverturning_100m_pert4 = [-62.3,-61.8,-62.7]\noverturning_200m_pert4 = [-61.9,-61.3,-62.1]\nice25_pert4 = [-62.8,-63.4,-64.5]\n\nv4b = [0.15,0.15,0.15,0.15,0.15]\noverturning_apex_pert4b = [-63.5,-62.7,-63.1,-62.6,-64.8]\noverturning_100m_pert4b = [-62.0,-61.6,-62.2,-62.1,-63.1]\noverturning_200m_pert4b = [-61.4,-60.4,-61.1,-61.3,-61.4]\nice25_pert4b = [-62.8,-62.4,-62.2,-63.1,-63.1]\n\nv5 = [0.2,0.2,0.2,0.2]\noverturning_apex_pert5 = [-62.9,-62.6,-63.4,-63.5]\noverturning_100m_pert5 = [-61.3,-61.6,-62.4,-62.3]\noverturning_200m_pert5 = [-60.4,-60.9,-60.8,-61.1]\nice25_pert5 = [-62.3,-63.1,-62.6,-62.0]\n\nv5b = [0.25,0.25,0.25,0.25,0.25]\noverturning_apex_pert5b = [-63.1,-63.1,-63.6,-63.4,-63.8]\noverturning_100m_pert5b = [-61.5,-61.8,-62.2,-62.0,-62.0]\noverturning_200m_pert5b = [-60.4,-60.4,-61.1,-60.7,-60.9]\nice25_pert5b = [-62.1,-62.4,-62.2,-62.3,-62.2]\n\nv6 = [0.3,0.3,0.3,0.3,0.3]\noverturning_apex_pert6 = [-63.1,-64.3,-63.3,-63.2,-63.6]\noverturning_100m_pert6 = [-62.1,-63.2,-62.7,-62.7,-62.5]\noverturning_200m_pert6 = [-60.6,-60.7,-60.9,-61.6,-61.6]\nice25_pert6 = [-62.4,-62.2,-61.9,-62.7,-62.4]\n\n############# plotting #################\n\n######\nfig = plt.figure(figsize=(7,5))\n######\n\n#for n in range(len(param_vec)):\n#\tplt.plot([param_vec[n],param_vec[n]],[overturning50[n],overturning250[n]],color='#000080',linewidth=2)\n\t\n#y1 = plt.plot(param_vec,overturning50,color='#000080',marker='x')\n#y2 = plt.plot(param_vec,overturning250,color='#000080',marker='x')\n#plt.fill_between(param_vec, overturning50, overturning250, color='#000080', alpha=0.2)\n\n#y3 = plt.plot(param_vec,ice75,color='#cc0000',marker='x')\n#y4 = plt.plot(param_vec,ice25,color='#cc0000',marker='x')\n#y5 = plt.plot(param_vec,ice10,color='#cc0000',marker='x')\n#plt.fill_between(param_vec, ice25, ice10,color='#cc0000', alpha=0.2)\n\ncolor_list = ['#1f7a7a','#006699','#4d4d4d']\n\nv = np.concatenate((vc,v4,v4b,v5,v5b,v6))\napex = np.concatenate((overturning_apex_control,overturning_apex_pert4,overturning_apex_pert4b,overturning_apex_pert5,overturning_apex_pert5b,overturning_apex_pert6))\nd100 = np.concatenate((overturning_100m_control,overturning_100m_pert4,overturning_100m_pert4b,overturning_100m_pert5,overturning_100m_pert5b,overturning_100m_pert6))\nd200 = np.concatenate((overturning_200m_control,overturning_200m_pert4,overturning_200m_pert4b,overturning_200m_pert5,overturning_200m_pert5b,overturning_200m_pert6))\n\napex_means = [np.mean(overturning_apex_control),np.mean(overturning_apex_pert4),np.mean(overturning_apex_pert4b),np.mean(overturning_apex_pert5),np.mean(overturning_apex_pert5b),np.mean(overturning_apex_pert6)]\nd100_means = [np.mean(overturning_100m_control),np.mean(overturning_100m_pert4),np.mean(overturning_100m_pert4b),np.mean(overturning_100m_pert5),np.mean(overturning_100m_pert5b),np.mean(overturning_100m_pert6)]\nd200_means = [np.mean(overturning_200m_control),np.mean(overturning_200m_pert4),np.mean(overturning_200m_pert4b),np.mean(overturning_200m_pert5),np.mean(overturning_200m_pert5b),np.mean(overturning_200m_pert6)]\n\n\napex_std = np.std(overturning_apex_control)\nd100_std = np.std(overturning_100m_control)\nd200_std = 
np.std(overturning_200m_control)\n\n\nplt.fill_between(param_vec, np.array(apex_means)-apex_std, np.array(apex_means)+apex_std,color='0.3', alpha=0.2)\n\"\"\"\nplt.fill_between(param_vec, d100_means-d100_std, d100_means+d100_std,color=color_list[1], alpha=0.2)\nplt.fill_between(param_vec, d200_means-d200_std, d200_means+d200_std,color=color_list[2], alpha=0.2)\n\"\"\"\nplt.scatter(v,apex,color='0.3',marker='x',s=20)\n\"\"\"\nplt.scatter(v,d100,color=color_list[1],marker='x',s=20)\nplt.scatter(v,d200,color=color_list[2],marker='x',s=20)\n\"\"\"\n\nl1, = plt.plot(param_vec,apex_means,color='0.3',marker='',linewidth=2)\n\n\"\"\"\nl2, = plt.plot(param_vec,d100_means,color=color_list[1],marker='',linewidth=2)\nl3, = plt.plot(param_vec,d200_means,color=color_list[2],marker='',linewidth=2)\n\"\"\"\n\nplt.scatter(v,np.concatenate((ice25_control,ice25_pert4,ice25_pert4b,ice25_pert5,ice25_pert5b,ice25_pert6)),color='#006699',marker='x',s=20)\n\nice_means = [np.mean(ice25_control),np.mean(ice25_pert4),np.mean(ice25_pert4b),np.mean(ice25_pert5),np.mean(ice25_pert5b),np.mean(ice25_pert6)]\nice_std = np.std(ice25_control)\nplt.plot(param_vec,ice_means,color='#006699',marker='',linewidth=2) #'#800000'\nplt.fill_between(param_vec, np.array(ice_means)-ice_std, np.array(ice_means)+ice_std,color='#006699', alpha=0.2)\n\nplt.xlabel(param_label,fontsize=14)\nplt.xticks(param_vec,fontsize=14)\nplt.ylabel('Average Latitude',fontsize=14)\nplt.yticks(fontsize=14)\nplt.xlim((-0.05,0.35))\n\n#lgd1 = plt.legend((l1, l2, l3), ('Outcrop max extent','Outcrop at 100m','Outcrop at 200m'),ncol=3,fontsize=12, loc=3) #loc = 9 , bbox_to_anchor=(0.5,-0.1)\n\nplt.tight_layout()\n\n#########################################\n\n# save figure\n#plt.savefig(figure_name,bbox_inches='tight') #,bbox_extra_artists=(lgd1,)\n\n##########################################\n\nplt.show()\n\n\n","sub_path":"summary_MOC-EXT.py","file_name":"summary_MOC-EXT.py","file_ext":"py","file_size_in_byte":8134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} {"seq_id":"434250989","text":"import logging\n\nfrom tor_core.helpers import clean_id\nfrom tor_core.helpers import flair\n\n\ndef flair_post(post, text):\n    \"\"\"\n    Sets the requested flair on a given post. Must provide a string\n    which matches an already-available flair template.\n\n    :param post: A Submission object on ToR.\n    :param text: String. The name of the flair template to apply.\n    :return: None.\n    \"\"\"\n    # Flair looks like this:\n    # {\n    #     'flair_css_class': 'unclaimed-flair',\n    #     'flair_template_id': 'fe9d6950-142a-11e7-901e-0ecc947f9ff4',\n    #     'flair_text_editable': False,\n    #     'flair_position': 'left',\n    #     'flair_text': 'Unclaimed'\n    # }\n    for choice in post.flair.choices():\n        if choice['flair_text'] == text:\n            post.flair.select(\n                flair_template_id=choice['flair_template_id']\n            )\n            return\n\n    # if the flairing is successful, we won't hit this line.\n    logging.error(\n        'Cannot find requested flair {}. Not flairing.'.format(text)\n    )\n\n\ndef update_user_flair(post, config):\n    \"\"\"\n    On a successful transcription, this takes the user's current flair,\n    increments the counter by one, and stores it back to the subreddit.\n\n    :param post: The post which holds the author information.\n    :param config: The global config instance.\n    :return: None.\n    \"\"\"\n    flair_text = '0 Γ - Beta Tester'\n\n    try:\n        # The post object is technically an inbox mention, even though it's\n        # a Comment object. 
In order to get the flair, we have to take the\n        # ID of our post object and re-request it from Reddit in order to\n        # get the *actual* object, even though they have the same ID. It's\n        # weird.\n        user_flair = config.r.comment(id=clean_id(post.fullname)).author_flair_text\n    except AttributeError:\n        user_flair = flair_text\n\n    if user_flair is None:\n        user_flair = flair_text\n\n    if 'Γ' in user_flair:\n        # take their current flair and add one to it\n        new_flair_count = int(user_flair[:user_flair.index('Γ') - 1])\n        # if there's anything special in their flair string, let's save it\n        additional_flair_text = user_flair[user_flair.index('Γ') + 1:]\n        user_flair = '{} Γ'.format(new_flair_count + 1)\n        # add in that special flair bit back in to keep their flair intact\n        user_flair += additional_flair_text\n        config.tor.flair.set(post.author, text=user_flair, css_class='grafeas')\n        logging.info('Setting flair for {}'.format(post.author))\n    else:\n        # they're a bot or a mod and have custom flair. Leave it alone.\n        return\n\n\ndef set_meta_flair_on_other_posts(config):\n    \"\"\"\n    Loops through the 10 newest posts on ToR and sets the flair to\n    'Meta' for any post that is not authored by the bot or any of\n    the moderators.\n\n    :param config: the active config object.\n    :return: None.\n    \"\"\"\n    for post in config.tor.new(limit=10):\n\n        if (\n            post.author != config.r.redditor('transcribersofreddit') and\n            post.author not in config.tor_mods and\n            post.link_flair_text != flair.meta\n        ):\n            logging.info(\n                'Flairing post {} by author {} with Meta.'.format(\n                    post.fullname, post.author\n                )\n            )\n            flair_post(post, flair.meta)\n","sub_path":"tor/helpers/flair.py","file_name":"flair.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} {"seq_id":"563407546","text":"# Uses python3\nimport timeit\ndef Flast(n):\n    if n == 0:\n        return 0\n    elif n == 1:\n        return 1\n    else:\n        Fiblist = [0,1]\n        start = timeit.default_timer()\n        for j in range(2,n+1):\n            Fiblist[j%2] = sum(Fiblist)%10\n            stop = timeit.default_timer()- start\n            if stop > 5:\n                break\n        return Fiblist[n%2]\nn = int(input())\nprint(Flast(n))","sub_path":"ALGORITHMIC TOOLBOX/3/FIBLAST.py","file_name":"FIBLAST.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} {"seq_id":"623503590","text":"from operator import mul\nfrom functools import reduce\n\ndef cmb(n,r):\n    r = min(n-r,r)\n    if r == 0: return 1\n    over = reduce(mul, range(n, n - r, -1))\n    under = reduce(mul, range(1,r + 1))\n    return over // under\n\ndef main():\n    N, A, B = map(int, input().split())\n    v = list(map(int, input().split()))\n    sort_v = sorted(v, reverse=True)\n    ave = 0\n    for i in range(A):\n        ave += sort_v[i]\n    ave = ave / A\n    ans = 0\n    if sort_v[0] == sort_v[A-1]:\n        cnt = v.count(sort_v[A-1])\n        for i in range(A, min(cnt,B)+1):\n            ans += cmb(cnt,i)\n    else:\n        cnt = v.count(sort_v[A-1])\n        ind = sort_v.index(sort_v[A-1])\n        ans = cmb(cnt, A - ind)\n    print(ave)\n    print(ans)\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"Python_codes/p03776/s340644925.py","file_name":"s340644925.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"647465462","text":"import json\nimport urllib\nimport sei_py.base\nimport sei_py.helpers\n\n\nclass ExamAPI(object):\n    def __init__(self, http_context, exam_id):\n        self._base_url = 
'{api_url}/exams/{exam_id}'.format(api_url=sei_py.base.UrlProvider.getApi(), exam_id=exam_id)\n        self._http_context = http_context\n\n    def get(self, **kwargs):\n        query_string = sei_py.helpers.generate_query_string(kwargs)\n        res = self._http_context.get('{base}{query}' \\\n            .format(base=self._base_url, query=query_string))\n        return res.json()\n\n    def save(self, exam_json):\n        include = []\n        if exam_json.get('settings'):\n            include.append('settings')\n\n        query_string = sei_py.helpers.generate_query_string({'include': ','.join(include)})\n\n        res = self._http_context.put('{base}{query}' \\\n            .format(base=self._base_url, query=query_string), \\\n            data=json.dumps(exam_json), headers={'content-type': 'application/json'})\n        return res.json()\n\n    def get_settings(self):\n        return self.get(include='settings').get('settings')\n\n    def put_settings(self, settings_json):\n        exam_json = {\n            'settings': settings_json\n        }\n        return self.save(exam_json).get('settings')\n\n    def create_lauchpad(self, **kwargs):\n        launchpad_json = kwargs.get('launchpad')\n        exam = kwargs.get('exam', self.get(include='settings'))\n        settings = exam.get('settings')\n        launchpads = settings.get('launchpads', [])\n        launchpads.append(launchpad_json)\n        settings['launchpads'] = launchpads\n        new_settings = self.put_settings(settings)\n        exam['settings'] = new_settings\n        return '{launchpad_url}/{slug}/{name}' \\\n            .format(launchpad_url=sei_py.base.UrlProvider.getLaunchpad(), slug=exam.get('slug'), \\\n            name=urllib.parse.quote(launchpad_json.get('name').lower()))\n","sub_path":"sei_py/rest/exam.py","file_name":"exam.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"419172222","text":"from __init__ import *\nimport matplotlib.pyplot as plt\n\ndef toImg(pixels):\n\t\"\"\"\n\tEach image is 28 pixels in height and 28 pixels in width, for a total of 784 pixels.\n\tThe input should be a matrix whose rows each have length 784, and each entry in a row should be an integer between 0-255\n\t\"\"\"\n\tpix_len = np.size(pixels, 1)\n\tpix_num = np.size(pixels, 0)\n\timg_row = np.ceil(pix_num / 10.0)\n\tif pix_len != 784:\n\t\tprint(\"toImg: input error.\")\n\t\treturn False\n\n\tfig = plt.figure()\n\tfor i in range(0, pix_num):\n\t\timg = pixels[i].reshape(28, 28)\n\t\tplt.subplot(int(img_row), 10, i+1)\n\t\tplt.axis('off')\n\t\tplt.imshow(img, cmap='gray')\n\n\tplt.show()\n\n\treturn True\n\n\n\nfilename = \"./data/train.csv\"\n[labels, pixels] = read_dataframe(filename)\n\t\n\"\"\"\nplot the first 100 images\n\"\"\"\ntoImg(pixels[0:100,:])","sub_path":"csv2img.py","file_name":"csv2img.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"99363232","text":"#! 
/usr/bin/env python\n\n\n\nimport argparse\nimport os, sys, time\nimport telnetlib\n\n\nclass spawn:\n    def __init__(self,ipaddr=None,ipPort=5025):\n        \"\"\" Simple class to control the RS SGS100A signal generator;\n            utilizes a basic socket interface to talk to the instrument\n        \"\"\"\n        if ipaddr == None: ipaddr = \"192.168.100.13\"\n        if ipPort == None: ipPort = 5025\n        \n        self.atn = telnetlib.Telnet(ipaddr,ipPort,timeout=10)\n\n\n    def expcmd(self,cmd,slp=0.25):\n        self.atn.write('%s\\n' % cmd)\n        time.sleep(slp)\n        if cmd.find('?') > -1: ans = self.atn.read_until('\\n')\n        else: ans = None\n        return ans\n    \n    def onecmd(self,cmd,val=None):\n        ans = None\n        if val !=None: self.expcmd('%s %s' % (cmd,val))\n        else: ans = self.expcmd('%s?' % cmd)\n\n        if ans:\n            try: ans = float(ans)\n            except: pass\n        return ans\n\n    def freq(self,val=None):\n        if val != None:\n            f = self.onecmd('freq','%sMHz' % val)\n        else:\n            f = self.onecmd('freq')\n        if f: f = f/1.0e6\n        return f\n    \n    def pow(self,pwr=None):\n        return self.onecmd('sour:pow',pwr)\n\n    def rfon(self,mode=None):\n        return self.onecmd('outp',mode)\n\n    def modon(self,mode=None):\n        return self.onecmd('sour:iq:stat',mode)\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"controls the RS SGS100A in a few limited ways\")\n    parser.add_argument('--ipaddr', default=None, help=\"ipAddress or Hostname\")\n    parser.add_argument('--port', default=5025, help=\"socket port\") \n    parser.add_argument('--freq', default=None, help=\"center frequency in MHz\")\n    parser.add_argument('--pwr', default=None, type=float, help=\"output power dbm\") \n    parser.add_argument('--rfon', default=None, action=\"store_true\", help=\"rf enable\")\n    parser.add_argument('--rfoff', default=None, action=\"store_true\", help=\"rf disable\")\n\n    args = parser.parse_args()\n    sg = spawn(args.ipaddr,args.port)\n\n    if args.freq: sg.freq(args.freq)\n    if args.pwr != None: sg.pow(args.pwr)\n    if args.rfon: sg.rfon(1)\n    if args.rfoff: sg.rfon(0)\n\n    print('rf output enabled: %s' % sg.rfon())\n    print('rf freq MHz: %s' % sg.freq())\n    print('rf power: %s' % sg.pow())\n\n    \n","sub_path":"rsSgs100.py","file_name":"rsSgs100.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"517469338","text":"from tkinter import *\n\ndef QuitGame(event=None):\n    regionFrame.destroy()\n\n\nregionFrame = Tk()\nregionFrame.geometry(\"1280x720\")\nregionFrame.title(\"End Screen\")\n\ndef background():\n    EndScreen = Canvas(regionFrame, width = 1280, height = 720, bg = \"dark red\")\n    #EndScreen.create_rectangle(0, 0, 1280, 720, fill=\"dark red\")\n    EndScreen.pack()\n\nEndText = Label(regionFrame, text=\"Game Over\", font=\"times\")\nEndText.pack(side=\"top\")\n\nQuitButton = Button(regionFrame, text=\"Quit\", font=\"times\",\n                    width=1280, height=2, command=QuitGame).pack(side=\"top\")\n\n# landmark counter text\nLandmarkCounter = Label(regionFrame, text=\"Landmarks visited = \", font=\"times\")\nLandmarkCounter.pack(side=\"top\")\n\nbackground()\nregionFrame.mainloop()\n","sub_path":"endscreen.py","file_name":"endscreen.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"218611366","text":"def bubble():\n    for i in range(len(numbers)-1):\n        j=0\n        while j<len(numbers)-1-i:\n            if numbers[j]>numbers[j+1]:\n                numbers[j],numbers[j+1]=numbers[j+1],numbers[j]\n                coor[j],coor[j+1]=coor[j+1],coor[j]\n            j=j+1\n\ndef build(before_sides,n,after):\n    if n==K:\n        sides.append(before_sides)\n        
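# reached the target combination length K; record it and stop recursing\n        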
return\n    if after==len(coor):\n        return\n    i=after\n    while i<len(coor):\n        build(before_sides+[coor[i]],n+1,i+1)\n        if len(sides)>0:\n            if len(sides[len(sides)-1])==K:\n                return\n        i=i+1\n\n\nnums=input().split(\" \")\nN=int(nums[0])\nM=int(nums[1])\nK=int(nums[2])\nls=[]\nnumbers=[]\nfor i in range(N):\n    this=input().split(\" \")\n    while len(this)>M:\n        del this[len(this)-1]\n    this=[int(x) for x in this]\n    ls.append(this)\ncoor=[]\nfor i in range(len(ls)):\n    for j in range(len(ls[i])):\n        now=ls[i][j]\n        numbers.append(now)\n        coor.append([i,j])\nbubble()\nsides=[]\nfor i in range(len(coor)):\n    this_sides=[coor[i]]\n    build(this_sides,1,1)\n    if len(sides) > 0:\n        if len(sides[len(sides) - 1]) == K:\n            break\nresult=sides[0]\nindex=result[K-1]\ni=index[0]\nj=index[1]\nprint(ls[i][j])\n\n\n\n\n\n\n","sub_path":"Code/CodeRecords/2331/60796/283941.py","file_name":"283941.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"70895620","text":"#!/usr/bin/env python\n\n# encoding: utf-8\n\n\"\"\"\n@author: swensun\n\n@github:https://github.com/yunshuipiao\n\n@software: python\n\n@file: test.py\n\n@desc: linear regression prediction for stocks\n\n@hint:\n\"\"\"\nimport tushare as ts\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set()\n\nlength = 30\n\nfrom sklearn import ensemble, linear_model, tree, svm, neighbors\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\n\nclass Stock:\n    def __init__(self, name, code):\n        self.code = code\n        self.name = name\n\nstocks_code = [\n    # Stock(\"kedaxunfei\", \"002230\"),\n    # Stock(\"dazujiguang\", \"002008\"),\n    # Stock(\"haikangweishi\", \"002415\"),\n    # Stock(\"hengshengdianzi\", \"600570\"),\n    Stock(\"ziguangguoxin\", \"002049\"),\n    Stock(\"zhongkeshuguang\", \"603019\"),\n    Stock(\"longjigufen\", \"601012\"),\n    # Stock(\"yiligufen\", \"600887\"),\n    Stock(\"yongyouwangluo\", \"600588\"),\n    Stock(\"dongfangguoxin\", \"300166\"),\n    Stock(\"lagnchaoxinxi\", \"000977\"),\n\n\n    # Stock(\"zhlaoshangyinhang\", \"600036\"),\n    # Stock(\"zhongguopinan\", \"601318\"),\n    # Stock(\"shengheziyuan\", \"600392\"),\n]\n\ndef high_low_p():\n    for s in stocks_code:\n        stock_data = ts.get_k_data(s.code)\n        print(stock_data.tail(5))\n        # print(stock_data.head(10))\n        stock_data = stock_data.as_matrix()\n        # p_change = stock_data[:, 6]\n        # p_change[:-1] = p_change[1:]\n        stock_close_data = stock_data[:, 2]\n        stock_close_data[1:] = stock_close_data[:-1]\n        stock_data = stock_data[-length:, :]\n        stock_close_data = stock_close_data[-length:]\n        high_change = (stock_data[:, 3] - stock_close_data) / stock_close_data * 100\n        # print(high_change[0:5])\n        low_change = (stock_data[:, 4] - stock_close_data) / stock_close_data * 100\n        # print(low_change[0:5])\n        # print(stock_data[0:5, :])\n        x = np.arange(0, length)\n        plt.plot(x, high_change, \"ro-\", label=\"high\")\n        plt.plot(x, low_change, \"go-\", label=\"low\")\n        my_y_ticks = np.arange(-7, 8, 1)\n        plt.yticks(my_y_ticks)\n        # plt.plot(x, stock_data[:, 6], \"bo-\", label=\"p\")\n        plt.title(s.name)\n        plt.legend()\n        plt.show()\n\ndef deal_time():\n    for s in stocks_code:\n        stock_data = ts.get_hist_data(s.code, ktype='5')\n        # stock_data.to_csv('data.csv')\n        stock_data = stock_data.as_matrix()\n        plt.plot(stock_data[:, 0])\n        plt.show()\n\n\n\nif __name__ == '__main__':\n    high_low_p()\n    # deal_time()\n\n    # total_data = stock_data\n    # total_data = np.column_stack((total_data, p_change))\n\n    # reg = linear_model.LinearRegression()\n    # x_train, x_test, y_train, y_test = 
train_test_split(total_data, high_change, test_size=0.2, random_state=0)\n    # reg.fit(x_train, y_train)\n    # score = reg.score(x_test, y_test)\n    # print(score)\n    # latest_data = total_data[0:10, :]\n    # latest_data = latest_data.mean(axis=0)\n    # latest_data = latest_data.reshape(1, -1)\n    # result = reg.predict(latest_data)\n    # print(result)\n    # print(high_change[0:10])\n","sub_path":"stocks/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"302581846","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n#\n# settings.py -\n#\n# Author: Bao Hexing \n# Created: 29 March 2018\n#\n# Copyright © 2018, Bao Hexing. All Rights Reserved.\n\nclass Settings():\n    '''Game Settings'''\n\n    def __init__(self):\n        self.bgcolor = (234, 234, 234)\n        self.screen_width = 1200\n        self.screen_height = 800\n","sub_path":"pygame/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"450720930","text":"with open(\"input\", \"r\") as fo:\n    listInput = fo.read()\n\nnumbers = {int(x): i+1 for i, x in enumerate(listInput.split(',')[:-1])}\n\n\ndef getNumber(last, index):\n    if last in numbers:\n        number = index - numbers[last]\n        numbers[last] = index\n        return number\n\n    numbers[last] = index\n    return 0\n\n\n# same as part 1 except for the limit\nlimit = 30000000\n\nlastSpoken = int(listInput.split(',')[-1])\n\nfor i in range(len(numbers) + 1, limit):\n    lastSpoken = getNumber(lastSpoken, i)\n\nprint(lastSpoken)\n","sub_path":"15/15_2.py","file_name":"15_2.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"484548290","text":"import cherrypy\nfrom mako.template import Template\nfrom mako.lookup import TemplateLookup\nfrom google.appengine.ext import db\nfrom datetime import *\n\n# database imports\nfrom data import *\n\n# app imports\nfrom users import *\nfrom projects import *\n\nclass Dashboard(object):\n    \n    @cherrypy.expose\n    def index(self):\n        \n        # authentication required\n        authen = Users()\n        user = authen.checkLoggedIn()\n        \n        # get all projects\n        allProjects = DataProject.all()\n        allProjects.filter(\"assocUser\", user.key())\n        \n        # get unpaid invoices\n        allInvoices = DataInvoice.all()\n        allInvoices.filter(\"paid\", False)\n        \n        # get timesheets\n        getTimesheets = DataTimesheet.all()\n        \n        # pass everything in one templateData variable\n        class Struct(): pass\n        templateData = Struct()\n        templateData.projects = allProjects\n        templateData.invoices = allInvoices.order(\"due\")\n        templateData.today = datetime.now().date()\n        \n        # render the template\n        templateFile = \"templates/dashboard.html\"\n        templateLookup = TemplateLookup(directories=['.'])\n        template = Template(filename=templateFile, lookup=templateLookup)\n        return template.render(dash = templateData, \n                               timesheets = getTimesheets, \n                               user = user)","sub_path":"dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"488993438","text":"import numpy as np\n\ndef ADD(x1,x2):\n    w1,w2,theta=0.5,0.5,0.7\n    tmp=x1*w1+x2*w2\n    if tmp>theta:\n        return 1\n    else:\n        return 0\n\n# single-layer perceptrons implementing the AND, OR, and NAND gates\ndef AND(x1,x2):\n    x=np.array([x1,x2])\n    w=np.array([0.5,0.5])\n    b=-0.7\n    tmp=np.sum(w*x)+b\n    if tmp>0:\n        return 1\n    
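# weighted sum plus bias is not positive, so the gate outputs 0\n    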
else:\n        return 0\ndef NAND(x1,x2):\n    x=np.array([x1,x2])\n    w=np.array([-0.5,-0.5])\n    b=0.7\n    tmp=np.sum(w*x)+b\n    if tmp>0:\n        return 1\n    else:\n        return 0\ndef OR(x1,x2):\n    x=np.array([x1,x2])\n    w=np.array([0.5,0.5])\n    b=-0.2\n    tmp=np.sum(w*x)+b\n    if tmp>0:\n        return 1\n    else:\n        return 0\n\n# a multi-layer perceptron implementing the XOR gate\ndef XOR(x1,x2):\n    s1=NAND(x1,x2)\n    s2=OR(x1,x2)\n    y=AND(s1,s2)\n    return y\n","sub_path":"chapter2-perceptron.py","file_name":"chapter2-perceptron.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"364711949","text":"\"\"\"This file contains the SeqMutator class. It allows one to input a sequence with .\"\"\"\n\nfrom tempfile import mkstemp\nfrom shutil import move\nimport os\nfrom RosettaSub import RosettaSingleProcess\n\n\nclass SeqMutator:\n\n    def __init__(self, base):\n        self.seq_lists = {'r': \"rna_seqs.txt\", 'd': \"dna_seqs.txt\"}\n        self.base_dir = base  # base_pdb should be set to the pdb that is missing the respective DNA or RNA\n        self.new_seqs = list()\n\n        self.cs_dict = {\"4UN3/ChainA\": ('r', 0, 81, '', ''),\n                        \"4UN3/ChainC\": ('d', 0, 'n', 'rc','TGGTATTG'),\n                        \"4UN3/ChainD\": ('d', 17, 'n', '','TGGTATTG'),\n                        \"4UN4/ChainA\": ('r', 0, 81, '',''),\n                        \"4UN4/ChainC\": ('d', 17, 'n', 'rc','TGGTATTG'),\n                        \"4UN4/ChainD\": ('d', 18, 'n', '','TGGTATTG'),\n                        \"4UN4/ChainE\": ('d', 0, 17, 'rc',''),\n                        \"4UN5/ChainA\": ('r', 0, 81, '',''),\n                        \"4UN5/ChainC\": ('d', 20, 'n', 'rc','TGGTATTG'),\n                        \"4UN5/ChainD\": ('d', 18, 'n', '','TGGTATTG'),\n                        \"4UN5/ChainE\": ('d', 0, 17, 'rc',''),\n                        \"5FQ5/ChainA\": ('r', 0, 81, '',''),\n                        \"5FQ5/ChainC\": ('d', 19, 'n', 'rc','TGGTATTG'),\n                        \"5FQ5/ChainD\": ('d', 18, 'n', '','TGGTATTG'),\n                        \"5FQ5/ChainE\": ('d', 0, 17, 'rc',''),\n                        \"4OO8ABC/ChainB\": ('r', 0, 'n', '',''),\n                        \"4OO8ABC/ChainC\": ('d', 0, 'n', 'rc','')\n                        }\n\n        for chain in self.cs_dict:\n            self.read_in_seqs(chain)  # STEP 1: get the RNA/DNA mutation sequences and put them in the SOLO folder\n\n    def read_in_seqs(self, chain_name):\n        base_pdb = self.base_dir + chain_name + \".pdb\"\n        output_directory = self.base_dir + chain_name + \"_MUT\" + \"/\"\n        seq_file = self.seq_lists[self.cs_dict[chain_name][0]]\n        f = open(self.base_dir + seq_file)\n        for line in f:\n            s = line[:-1].split(\"\\t\")  # index 0: sequence_id; index 1: sequence for rosetta\n            ix = self.cs_dict[chain_name]\n            # check for addendum:\n            qs = s[1] + ix[4]\n            # check if the 'n' exists then just use the first index:\n            if ix[2] == 'n':\n                seq = qs[ix[1]:]\n                # check for revcomness:\n                if ix[3] == 'rc':\n                    seq = self.revcom(seq)\n            else:\n                seq = qs[ix[1]:ix[2]]  # grabs second index and the length of the sequence for the structure\n                if ix[3] == 'rc':\n                    seq = self.revcom(seq)\n            print(seq)\n            rr = RosettaSingleProcess(\"rna_thread.default.macosclangrelease\")\n            rr.set_inputs(\n                [\"-s\", base_pdb, \"-seq\", seq.lower(), \"-o\", output_directory + s[0] + \".pdb\"])\n            rr.run_process()\n        f.close()\n        self.change_chain_name(output_directory)\n\n\n    # Changes the chain name to be consistent with the new PDB file:\n    def change_chain_name(self, out_dir):\n        new_chain_char = out_dir[-6]\n        os.chdir(out_dir)\n        for p_file in os.listdir(os.curdir):\n            # Create temp file\n            if p_file[:2] == \"r_\" or p_file[:2] == \"d_\":\n                print(\"changing file: \" + p_file)\n                fh, abs_path = mkstemp()\n                with os.fdopen(fh, 'w') as new_file:\n                    with open(p_file) as old_file:\n                        for line in old_file:\n                            if line.find(\"TER\") != -1 or line.find(\"HET\") != -1:\n                                new_file.write(line)\n                            else:\n                                new_line = 
line[:21] + new_chain_char + line[22:]  # chain char at position 21\n                                new_file.write(new_line)\n                # Remove original file\n                os.remove(p_file)\n                # Move new file\n                move(abs_path, p_file)\n\n    def revcom(self, sequence, complement=False):\n        retseq = \"\"\n        change = {'A': 'T',\n                  'T': 'A',\n                  'G': 'C',\n                  'C': 'G'}\n        for nt in sequence:\n            rnt = change[nt.upper()]\n            if complement:\n                retseq += rnt\n            else:\n                retseq = rnt + retseq\n        return retseq\n\n\nSeqMutator(base=\"/Users/brianmendoza/Desktop/RosettaCRISPR_Relaxed1/\")\nSeqMutator(base=\"/Users/brianmendoza/Desktop/RosettaCRISPR_Relaxed2/\")\nSeqMutator(base=\"/Users/brianmendoza/Desktop/RosettaCRISPR_Relaxed3/\")\n\n","sub_path":"SeqMutatorOff.py","file_name":"SeqMutatorOff.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"368143882","text":"\nfrom openerp import fields, models, exceptions, api, _\nimport base64\nimport csv\nimport cStringIO\n\n\nclass ImportGondola(models.TransientModel):\n    _name = 'import.gondola'\n    _description = 'Import Gondola'\n\n    data = fields.Binary('File', required=True)\n    name = fields.Char('Filename')\n    delimeter = fields.Char('Delimiter', default=',', help='Default delimiter is \",\"')\n    #location = fields.Many2one('stock.location', 'Default Location', required=True)\n\n    @api.one\n    def action_import(self):\n        \"\"\"Load Product data from the CSV file.\"\"\"\n        ctx = self._context\n        product_obj = self.env['gondola']\n        if not self.data:\n            raise exceptions.Warning(_(\"You need to select a file!\"))\n        # Decode the file data\n        data = base64.b64decode(self.data)\n        file_input = cStringIO.StringIO(data)\n        file_input.seek(0)\n\n        if self.delimeter:\n            delimeter = str(self.delimeter)\n        else:\n            delimeter = ','\n\n        reader = csv.reader(file_input, delimiter=delimeter,lineterminator='\\r\\n')\n\n        for row in reader:\n            product_ids = product_obj.search([('code','=', row[0])])\n            if not product_ids:\n                vals = {}\n                vals.update({'code': row[0]})\n                vals.update({'name': str(row[1])})\n                res = self.env['gondola'].create(vals)\n\n\n","sub_path":"01-jakc/ranch_project/wizard/import_gondola.py","file_name":"import_gondola.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"90239777","text":"import scrapy\n\n\n# spider for DealerOn templated websites\nclass FordDealeronSpider(scrapy.Spider):\n    name = 'ford_dealeron'\n\n    dealers = [\n        {\n            \"name\": 'North Bay Ford',\n            \"url\": 'https://www.northbayford.com/searchnew.aspx?Type=N&Year=2021&Make=Ford&Model=Mustang%20Mach-E',\n            \"settings\": {}\n        },\n        {\n            \"name\": 'Watsonville Ford',\n            \"url\": 'https://www.watsonvilleford.com/searchnew.aspx?Type=N&Year=2021&Make=Ford&Model=Mustang%20Mach-E',\n            \"settings\": {}\n        },\n        {\n            \"name\": 'Ford Store Morgan Hill',\n            \"url\": 'https://www.fordstoremorganhill.com/car-dealer-san-jose-ca.html?Bodystyle=Premium&Model=Mustang+Mach-E&Year=2021',\n            \"settings\": {}\n        },\n        {\n            \"name\": 'Mission Valley Ford',\n            \"url\": 'https://www.missionvalleyford.com/searchnew.aspx?Type=N&Make=Ford&Model=Mustang%20Mach-E',\n            \"settings\": {}\n        },\n        {\n            \"name\": 'James Ford (Half Moon Bay)',\n            \"url\": 'https://www.jamesford.com/searchnew.aspx?Type=N&Model=Mustang%20Mach-E',\n            \"settings\": {}\n        },\n        {\n            \"name\": 'Future Ford (Concord)',\n            \"url\": 'https://www.futurefordofconcord.com/searchnew.aspx?Make=Ford&Model=Mustang%20Mach-E',\n            \"settings\": {}\n        },\n        {\n            \"name\": 'Napa Ford',\n            
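# note: this dealer exposes inventory at a cars-for-sale page instead of the usual searchnew.aspx endpoint\n            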
\"url\": 'https://www.napaford.com/cars-for-sale-napa-ca.html?Model=Mustang+Mach-E',\n            \"settings\": {}\n        },\n        {\n            \"name\": 'Santos Ford',\n            \"url\": 'https://www.santosford.net/searchnew.aspx?Model=Mustang%20Mach-E',\n            \"settings\": {}\n        },\n        {\n            \"name\": 'Greenwood Ford (Hollister)',\n            \"url\": 'https://www.teamgreenwoodford.com/searchnew.aspx?Model=Mustang+Mach-E',\n            \"settings\": {}\n        },\n        {\n            \"name\": 'Woodland Ford',\n            \"url\": 'https://www.woodlandford.com/searchnew.aspx?Model=Mustang%20Mach-E',\n            \"settings\": {}\n        },\n        {\n            \"name\": 'Price Ford',\n            \"url\": 'https://www.pricefordsales.com/searchnew.aspx?Model=Mustang%20Mach-E',\n            \"settings\": {}\n        },\n        {\n            \"name\": 'Murdock Ford (Santaquin)',\n            \"url\": 'https://www.murdockford.com/searchnew.aspx?Model=Mustang%20Mach-E',\n            \"settings\": {}\n        },\n        {\n            \"name\": 'Performance Ford (Bountiful)',\n            \"url\": 'https://www.performancefordbountiful.com/searchnew.aspx?Model=Mustang%20Mach-E',\n            \"settings\": {}\n        },\n        {\n            \"name\": 'Future Ford (Roseville)',\n            \"url\": 'https://www.futureford.com/searchnew.aspx?Model=Mustang%20Mach-E',\n            \"settings\": {}\n        },\n        {\n            \"name\": 'Future Ford (Sacramento)',\n            \"url\": 'https://www.futurefordofsacramento.com/searchnew.aspx?Model=Mustang%20Mach-E',\n            \"settings\": {}\n        },\n    ]\n\n    def start_requests(self):\n        for dealer in self.dealers:\n            name, url, settings = dealer['name'], dealer['url'], dealer['settings']\n            yield scrapy.Request(url=url, callback=self.parse, cb_kwargs=dict(dealer_name=name, settings=settings))\n\n    def parse(self, response, dealer_name, settings):\n        UL_XPATH = '//*[@class=\"row srpVehicle hasVehicleInfo\"]'\n\n        for ul in response.xpath(UL_XPATH):\n            yield {\n                'dealer': dealer_name,\n                'url': response.url,\n                'title': \"%s %s %s %s\" % (ul.xpath('@data-year').get(), ul.xpath('@data-make').get(),\n                                          ul.xpath('@data-model').get(), ul.xpath('@data-trim').get()),\n                'msrp': ul.xpath('@data-msrp').get(),\n                'price': ul.xpath('@data-price').get(),\n                'vin': ul.xpath('@data-vin').get(),\n                'exteriorColor': ul.xpath('@data-extcolor').get(),\n                'interiorColor': ul.xpath('@data-intcolor').get(),\n                'engine': ul.xpath('@data-engine').get(),\n                'trim': ul.xpath('@data-trim').get(),\n                'options': []\n            }\n\n        # NEXT_PAGE = '/html/body/div[3]/div[2]/div/div[3]/div[2]/div[2]/form/div/div[3]/div/div/div[2]/ul/li[3]/a/@href'\n        # next_page = response.xpath(NEXT_PAGE).get()\n        # if next_page is not None:\n        #     yield response.follow(next_page, self.parse)\n\n# StratosDealerEngine\n# scraping logic\n# curl -XGET -H 'Content-Type: application/json' https://www.flemingtonbmw.com/api/InventoryWidget/Galleria/?vin=5UXTY9C03L9C80441\n\n# get options: 'https://www.flemingtonbmw.com/vehicleoptionscomments.aspx?id=5214&vin=5UXTY9C03L9C80441'\n","sub_path":"dealers/dealers/spiders/ford_dealeron.py","file_name":"ford_dealeron.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"372801659","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Time\nfrom datetime import datetime\nimport time\nfrom solution.srv import timeserver, timeserverResponse\n\nlocal_time = Time() #init a variable with \"ROS Time\" type\n\ndef hourToSec(hour):\n    return hour*60*60\n\ndef handleTimeCal(req):\n    global local_time\n    # there may be a case where the user sends a time request before the \"local_time\" topic has received data\n    if local_time != None:\n        remote = Time()\n        # list of choices\n        if req.location == 
\"Japan\":\n            # time difference here is 11 hours\n            remote = local_time - rospy.Duration(hourToSec(11))\n        elif req.location == \"Australia\":\n            # time difference here is 10 hours\n            remote = local_time - rospy.Duration(hourToSec(10))\n        \n        rospy.loginfo(\"There is a request coming from Kent at \" + str(datetime.fromtimestamp(local_time.to_sec())))\n        #return the result back to the client\n        return timeserverResponse(remote)\n\n\n\ndef clientTimeCallback(data):\n    #give value to global variable for later use\n    global local_time\n    local_time = data.data\n    \ndef main():\n    #init node\n    rospy.init_node('worldTimeServer', anonymous=True)\n    #init subscriber\n    rospy.Subscriber(\"local_time\", Time, clientTimeCallback)\n    #init server\n    worldServer = rospy.Service('worldTime', timeserver, handleTimeCal)\n\n    rospy.spin()\n\nif __name__ == '__main__':\n    main()","sub_path":"src/ros_ws/src/solution/scripts/hw3_server.py","file_name":"hw3_server.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"296171703","text":"import dns.resolver\nimport dns.name\nimport dns.query\nimport dns.zone\nimport dns.message\nimport modules.baseRecon as baseRecon\n\nclass DomainNode(baseRecon.Node):\n    def getType(self):\n        return \"domain\"\n    \n    def TextToDomainMorph(node):\n        newNode = DomainNode(node,{\"name\":node.values['name']},node.log)\n        node.children.append(newNode)\n        return [newNode]\n\n    baseRecon.TextNode.registerMorph('toDomain',TextToDomainMorph)\n\n    def DomainToNSMorph(node):\n        n = dns.name.from_text(node.values['name'])\n        try:\n            node.log.log(\"Trying \"+n.to_text())\n            while True:\n                try:\n                    answers = dns.resolver.query(n,'NS')\n                except:\n                    node.log.log(\">>No NS records found for \"+n.to_text()+\". Trying parent\")\n                    pass\n                else:\n                    node.log.log(\">>Found NS records for \"+n.to_text()+\":\")\n                    retNodes = []\n                    for rdata in answers:\n                        newNode = NSNode(node,{\"name\":rdata.to_text(),\"domain\":node.values['name']},node.log)\n                        node.children.append(newNode)\n                        retNodes.append(newNode)\n                    return retNodes\n                    break\n                n = n.parent()\n                node.log.log(\"Trying \"+n.to_text())\n        except dns.name.NoParent:\n            node.log.log(\"Reached root\")\n\n    def DomainToIPMorph(node):\n        n = dns.name.from_text(node.values['name'])\n        try:\n            answers = dns.resolver.query(n,'A')\n        except:\n            node.log.log(\">>No A record found for \"+n.to_text()+\". 
Trying parent\")\n            exit()\n        else:\n            node.log.log(\">>Found A record for \"+n.to_text()+\":\")\n            retNodes = []\n            for rdata in answers:\n                newNode = baseRecon.IPNode(node,{\"name\":rdata.to_text()},node.log)\n                node.children.append(newNode)\n                retNodes.append(newNode)\n            return retNodes\n\n    morphs={\n        'toNS':DomainToNSMorph,\n        'toIP':DomainToIPMorph\n        }\n\nclass NSNode(baseRecon.Node):\n    def getType(self):\n        return \"ns\"\n\n    def NSToIPMorph(node):\n        n = dns.name.from_text(node.values['name'])\n        try:\n            answers = dns.resolver.query(n,'A')\n        except:\n            node.log.log(\">>No A record found for \"+n.to_text()+\". Trying parent\")\n            exit()\n        else:\n            node.log.log(\">>Found A record for \"+n.to_text()+\":\")\n            retNodes = []\n            for rdata in answers:\n                newNode = baseRecon.IPNode(node,{\"name\":rdata.to_text()},node.log)\n                node.children.append(newNode)\n                retNodes.append(newNode)\n            return retNodes\n\n    def transferCheck(self):\n        try:\n            z = dns.zone.from_xfr(dns.query.xfr(self.values['name'],self.values['domain']))\n        except:\n            self.log.log(\"OK - Transfer failed\")\n            return (True,\"Transfer failed\")\n        else:\n            self.log.log(\"NOK - Transfer succeeded\")\n            result = \"Transfer succeeded\"\n            names = z.nodes.keys()\n            for n in names:\n                self.log.log(z[n].to_text(n))\n                result = result+\"\\n\"+z[n].to_text(n)\n            return (False,result)\n    \n    def versionCheck(self):\n        myResolver = dns.resolver.Resolver()\n        myResolver.timeout = 5\n        myResolver.lifetime = 5\n        myResolver.nameservers = [ item.address for item in dns.resolver.query(self.values['name'])]\n        try:\n            answers = myResolver.query('version.bind', 'TXT','CH')\n        except:\n            self.log.log(\"OK - Unable to retrieve NS version\")\n            return (True,\"Unable to retrieve NS version\")\n        else:\n            result = \"Server answered :\"\n            for rdata in answers:\n                self.log.log(\"MAYBE - Server version : \"+rdata.to_text())\n                result = result+\"\\n\"+rdata.to_text()\n            return (None,result)\n    \n    def amplificationCheck(self):\n        myResolver = dns.resolver.Resolver()\n        myResolver.timeout = 5\n        myResolver.lifetime = 5\n        myResolver.nameservers = [ item.address for item in dns.resolver.query(self.values['name'])]\n        try:\n            answers = myResolver.query('.')\n        except:\n            self.log.log(\"OK - NS amplification not working\")\n            return (True, \"Amplification not working\")\n        else:\n            self.log.log(\"MAYBE - Server replied to . query : \")\n            result = \"Server replied to . query with :\"\n            for rdata in answers:\n                self.log.log(rdata.to_text())\n                result = result+\"\\n\"+rdata.to_text()\n            return (None,result)\n\n    checks={\n        'transfer':transferCheck,\n        'version':versionCheck,\n        'ampli':amplificationCheck\n        }\n    morphs={\n        'toIP':NSToIPMorph\n        }\n\ndef getTag():\n    return \"ns\"\n\ndef getNodes():\n    return {'ns':NSNode,'domain':DomainNode}\n\n","sub_path":"modules/nsRecon.py","file_name":"nsRecon.py","file_ext":"py","file_size_in_byte":5100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} {"seq_id":"76393390","text":"import os\nimport json\n\nclass Prices:\n\n    def __init__(self, prices_path):\n        self.prices = {}\n\n        if not os.path.exists(prices_path):\n            raise Exception(\"no such path: {}\".format(prices_path))\n\n        with open(prices_path, \"r\") as prices_file:\n            self.prices = json.load(prices_file)\n\n        if len(self.prices) == 0:\n            raise Exception(\"no prices in '{}'\".format(prices_path))\n\n\n    def __iter__(self):\n\n        for s in self.prices:\n            yield s, self.prices[s]\n\n\n    def dump(self):\n        for foo in self:\n            print(\"  {}\".format(foo))\n\n","sub_path":"section5/Prices.py","file_name":"Prices.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} {"seq_id":"498715940","text":"#!/usr/bin/env python\n\n# Python module for use with carbon-data that will return the \n# 1 minute CPU load average. 
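The value is read from /proc/loadavg. 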
It will be called at 1 minute intervals\n \nimport platform\nimport time\n\n# Set the interval (in seconds) that this metric will be run\nINTERVAL = 60\n\n# Metric String format:\nHOSTNAME = platform.node().split('.')[0]\n\nPREFIX = HOSTNAME\nSUFFIX = \"cpu-load\"\n\ndef get_metric_data(q_name):\n    with open('/proc/loadavg') as f:\n        cpu_load = f.read().strip().split()[0]\n    timestamp = int(time.time())\n    metric_data = [PREFIX + \".\" + SUFFIX, (timestamp, cpu_load)]\n\n    q_name.put(metric_data)\n\n","sub_path":"carbon-data/collectors/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} {"seq_id":"167443247","text":"# vim: set fileencoding=utf-8 filetype=python :\n\nfrom logging import fatal, info\nimport os\n\nsrcdir = '.'\nblddir = '_build_'\n\nsubdirs = ['src/libanidb', 'src/anidbfs']\n\ndef set_options(opt):\n\topt.add_option('--debug', action = 'store_true', default = True,\n\t               help = 'Enable debug')\n\n\tfor dir in subdirs:\n\t\topt.sub_options(dir)\n\ndef configure(conf):\n\timport Options\n\n\tconf.check_tool('gcc')\n\n\tfor dir in subdirs:\n\t\tconf.sub_config(dir)\n\ndef build(bld):\n\tbld.add_subdirs(subdirs)\n","sub_path":"wscript","file_name":"wscript","file_ext":"","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} {"seq_id":"117846254","text":"import itertools\n\nborder = (\"|\")\nfloor = (\"--\")\nstar = (\"*\")\ntriplestar = (\"***\")\n\ndef separ():\n    print((\"=\")*30)\n\nsepar()\nprint(\"Welcome to Tic Tac Toe Game\")\nsepar()\nprint(\"{0}GAME RULES{0}\".format(border))\nprint(\"\"\"\nFirst of all, you can choose the size of the game map.\nThen each player can place one mark (or stone) per turn on the grid.\nThe WINNER is the player who succeeds in placing their marks in a\n{0} horizontal,\n{0} vertical or\n{0} diagonal row\n\n{1} Let's start the game {1}\n\"\"\".format(star,triplestar))\n\nsepar()\n\ndef win(current_game):\n    def all_same(l):\n        if l.count(l[0]) == len(l) and l[0] != 0:\n            return True\n        else:\n            return\n\n# Horizontal win\n    for row in game:\n        print(row)\n        if all_same(row):\n            print(f\"Player {row[0]} is the winner horizontally!\")\n            return True\n\n# Diagonals win\n    diags = []\n    for col, row in (enumerate(range(len(game)))):\n        diags.append(game[row][len(game) - col - 1])\n    if all_same(diags):\n        print(f\"Player {diags[0]} is the winner diagonally (/)!\")\n        return True\n\n    diags = []\n    for i in range(len(game)):\n        diags.append(game[i][i])\n    if all_same(diags):\n        print(f\"Player {diags[0]} is the winner diagonally (\\\\)!\")\n        return True\n\n# Vertical win\n    for col in range(len(game)):\n        vertic = []\n        for row in game:\n            vertic.append(row[col])\n        if all_same(vertic):\n            print(f\"Player {vertic[0]} is the winner vertically (|)!\")\n            return True\n\n    return False\n\n# Game logic\n\ndef game_board(game_map, player=0, row=0, column=0, just_display=False):\n    try:\n        if game_map[row][column] != 0:\n            print(\"This position is used! Choose another one!\")\n            return game_map, False\n        print(\"   \"+\"  \".join([str(i) for i in range(len(game_map))]))\n        if not just_display:\n            game_map[row][column] = player\n        for count, row in enumerate(game_map):\n            print(count, row)\n        return game_map, True\n    except IndexError as e:\n        print(\"Error: 
make sure your row and column are on the board, e.g. 0, 1 or 2\", e)\n        return game_map, False\n\n    except Exception as e:\n        print(\"Something went very wrong\", e)\n        return game_map, False\n\n# Game variables\nplay = True\nplayers = [1, 2]\nborder = (\"|\")\nwhile play:\n    game_size = int(input(\"Please enter game size of Tic Tac Toe: \"))\n    game = [[0 for i in range(game_size)] for i in range(game_size)]\n    game_won = False\n    game, _ = game_board(game, just_display=True)\n    player_choice = itertools.cycle([1,2])\n    while not game_won:\n        current_player = next(player_choice)\n        print(f\"Current Player: {current_player}\")\n        played = False\n\n        while not played:\n            column_choice = int(input(\"What column do you want to play?: \"))\n            row_choice = int(input(\"What row do you want to play?: \"))\n            game, played = game_board(game, current_player, row_choice, column_choice)\n\n        if win(game):\n            game_won = True\n            again = input(\"The game is over, would you like to play again? (y/n)\")\n            if again.lower() == \"y\":\n                print(\"Restarting\")\n            elif again.lower() == \"n\":\n                print(\"Good Bye\")\n                play = False\n            else:\n                print(\"Not a valid answer\")\n                play = False\n","sub_path":"Tic Tac Toe Dynamic.py","file_name":"Tic Tac Toe Dynamic.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} {"seq_id":"371915589","text":"'''\r\nCreated on Dec 27, 2016\r\n\r\n@author: Keith gorlen@comcast.net\r\n\r\nReferences:\r\n    https://projecteuler.net/problem=149\r\n    https://en.wikipedia.org/wiki/Lagged_Fibonacci_generator\r\n    https://en.wikipedia.org/wiki/Maximum_subarray_problem\r\n    http://stackoverflow.com/questions/23069388/listing-elements-in-a-nested-lists-diagonally/23069625#23069625\r\n    \r\n'''\r\n\r\nfrom itertools import count\r\n\r\ndef lfg(): # Lagged Fibonacci pseudo-random number generator\r\n    s = [(100003 - 200003*k + 300007*k**3)%1000000 for k in range(1, 55 + 1)]\r\n    for k in range(1, 55 + 1):\r\n        yield s[k-1]\r\n    \r\n    k55 = 56 - 55 - 1\r\n    k24 = 56 - 24 - 1\r\n    for k in count(56):\r\n        sk = (s[k24] + s[k55])%1000000\r\n        if k == 100: assert sk == 86613 + 500000\r\n        yield sk\r\n        s[k55] = sk\r\n        k55 = k55 + 1 if k55 < len(s) - 1 else 0\r\n        k24 = k24 + 1 if k24 < len(s) - 1 else 0\r\n\r\ndef sequences(A):\r\n    # horizontal\r\n    for i in range(len(A)):\r\n        yield A[i]\r\n    # vertical\r\n    for j in range(len(A[0])):\r\n        yield [A[i][j] for i in range(1, len(A[0]))]\r\n    # diagonal\r\n    h, w = len(A), len(A[0])\r\n    for p in range(h + w - 1):\r\n        yield [A[h - p + q - 1][q] for q in range(max(p-h+1, 0), min(p+1, w))]\r\n    # anti-diagonal\r\n    for p in range(h + w - 1):\r\n        yield [A[p - q][q] for q in range(max(p-h+1,0), min(p+1, w))]\r\n\r\ndef maxsum(a):\r\n    maxsum = 0\r\n    seq = sequences(a)\r\n    for s in seq:\r\n        max_ending_here = max_so_far = 0\r\n        for x in s:\r\n            max_ending_here = max(0, max_ending_here + x)\r\n            max_so_far = max(max_so_far, max_ending_here)\r\n        if max_so_far > maxsum:\r\n            maxsum = max_so_far\r\n    return maxsum\r\n    \r\nTest = [[-2, 5, 3, 2],\r\n        [ 9, -6, 5, 1],\r\n        [ 3, 2, 7, 3],\r\n        [-1, 8, -4, 8]]\r\n\r\nassert maxsum(Test) == 16\r\n\r\nnum = lfg()\r\ns = [[next(num) - 500000 for j in range(2000)] for i in range(2000)]\r\nassert s[0][10 - 1] == -393027\r\nassert s[0][100 - 1] == 86613\r\n\r\nm = maxsum(s)\r\nprint('p149 answer:', m)\r\nassert m == 52852124, 
'WRONG!'","sub_path":"p149_max_sum_subsequence.py","file_name":"p149_max_sum_subsequence.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"384760315","text":"#!/usr/bin/env python\n\n################################################################################\n# # \n# PLOTS SOME QUANTITIES FOR EOS # \n# # \n################################################################################\n\nimport numpy as np\nimport h5py\nfrom scipy import interpolate,optimize\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nimport sys,os\nimport argparse\n\nCL = 2.99792458e10\nSMALL = 1e-20\nFONTSIZE = 18\nLINEWIDTH=3\n\nmpl.rcParams.update({'font.size':FONTSIZE})\n# mpl.rcParams.update({'font.family':'serif'})\n# mpl.rcParams.update({'mathtext.fontset':'stix'})\n\n# ======================================================================\n# Main function\n# ======================================================================\n# TODO: may need to be tuned per EOS\ndef analyze_eos(s, ye, infile_name, postfix='png'):\n print(\"Loading EOS\")\n eos = load_eos(infile_name)\n outpath = get_outpath(infile_name)\n print(\"Will save to {}\".format(outpath))\n print(\"Plotting entropy\")\n plot_entropy(eos,ye,\n figname = os.path.join(outpath,\n 'entropy.'+postfix))\n plt.cla()\n plt.clf()\n print(\"Plotting enthalpy\")\n plot_hm1(eos,ye,\n figname = os.path.join(outpath,\n 'hm1.'+postfix))\n plt.cla()\n plt.clf()\n print(\"Making contour plots\")\n contour_entropy(eos,ye,\n figname = os.path.join(outpath,\n 's_contours.'+postfix))\n plt.cla()\n plt.clf()\n contour_hm1_entropy(eos,ye,\n figname = os.path.join(outpath,\n 's_hm1_contours.'+postfix))\n plt.cla()\n plt.clf()\n\n print(\"Plotting adiabat\")\n print(\"...summary\")\n a = Adiabat(s,ye,eos)\n plot_adiabat(a,eos,\n figname = os.path.join(outpath,'adiabat.'+postfix))\n plt.cla()\n plt.clf()\n\n print(\"...lP\")\n plot_lP_adiabat(a,eos,\n figname = os.path.join(outpath,'lP_adiabat.'+postfix))\n plt.cla()\n plt.clf()\n\n print(\"Plotting thermodynamics of disk edge\")\n edge_map = map_disk_edge(eos,ye)\n plot_edge_map(*edge_map,\n figname=os.path.join(outpath,'edge_map.'+postfix))\n plt.cla()\n plt.clf()\n\n return\n \n# ======================================================================\n\n\n# ======================================================================\n# IO\n# ======================================================================\ndef load_eos(filename):\n\n eos = {}\n # load file\n with h5py.File(filename,'r') as f:\n for k,v in f.items():\n eos[k] = v[()]\n\n # derived quantities\n lrho = eos['logrho']\n lT = eos['logtemp']\n Ye = eos['ye']\n lP = eos['logpress']\n le = eos['logenergy']\n ent = eos['entropy']\n rho = 10.**lrho\n T = 10.**lT\n P = 10.**lP\n e = 10.**le - eos['energy_shift']\n w = rho*e + P\n h = CL*CL + e + P/rho\n hgeom = h/(CL*CL)\n hm1 = hgeom -1\n lhm1 = np.log10(np.abs(hm1))\n\n # No maximum filter\n dpdrhoe = eos['dpdrhoe']\n dpderho = eos['dpderho']\n cs2 = (dpdrhoe + (P/(rho*rho))*dpderho)/h\n cs2 = np.minimum(cs2,CL*CL)\n cs2 = np.maximum(cs2,10*SMALL)\n cs = np.sqrt(cs2)\n\n # quick aliases\n eos['lrho'] = lrho\n eos['lT'] = lT\n eos['Ye'] = Ye\n eos['lP'] = lP\n eos['le'] = le\n eos['ent'] = ent\n \n eos['rho'] = rho\n eos['T'] = T\n eos['P'] = P\n eos['e'] = e\n eos['w'] = w\n eos['h'] = h\n eos['hgeom'] = hgeom\n eos['hm1'] = hm1\n eos['lhm1'] = lhm1\n\n eos['cs2'] = cs2\n eos['cs'] = cs\n \n 
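# return the table dict with the derived thermodynamic quantities attached\n    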
return eos\n# ======================================================================\n\n\n# ======================================================================\n# Adiabat workhorse\n# ======================================================================\nclass Adiabat:\n def __init__(self, s, Ye, eos):\n self.s = s\n self.Ye = Ye\n self.eos = eos\n self.iYe = get_iYe(Ye,eos)\n self.lrho_bounds, self.ilrho_bounds = get_lrho_min_max(s,Ye,eos)\n self.lT_adiabat = get_adiabat(s,self.iYe, eos,\n self.lrho_bounds,\n self.ilrho_bounds)\n self.slc = np.s_[self.ilrho_bounds[0]:self.ilrho_bounds[1]]\n self.lrho = self.bnd1d(eos['lrho'])\n\n @classmethod\n def get_if_valid(cls, s, Ye, eos):\n try:\n adiabat = cls(s, Ye, eos)\n except:\n raise ValueError(\"Invalid Adiabat\")\n if not adiabat.is_valid():\n raise ValueError(\"Invalid Adiabat\")\n return adiabat\n\n def bnd1d(self,var):\n return var[self.slc]\n\n def bnd3d(self,var):\n lrho = self.eos['lrho']\n lT = self.eos['lT']\n var1d = np.empty_like(lrho)\n for i in range(self.ilrho_bounds[0],self.ilrho_bounds[1]):\n vartemp = var[self.iYe,:,i]\n varinterp = interpolate.interp1d(lT,vartemp)\n t = self.lT_adiabat[i]\n v = varinterp(t)\n var1d[i] = v\n return self.bnd1d(var1d)\n\n def lT(self):\n return self.bnd1d(self.lT_adiabat)\n\n def project(self,var):\n if len(var.shape) == 1:\n if len(var) == len(self.eos['lT']):\n return self.lT()\n return self.bnd1d(var)\n if len(var.shape) == 3:\n return self.bnd3d(var)\n raise ValueError(\"var must be 1d or 3d\")\n\n def hm1bc(self):\n lP = self.project(self.eos['lP'])\n hm1 = self.project(self.eos['hm1'])\n lrho = self.project(self.eos['lrho'])\n dPdrhoe = self.project(self.eos['dpdrhoe'])\n rho = 10.**lrho\n P = 10.**lP\n rhointerp = interpolate.UnivariateSpline(rho,P)\n dPdrhos = rhointerp.derivative()\n hm1interp = interpolate.interp1d(lP,hm1)\n dPdrhoe_interp = interpolate.interp1d(lP,dPdrhoe)\n out = [lP.min(),\n float(dPdrhos(rho.min())),\n float(dPdrhoe_interp(lP.min())),\n float(hm1interp(lP.min()))]\n return out\n\n def is_valid(self):\n return np.all(np.gradient(self.project(self.eos['lT'])) >= 0)\n\n def get_rho_of_hm1_interp(self):\n rho_grid = self.project(self.eos['rho'])\n hm1_grid = self.project(self.eos['hm1'])\n fill_value = (0, self.project(self.eos['rho']).max())\n return interpolate.interp1d(hm1_grid,rho_grid,\n bounds_error = False,\n fill_value=fill_value)\n \n def __call__(self,var):\n return self.project(var)\n\ndef map_disk_edge(eos, Ye, entropies = None):\n if entropies is None:\n entropies = np.arange(2.,40.,0.5)\n\n lPmins = np.empty_like(entropies)\n hm1bcs = np.empty_like(entropies)\n dPdrhoss = np.empty_like(entropies)\n dPdrhoes = np.empty_like(entropies)\n\n for i,s in enumerate(entropies):\n try:\n a = Adiabat(s,Ye,eos)\n lPmins[i],dPdrhoss[i],dPdrhoes[i],hm1bcs[i] = a.hm1bc()\n except ValueError:\n lPmins[i],dPdrhoss[i],dPdrhoes[i],hm1bcs[i] = [np.NaN for i in range(4)]\n\n entropies = entropies[np.isfinite(hm1bcs)]\n lPmins = lPmins[np.isfinite(hm1bcs)]\n dPdrhoss = dPdrhoss[np.isfinite(hm1bcs)]\n dPdrhoes = dPdrhoes[np.isfinite(hm1bcs)]\n hm1bcs = hm1bcs[np.isfinite(hm1bcs)]\n\n return entropies,lPmins,dPdrhoss,dPdrhoes,hm1bcs\n# ======================================================================\n\n\n# ======================================================================\n# Visualization\n# ======================================================================\ndef plot_entropy(eos, ye, figname = None,\n vmax = 4, levels = [1,3,7,10,20,100]):\n 
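# NOTE: this draws log10(entropy) over the (log T, log rho) plane at the Ye\n    # slice selected by get_iYe(ye, eos), with black contours at the 'levels' values.\n    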
mpl.rcParams.update({'font.size':FONTSIZE})\n\n iYe = get_iYe(ye,eos)\n lT = eos['lT']\n lrho = eos['lrho']\n ent = eos['ent']\n\n mesh = plt.pcolormesh(lT,lrho,np.log10(ent[iYe,:,:]).transpose(),vmax=vmax)\n mesh.set_edgecolor('face')\n plt.colorbar(mesh,label=r'$\\log_{10}$entropy ($k_b/$baryon)')\n CS = plt.contour(lT,lrho,ent[iYe,:,:].transpose(),\n levels=levels,colors='k')\n plt.clabel(CS,inline=1)\n\n # fig = plt.gcf()\n # fig.set_size_inches(12,8)\n plt.ylabel(r'$\\log_{10}\\rho$ (cgs)')\n plt.xlabel(r'$\\log_{10}T$ (MeV)')\n if figname is not None:\n plt.savefig(figname,bbox_inches='tight',rasterized=True)\n\ndef plot_hm1(eos, ye, figname = None,\n vmax = 0.025, levels = [0, 0.006, 0.009, 0.015, 0.02]):\n mpl.rcParams.update({'font.size':FONTSIZE})\n\n iYe = get_iYe(ye,eos)\n lT = eos['lT']\n lrho = eos['lrho']\n hm1 = eos['hm1']\n\n mesh = plt.pcolormesh(lT,lrho,hm1[iYe,:,:].transpose(),vmax=vmax)\n mesh.set_edgecolor('face')\n plt.colorbar(mesh,label=r'$\\frac{h}{c^2}-1$')\n CS = plt.contour(lT,lrho,hm1[iYe,:,:].transpose(),colors='k',\n levels=levels)\n plt.clabel(CS,inline=1)\n\n # fig = plt.gcf()\n # fig.set_size_inches(12,8)\n plt.ylabel(r'$\\log_{10}\\rho$ (cgs)')\n plt.xlabel(r'$\\log_{10}T$ (MeV)')\n if figname is not None:\n plt.savefig(figname, bbox_inches='tight',rasterized=True)\n\ndef contour_entropy(eos, ye, figname = None,\n vmax = 1, levels = [1,3,7,20,100]):\n mpl.rcParams.update({'font.size':FONTSIZE})\n\n iYe = get_iYe(ye,eos)\n lT = eos['lT']\n lrho = eos['lrho']\n ent = eos['ent']\n hm1 = eos['hm1']\n lhm1 = eos['lhm1']\n\n mesh = plt.pcolormesh(lT,lrho,lhm1[iYe,:,:].transpose(),vmax=vmax)\n mesh.set_edgecolor('face')\n plt.colorbar(mesh,label=r'$\\log_{10}\\left(\\frac{h}{c^2}-1\\right)$')\n CS = plt.contour(lT,lrho,ent[iYe,:,:].transpose(),\n levels=levels,colors='r')\n plt.clabel(CS,inline=1)\n\n lines = [CS.collections[0]]\n labels = [r's ($k_b/$baryon)']\n plt.legend(lines,labels,loc = 'lower right')\n\n # fig = plt.gcf()\n # fig.set_size_inches(12,8)\n plt.ylabel(r'$\\log_{10}\\rho$ (cgs)')\n plt.xlabel(r'$\\log_{10}T$ (MeV)')\n if figname is not None:\n plt.savefig(figname, bbox_inches='tight', rasterized=True)\n\ndef contour_hm1_entropy(eos, ye, figname = None,\n levels_hm1 = [0.006,0.009, 0.015],\n levels_ent = [1,4,8,12,16]):\n\n mpl.rcParams.update({'font.size':FONTSIZE})\n\n iYe = get_iYe(ye,eos)\n lT = eos['lT']\n lrho = eos['lrho']\n ent = eos['ent']\n hm1 = eos['hm1']\n \n CS1 = plt.contour(lT,lrho,hm1[iYe,:,:].transpose(),\n levels=levels_hm1,colors='r')\n plt.clabel(CS1,inline=1)\n\n CS2 = plt.contour(lT,lrho,ent[iYe,:,:].transpose(),\n levels=levels_ent,colors='k')\n plt.clabel(CS2,inline=1)\n\n lines = [CS1.collections[0], CS2.collections[0]]\n labels = [r'$\\frac{h}{c^2}-1$', r'entropy ($k_b$/baryon)']\n plt.legend(lines,labels, loc = 'lower right')\n\n # fig = plt.gcf()\n # fig.set_size_inches(12,8)\n \n plt.xlabel(r'$\\log_{10}T$ (MeV)')\n plt.ylabel(r'$\\log_{10}\\rho$ (cgs)')\n \n if figname is not None:\n plt.savefig(figname, bbox_inches='tight')\n\ndef plot_edge_map(entropies,lPmins,dPdrhoss,dPdrhoes,hm1bcs,\n figname = None):\n mpl.rcParams.update({'font.size':FONTSIZE})\n\n fig, axarr = plt.subplots(2,2,sharex=True)\n lPax = axarr[1,0]\n hm1ax = axarr[0,0]\n dpdre_ax = axarr[1,1]\n dpdrs_ax = axarr[0,1]\n \n lPax.plot(entropies,lPmins)\n lPax.set_ylabel(r'$\\min\\left(\\log_{10}P\\right)$')\n \n hm1ax.plot(entropies,hm1bcs)\n hm1ax.set_ylabel(r'$\\left[\\frac{h}{c^2} - 1\\right]_{\\min\\left(\\log_{10}P\\right)}$')\n \n 
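# NOTE: the derivative data are divided by 1e17 so the tick labels stay O(1);\n    # the 10^{17} factor is carried in the axis labels instead.\n    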
dpdre_ax.plot(entropies,dPdrhoes/1e17)\n    dpdre_ax.set_ylabel(r'$10^{17}\\times \\left[\\frac{\\partial P}{\\partial\\rho}\\right]_\\varepsilon^{\\min P}$')\n    \n    dpdrs_ax.plot(entropies,dPdrhoss/1e17)\n    dpdrs_ax.set_ylabel(r'$10^{17}\\times \\left[\\frac{\\partial P}{\\partial\\rho}\\right]_s^{\\min P}$')\n\n    axarr[1,0].set_xlabel(r'entropy ($k_b$/baryon)')\n    axarr[1,1].set_xlabel(r'entropy ($k_b$/baryon)')\n\n    # fig.set_size_inches(12,8)\n    # plt.tight_layout()\n\n    if figname is not None:\n        plt.savefig(figname,bbox_inches='tight')\n\ndef plot_adiabat(a, eos, figname=None):\n\n    mpl.rcParams.update({'font.size':FONTSIZE})\n\n    lP = eos['lP']\n    hm1 = eos['hm1']\n    lhm1 = eos['lhm1']\n    lrho = eos['lrho']\n    lT = eos['lT']\n    le = eos['le']\n\n    fig, axarr = plt.subplots(2,2)\n\n    hm1_ax = axarr[0,0]\n    hm1_ax.plot(a(lP),a(lhm1),lw=LINEWIDTH)\n    hm1_ax.set_xlabel(r'$\\log_{10}P$ (cgs)')\n    hm1_ax.set_ylabel(r'$\\log_{10}(h/c^2-1)$')\n\n    lT_ax = axarr[0,1]\n    lT_ax.plot(a(lrho),a(lT),lw=LINEWIDTH)\n    lT_ax.set_xlabel(r'$\\log_{10}\\rho$ (cgs)')\n    lT_ax.set_ylabel(r'$\\log_{10}T$ (MeV)')\n\n    P_ax = axarr[1,1]\n    P_ax.plot(a(lrho),a(lP),lw=LINEWIDTH)\n    P_ax.set_xlabel(r'$\\log_{10}\\rho$')\n    P_ax.set_ylabel(r'$\\log_{10}P$')\n\n    eps_ax = axarr[1,0]\n    eps_ax.plot(a(lrho),a(le),lw=LINEWIDTH)\n    eps_ax.set_xlabel(r'$\\log_{10}\\rho$')\n    eps_ax.set_ylabel(r'$\\log_{10}\\varepsilon$')\n\n    plt.suptitle(r'$s = %s$ $k_b$/baryon, $Y_e = %s$' % (a.s, a.Ye),y=1.08)\n\n    # fig.set_size_inches(12,8)\n    # plt.tight_layout()\n\n    if figname is not None:\n        plt.savefig(figname,bbox_inches='tight')\n\ndef plot_lP_adiabat(a, eos, figname = None):\n\n    mpl.rcParams.update({'font.size':FONTSIZE})\n\n    lP = eos['lP']\n    lhm1 = eos['lhm1']\n\n    plt.plot(a(lP),a(lhm1), lw = LINEWIDTH)\n    plt.xlabel(r'$\\log_{10}P$ (cgs)')\n    plt.ylabel(r'$\\log_{10}(h/c^2-1)$')\n\n    ax = plt.gca()\n    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n                 ax.get_xticklabels() + ax.get_yticklabels()):\n        item.set_fontsize(FONTSIZE)\n\n    if figname is not None:\n        plt.savefig(figname, bbox_inches = 'tight')\n\n# ======================================================================\n\n\n# ======================================================================\n# Utility functions\n# ======================================================================\ndef get_iYe(mYe,eos):\n    iYe = np.where(eos['Ye'] >= mYe)[0][0]\n    return iYe\n\ndef get_lrho_min_max(s, Ye, eos):\n    iYe = get_iYe(Ye,eos)\n    lrho = eos['lrho']\n    ent = eos['ent']\n    sinterp_minT = interpolate.interp1d(lrho,ent[iYe,0])\n    sinterp_maxT = interpolate.interp1d(lrho,ent[iYe,-1])\n    try:\n        lrho_min = optimize.brentq(lambda r: sinterp_minT(r) - s,\n                                   lrho.min(),lrho.max())\n    except ValueError:\n        lrho_min = lrho.min()\n    try:\n        lrho_max = optimize.brentq(lambda r: sinterp_maxT(r) - s,\n                                   lrho.min(), lrho.max())\n    except ValueError:\n        lrho_max = lrho.max()\n\n    ilrho_min = np.where(lrho >= lrho_min)[0][0]\n    ilrho_max = np.where(lrho >= lrho_max)[0][0]\n\n    return (lrho_min, lrho_max),(ilrho_min,ilrho_max)\n\ndef get_adiabat(s, iYe, eos, lrho_bounds, ilrho_bounds):\n    lrho = eos['lrho']\n    lT = eos['lT']\n    ent = eos['ent']\n    lrho_min,lrho_max = lrho_bounds\n    ilrho_min,ilrho_max = ilrho_bounds\n\n    lT_adiabat = -np.infty*np.ones_like(lrho)\n    for i in range(ilrho_min,ilrho_max):\n        s_interp = interpolate.interp1d(lT,ent[iYe,:,i])\n        lTs = optimize.brentq(lambda t: s_interp(t) - s,lT.min(),lT.max())\n        lT_adiabat[i] = lTs\n\n    return lT_adiabat\n\ndef get_outpath(infile):\n    # drop a trailing '.h5' extension, if present\n    base = infile[:-3] if infile.endswith('.h5') else infile\n    outpath_name = base + '_analysis'\n    if 
not os.path.exists(outpath_name):\n        os.makedirs(outpath_name)\n    return outpath_name\n# ======================================================================\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\n        description='Plot various properties of an EOS relevant for Fishbone-Moncrief disks')\n    parser.add_argument('s',type=float,\n                        help='Entropy for isocontours. In k_b/baryon')\n    parser.add_argument('Ye',type=float,\n                        help='Electron fraction. Assumed to be constant.')\n    parser.add_argument('filename',type=str,\n                        help='Name of EOS file to read. Assumed to be in Stellar Collapse format.')\n    parser.add_argument('--pdf',dest='pdf',action='store_true',\n                        help='Set to save plots to pdf format')\n    args = parser.parse_args()\n    print((\"Analyzing EOS {}\\n\\t\"\n           +\"Assuming:\"\n           +\"\\n\\t\\tEntropy s = {}\\n\\t\\t\"\n           +\"Electron fraction Ye = {}.\").format(args.filename,\n                                                args.s,args.Ye))\n\n    postfix = 'pdf' if args.pdf else 'png'\n    analyze_eos(args.s,args.Ye,args.filename,postfix)\n    print(\"Done!\")\n","sub_path":"script/analysis/analyze_eos.py","file_name":"analyze_eos.py","file_ext":"py","file_size_in_byte":16888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"624130555","text":"\"\"\"Tests for mro.py.\"\"\"\n\n\nfrom pytype import mro\nfrom pytype.pytd import pytd\n\nimport unittest\n\n\nclass MroTest(unittest.TestCase):\n\n  def testFlattenSuperclasses(self):\n    cls_a = pytd.Class(\"A\", None, (), (), (), ())\n    cls_b = pytd.Class(\"B\", None, (cls_a,), (), (), ())\n    cls_c = pytd.Class(\"C\", None, (cls_a,), (), (), ())\n    cls_d = pytd.Class(\"D\", None, (cls_c,), (), (), ())\n    cls_e = pytd.Class(\"E\", None, (cls_d, cls_b), (), (), ())\n    self.assertItemsEqual(mro.flattened_superclasses(cls_e),\n                          [cls_a, cls_b, cls_c, cls_d, cls_e])\n","sub_path":"pytype/mro_test.py","file_name":"mro_test.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"164491686","text":"from flask import Blueprint, request, jsonify, make_response\nfrom werkzeug.exceptions import BadRequest\nfrom app.api.models.meetups_model import MeetupsModel\nfrom app.api.models.users_model import login_required\n\ndb = MeetupsModel()\n\nmeetup = Blueprint('meetup', __name__, url_prefix=\"/api/v2/meetups\")\n\n@meetup.route(\"\", methods=[\"POST\"])\n@login_required\ndef post_meetup(current_user):\n    \"\"\"Create a new meetup endpoint\"\"\"\n    error = \"\"\n    status = 200\n    response = {}\n\n    try:\n        data = request.get_json()\n        db.validate_meetup_data(data)\n\n        topic = data[\"topic\"]\n        location = data[\"location\"]\n        happeningOn = data[\"happeningOn\"]\n        tags = data[\"tags\"]\n\n        # if current_user[\"is_admin\"] is False:\n        #     error = \"Requires Admin Login!\"\n        #     status = 403\n        # else:\n        data = dict(\n            user=current_user[\"user_id\"],\n            location=location,\n            topic=topic,\n            happeningOn = happeningOn,\n            tags=tags \n        )\n        \n        meetup_id = db.post_meetup(data)\n        status = 201\n    except BadRequest:\n        raise BadRequest()\n\n    if error:\n        response.update({\"status\": status, \"error\": error})\n        return jsonify(response), status\n\n    response.update({\"status\": status, \"data\": meetup_id})\n    return jsonify(response), status\n","sub_path":"app/api/views/meetup_endpoints.py","file_name":"meetup_endpoints.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"427841396","text":"# Python 
modules needed for working with Excel\nimport xlsxwriter\nimport openpyxl\nimport os\n \n\ndef prepare_excel():\n    # Open the final attendance workbook\n    excel_01 = openpyxl.load_workbook(\"출석부01.xlsx\")\n    sheet_01 = excel_01[\"Sheet1\"]\n    # Open the workbook listing the LC section assignments\n    excel_LC = openpyxl.load_workbook(\"LC별배정내역.xlsx\")\n    sheet_LC = excel_LC[\"GEDT01401\"]\n\n    # Create the combined attendance workbook\n    excel_Final = xlsxwriter.Workbook(\"FINAL_01.xlsx\")\n    sheet_Final = excel_Final.add_worksheet(\"종합\")\n    \n    writeExcel(sheet_Final, 0, 0, \"순번\")\n    writeExcel(sheet_Final, 0, 1, \"계열 / 학과\")\n    writeExcel(sheet_Final, 0, 2, \"LC 분반\")\n    writeExcel(sheet_Final, 0, 3, \"학번\")\n    writeExcel(sheet_Final, 0, 4, \"이름\")\n\n    student_list_LC = []\n    counter1 = 0\n    # Collect only the student IDs from the LC file into a list\n    for rownum_LC in range(1, 150):\n        student_id_LC = sheet_LC.cell(row=rownum_LC, column=5).value\n        student_LC = sheet_LC.cell(row=rownum_LC, column=4).value\n        if(type(student_id_LC) == int):\n            student_list_LC.append(student_id_LC)\n            counter1 += 1\n    \n    # Read the student IDs from the final attendance file\n    counter = 0\n    for rownum_01 in range(1, 230):\n        student_id_01 = sheet_01.cell(row=rownum_01, column=13).value\n        if(type(student_id_01) == int):\n            if(student_id_01 in student_list_LC):\n                counter += 1\n                dept = sheet_01.cell(row=rownum_01, column=8).value\n                LC = student_LC\n                studentid = student_id_01\n                name = sheet_01.cell(row=rownum_01, column=15).value\n                print(counter, dept, LC, studentid, name)\n    \n    excel_01.close()\n    excel_Final.close()\n\ndef writeExcel(file, row, column, content):\n    file.write(row, column, content)\n\n    \n# Main entry point\nos.chdir(\"C:\\\\Users\\\\Joshua Y. S. Jung\\\\Downloads\\\\컴퓨팅사고\")\nprepare_excel()\n\n","sub_path":"Divide_LC(Final).py","file_name":"Divide_LC(Final).py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"143540057","text":"#Python 3 - Tic tac toe - Sentdex tutorial \nimport itertools\n\n\ndef win(current_game): \n\n    def same_code(l):\n        if l.count(l[0]) == len(l) and l[0] !=0:\n            return True \n        else:\n            return False\n#Should pull in/shorten the \"Player...is the winner\" line!\n\n    #horizontal winner\n    for row in game: \n        print(row)\n        if same_code(row):\n            print(f\"Player {row[0]} is the winner horizontally!\")\n            return True\n\n    diags = [] #diagonal winner\n    for col, row in enumerate(reversed(range(len(game)))):\n        diags.append(game[row][col])\n    if same_code(diags):\n        print(f\"Player {diags[0]} is the winner diagonally (/)!\")\n        return True\n\n    diags = []\n    for ix in range(len(game)):\n        diags.append(game[ix][ix])\n    if same_code(diags):\n        print(f\"Player {diags[0]} is the winner diagonally (\\\\)!\")\n        return True\n\n    for col in range(len(game)): #vertically\n        check = []\n        for row in game:\n            check.append(row[col])\n        if same_code(check):\n            print(f\"Player {check[0]} is the winner vertically! \")\n            return True\n\n    return False\n\ndef game_board(game_map,player=0, row=0, column=0, just_display=False):\n    try:\n        if game_map[row][column] !=0:\n            print(\"This position is taken! Choose a free one\")\n            return game_map, False\n        print(\" a b c \") \n        if not just_display:\n            game_map[row][column] = player\n        for count, row in enumerate(game_map):\n            print (count, row)\n        return game_map, True\n    #handling errors\n    except IndexError as e: \n        print('Error: Please input the values 0, 1 or 2!', e) \n        return game_map, False\n    except Exception as e:\n        print(\"Oops! 
Something went wrong!\", e)\n return game_map, False\n\nplay = True\nplayers = [1, 2]\nwhile play:\n game_size = int(input(\"What size would you like to play of Tic Tac Toe?\" ))\n game = [[0 for i in range(game_size)] for i in range(game_size)]\n game_won = False\n game, _ = game_board(game, just_display=True)\n player_choice = itertools.cycle([1,2])\n while not game_won:\n current_player = next(player_choice)\n print(f\"Current Player: {current_player}\")\n played = False\n\n while not played:\n column_choice = int(input(\"Play column 0, 1, 2: \"))\n row_choice = int(input(\"Play row 0, 1, 2: \"))\n game, played = game_board(game, current_player, row_choice, column_choice)\n \n if win(game):\n game_won = True\n again = input(\"Rematch? Press y | Exit? Press n\")\n if again.lower() == \"y\":\n print(\"Restarting!\")\n elif again.lower() == \"n\":\n print(\"Goodbye!\")\n play = False\n else: \n print(\"Not a valid answer. Please press y or n, on your keybord!\")\n \n play = False\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"176026466","text":"n=int(input())\npair=[]\npair2=[]\nfor i in range(n):\n pair.append(\"\")\n pair2.append(\"\")\nline=input()\nline2=input()\nt=0\nt2=0\nfor i in range(len(line)):\n if line[i]==\" \":\n t+=1\n else:\n pair[t]+=line[i]\n \n if line2[i]==\" \":\n t2+=1\n else:\n pair2[t2]+=line2[i]\n\nc=\"good\"\nfor i in range(n):\n name=pair[i]\n name2=pair2[i]\n index=pair2.index(name)\n index2=pair.index(name2)\n if name==name2 or index!=index2:\n c=\"bad\"\n break\n\nprint(c)\n","sub_path":"2014/Senior/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"30216675","text":"\"\"\"\n# -------------------------------------encoding: UTF-8-------------------------------------\n# Name : Gobang game \n# Author : simon \n# e-mail : 2441873702@qq.com\n# Date : 2020.05.28 19:09\n# version: v2.1\n# http://www.pyinstaller.org/downloads.html\n# http://www.ico51.cn/ \n# https://tool.oschina.net/commons?type=3\n# to-do : 增加结束判定,数据统计等,可实现重复游戏 ———— done\n# to-do : 添加人机对战模式:目前只能完成鼠标点击前两次的棋子添加动作,但仍存在bug,\n 可能有棋子重合的情况——需要对随机候选序列进行剔除,排除可能的选项\n\n# bug 1 : 当鼠标点击到画布棋盘外仍可显示棋子 ———— fixed\n# bug 2 : 棋子会覆盖之前已经绘制的位置 ———— fixed\n# bug 3 : 棋子数量达到一定时,不会判定结果 ———— fixed\n# bug 4 : 在resize到最大化或者是放大后,下面边框无法放置棋子 ———— fixed\n# bug 5 : 在可下棋时,无法使用ESC退出游戏 ———— fixed\n--------------------------------------------------------------------------------------------\n\"\"\"\n\nimport pygame\nimport pygame.freetype\nimport random\n\n# fps setting\nfps = 300\n\n# default str value\nsize = width, height = 800, 600\nborder = 50 \nwlc_str = \"Welcome to gobang game!\"\nsuccessor = \"\"\n\n# default color\nbg_color = (190,190,190) # grey\nline_color = 0,0,0\n\n# chess color\nWHITE = 255,255,255\nBLACK = 0,0,0\nRED = 255,0,0\nBLUE = 0,0,255\nGREEN = 0,255,0\nfont_color = 0,0,0\n\npygame.init()\nfclock = pygame.time.Clock()\n# pygame Surface\nscreen = pygame.display.set_mode(size, pygame.RESIZABLE)\nbackground = pygame.Surface(screen.get_size())\ncaption = \"Gobang Game\"\npygame.display.set_caption(caption)\nicon = pygame.image.load('gobang_logo.png')\npygame.display.set_icon(icon)\n\n\ndef draw_font(background, string, font_size, font_color, position=(0,0)):\n\t# font_type = pygame.freetype.Font('C://Windows//Fonts//msyh.ttc', 1)\n\tfont_type = 
pygame.freetype.Font('./consola.ttf', 1)\t\n\tfont_rect = font_type.render_to(background, position, string, fgcolor=font_color, size=font_size)\n\tscreen.blit(background, (0, 0))\n\ndef draw_chessboard_rect(background, rect_point, border):\n\tx_num = int((width - 1.5 * border) / border)\n\ty_num = int((height - 1.5 * border) / border)\n\tfor num_w in range(x_num):\n\t\tfor num_h in range(y_num):\n\t\t\trect_point.append([num_w*border + 50, num_h*border + 50])\n\tfor item in rect_point:\n\t\ts_rect = item[0], item[1], border, border\n\t\tpygame.draw.rect(background, line_color, s_rect, 1)\n\treturn rect_point\n\ndef success(position):\n\tfor item in position:\n\t\t# row, forward\n\t\tif [item[0]+1,item[1]] in position:\n\t\t\tif [item[0]+2,item[1]] in position:\n\t\t\t\tif [item[0]+3,item[1]] in position:\n\t\t\t\t\tif ([item[0]+4,item[1]] in position):\n\t\t\t\t\t\t# print(\"success!\")\n\t\t\t\t\t\treturn True\n\t\t# row, backward\n\t\telif [item[0]-1,item[1]] in position:\n\t\t\tif [item[0]-2,item[1]] in position:\n\t\t\t\tif [item[0]-3,item[1]] in position:\n\t\t\t\t\tif ([item[0]-4,item[1]] in position):\n\t\t\t\t\t\t# print(\"success!\")\n\t\t\t\t\t\treturn True\n\t\t# column, forward\n\t\telif [item[0],item[1]+1] in position:\n\t\t\tif [item[0],item[1]+2] in position:\n\t\t\t\tif [item[0],item[1]+3] in position:\n\t\t\t\t\tif [item[0],item[1]+4] in position:\n\t\t\t\t\t\treturn True\n\t\t# column, backward\n\t\telif [item[0],item[1]-1] in position:\n\t\t\tif [item[0],item[1]-2] in position:\n\t\t\t\tif [item[0],item[1]-3] in position:\n\t\t\t\t\tif [item[0],item[1]-4] in position:\n\t\t\t\t\t\treturn True\n\t\t# main diagonal, forward \n\t\telif [item[0]+1,item[1]+1] in position:\n\t\t\tif [item[0]+2,item[1]+2] in position:\n\t\t\t\tif [item[0]+3,item[1]+3] in position:\n\t\t\t\t\tif [item[0]+4,item[1]+4] in position:\n\t\t\t\t\t\t# print(\"success!\")\n\t\t\t\t\t\treturn True\n\t\t# main diagonal, backward \n\t\telif [item[0]-1,item[1]-1] in position:\n\t\t\tif [item[0]-2,item[1]-2] in position:\n\t\t\t\tif [item[0]-3,item[1]-3] in position:\n\t\t\t\t\tif [item[0]-4,item[1]-4] in position:\n\t\t\t\t\t\t# print(\"success!\")\n\t\t\t\t\t\treturn True\n\t\t# anti-diagonal, forward\n\t\t# fix bug 3\n\t\telif [item[0]+1,item[1]-1] in position:\n\t\t\tif [item[0]+2,item[1]-2] in position:\n\t\t\t\tif [item[0]+3,item[1]-3] in position:\n\t\t\t\t\tif [item[0]+4,item[1]-4] in position:\n\t\t\t\t\t\t# print(\"success!\")\n\t\t\t\t\t\treturn True\n\t\t# anti-diagonal, backward\n\t\t# fix bug 3\n\t\telif [item[0]-1,item[1]+1] in position:\n\t\t\tif [item[0]-2,item[1]+2] in position:\n\t\t\t\tif [item[0]-3,item[1]+3] in position:\n\t\t\t\t\tif [item[0]-4,item[1]+4] in position:\n\t\t\t\t\t\t# print(\"success!\")\n\t\t\t\t\t\treturn True\n\n\ndef success_judge(chess_dict_all):\n\tblack_pos = []\n\twhite_pos = []\n\tglobal successor\n\tfor item in chess_dict_all:\n\t\tx = item.split(\",\", 1)\n\t\tif chess_dict_all[item] == \"white\":\n\t\t\twhite_pos.append([int(x[0]),int(x[1])])\n\t\telif chess_dict_all[item] == \"black\":\n\t\t\tblack_pos.append([int(x[0]),int(x[1])])\n\t\telse:\n\t\t\tpass\n\n\tif success(white_pos) and not success(black_pos):\n\t\tsuccessor = \"White\"\n\t\treturn True\n\telif success(black_pos) and not success(white_pos):\n\t\tsuccessor = \"Black\"\n\t\treturn True\n\telif not success(white_pos) and not success(black_pos):\n\t\tsuccessor = \"\"\n\t\treturn False\n\n\ndef game_over(background, delay_time):\n\timport time,sys\n\tdraw_font(background, \"game over!\", 20, RED, (300,30))\n\ttime.sleep(delay_time)\n\tsys.exit()\n\n\ndef check_three(position, check_buffer):\n\t
check_buffer.clear()\t# clear in place so the caller's list receives the matches\n\tfor item in position:\n\t\t# row, forward\n\t\tif [item[0]+1,item[1]] in position:\n\t\t\tif [item[0]+2,item[1]] in position:\n\t\t\t\tcheck_buffer.append(item)\n\t\t\t\tcheck_buffer.append([item[0]+1,item[1]])\n\t\t\t\tcheck_buffer.append([item[0]+2,item[1]])\n\t\t\t\t# print(\"success!\")\n\t\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\t\"\"\"\n\t\t# row, backward\n\t\tif [item[0]-1,item[1]] in position:\n\t\t\tif [item[0]-2,item[1]] in position:\n\t\t\t\tcheck_buffer.append(item)\n\t\t\t\tcheck_buffer.append([item[0]+1,item[1]])\n\t\t\t\tcheck_buffer.append([item[0]+2,item[1]])\n\t\t\t\t# print(\"success!\")\n\t\t\t\treturn True\n\t\t# column, forward\n\t\telif [item[0],item[1]+1] in position:\n\t\t\tif [item[0],item[1]+2] in position:\n\t\t\t\tcheck_buffer.append(item)\n\t\t\t\tcheck_buffer.append([item[0]+1,item[1]])\n\t\t\t\tcheck_buffer.append([item[0]+2,item[1]])\n\t\t\t\treturn True\n\t\t# column, backward\n\t\telif [item[0],item[1]-1] in position:\n\t\t\tif [item[0],item[1]-2] in position:\n\t\t\t\tcheck_buffer.append(item)\n\t\t\t\tcheck_buffer.append([item[0]+1,item[1]])\n\t\t\t\tcheck_buffer.append([item[0]+2,item[1]])\n\t\t\t\treturn True\n\t\t# main diagonal, forward \n\t\telif [item[0]+1,item[1]+1] in position:\n\t\t\tif [item[0]+2,item[1]+2] in position:\n\t\t\t\tcheck_buffer.append(item)\n\t\t\t\tcheck_buffer.append([item[0]+1,item[1]])\n\t\t\t\tcheck_buffer.append([item[0]+2,item[1]])\n\t\t\t\t# print(\"success!\")\n\t\t\t\treturn True\n\t\t# main diagonal, backward \n\t\telif [item[0]-1,item[1]-1] in position:\n\t\t\tif [item[0]-2,item[1]-2] in position:\n\t\t\t\tcheck_buffer.append(item)\n\t\t\t\tcheck_buffer.append([item[0]+1,item[1]])\n\t\t\t\tcheck_buffer.append([item[0]+2,item[1]])\n\t\t\t\t# print(\"success!\")\n\t\t\t\treturn True\n\t\t# anti-diagonal, forward\n\t\t# fix bug 3\n\t\telif [item[0]+1,item[1]-1] in position:\n\t\t\tif [item[0]+2,item[1]-2] in position:\n\t\t\t\tcheck_buffer.append(item)\n\t\t\t\tcheck_buffer.append([item[0]+1,item[1]])\n\t\t\t\tcheck_buffer.append([item[0]+2,item[1]])\n\t\t\t\t# print(\"success!\")\n\t\t\t\treturn True\n\t\t# anti-diagonal, backward\n\t\t# fix bug 3\n\t\telif [item[0]-1,item[1]+1] in position:\n\t\t\tif [item[0]-2,item[1]+2] in position:\n\t\t\t\tcheck_buffer.append(item)\n\t\t\t\tcheck_buffer.append([item[0]+1,item[1]])\n\t\t\t\tcheck_buffer.append([item[0]+2,item[1]])\n\t\t\t\t# print(\"success!\")\n\t\t\t\treturn True\n\t\t\"\"\"\n\t\t# else:\n\t\t# \treturn False\n\n\n\"\"\"\nchess_dict\nchess_dict_mouse\nchess_dict_machine\n\"\"\"\n# Algorithm -- most important \n# initialize the positions\n# check whether the opponent has 3+ directly connected stones with a chance of reaching 5 in a row;\n# if so, block next to that spot; otherwise play next to our own 3+ connected stones that could still reach 5\n# \n# def get_machine_chess_position(mouse_position, machine_position):\n# \tcheck_buffer = []\n# \tprint(check_three)\n# \tif check_three(mouse_position, check_buffer):\n# \t\tprint(check_buffer)\n# \t\t# blocking is required\n# \t\treturn machine_position\n\ndef draw_chess_position(mode_flag, mouse_position, machine_position, chess_dict_all, chess_dict_mouse, chess_dict_machine):\n\timport time\n\t# normalize position initial\n\tmouse_position_nor = []\n\tmachine_position_nor = []\n\tnew_mouse_dict = []\n\tnew_mechine_dict = []\n\tcount = 0\n\t# draw mouse_position\n\tfor position in mouse_position:\n\t\t# position rounding:\n\t\tposition = [round(position[0] / 50) * 50, round(position[1] / 50) * 50]\n\n\t\t# if (width//50 > (pos[0]/50) > 0) and (height//50 > (pos[1]//50) > 0):\t\t# fix bug 4\n\t\tif (round(width/50) > (position[0]/50) > 0) and (round(height/50) > (position[1]//50) > 0):\n\t\t\tposition_nor = [position[0]//50, position[1]//50]\t\t\t# normalize [0, 1]\n\t\t\t
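# NOTE: board cells are keyed as \"x,y\" strings so one dict (chess_dict_all)\n\t\t\t# can track occupancy for both colors.\n\t\t\t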
str(position_nor[0])+\",\"+str(position_nor[1])\t\t\t# key of chess_dict_mouse < chess_dict_all\n\t\t\t# print(key)\n\t\t\t# chess_flag : None -> no, \"white\" -> white chess, \"black\" -> black chess\n\n\t\t\tmouse_position_nor.append(position_nor)\t\t\t\t\t# normalize [[]]\n\t\t\t# print(len(machine_position_nor))\n\t\t\tif key not in chess_dict_all:\n\t\t\t\t# this condition can draw chess circle\n\t\t\t\tdraw_chess_flag = True\n\t\t\t\t# draw counter \n\t\t\t\tif mode_flag:\n\t\t\t\t\t# machine challenge\n\t\t\t\t\tchess_color, chess_flag = BLACK, \"black\"\n\t\t\t\t\tnew_mouse_dict = {key : chess_flag}\n\t\t\t\t\tdraw_one_chess(background, position, chess_color)\t\t# un-normalize position of persons\n\t\t\t\t\tchess_dict_mouse.update(new_mouse_dict)\n\t\t\t\t\tchess_dict_all.update(new_mouse_dict)\n\n\t\t\t\t\tfor item_num in range(len(mouse_position_nor)):\n\t\t\t\t\t\tif not mouse_position_nor:\t\t\t\t\t\t\t# when mouse_position_nor = [] --> continue\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t# item = mouse_position_nor[item_num]\n\t\t\t\t\t\t# # first and second chess random [] \n\t\t\t\t\t\t# temp = [[item[0]-1, item[1]-1], [item[0],item[1]-1], [item[0]+1, item[1]-1],[item[0]-1, item[1]], \n\t\t\t\t\t\t# [item[0]+1, item[1]], [item[0]-1, item[1]+1], [item[0]+2, item[1]-1], [item[0]+1, item[1]-1]]\n\t\t\t\t\t\t# for item_temp in temp:\n\t\t\t\t\t\t# \tif (item_temp in mouse_position_nor) or (item_temp in machine_position_nor):\n\t\t\t\t\t\t# \t\tdel item_temp\n\t\t\t\t\t\t# if item_num == 0:\n\t\t\t\t\t\t# \tmachine_position_nor.append([4,5])\n\t\t\t\t\t\t# \t# machine_position_nor.append(temp[first_chess])\n\t\t\t\t\t\t# elif item_num == 1:\n\t\t\t\t\t\t# \tmachine_position_nor.append([5,5])\n\t\t\t\t\t\t# \t# machine_position_nor.append(temp[second_chess])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tmachine_position_nor.append([mouse_position_nor[item_num][0]+1,mouse_position_nor[item_num][1]+1])\n\t\t\t\t\t\t\t# break\n\t\t\t\t\t\tif len(machine_position_nor) > 0:\n\t\t\t\t\t\t\tfor pos in machine_position_nor:\n\t\t\t\t\t\t\t\tmachine_key = str(pos[0])+\",\"+str(pos[1])\n\t\t\t\t\t\t\t\tif machine_key not in chess_dict_all:\n\t\t\t\t\t\t\t\t\tdraw_chess_flag = True\n\t\t\t\t\t\t\t\t\tchess_color, chess_flag = WHITE, \"white\"\n\t\t\t\t\t\t\t\t\tnew_mechine_dict = {machine_key : chess_flag}\n\t\t\t\t\t\t\t\t\tposition = [pos[0] * 50, pos[1] * 50]\t\t# un-normalize position of machine\n\t\t\t\t\t\t\t\t\tdraw_one_chess(background, position, chess_color)\n\t\t\t\t\t\t\t\t\tchess_dict_machine.update(new_mechine_dict)\n\t\t\t\t\t\t\t\t\tchess_dict_all.update(new_mechine_dict)\n\t\t\t\telse:\n\t\t\t\t\t# two person\n\t\t\t\t\tif count % 2 == 0:\n\t\t\t\t\t\tchess_color, chess_flag = BLACK, \"black\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tchess_color, chess_flag = WHITE, \"white\"\n\t\t\t\t\tcount = count + 1\n\t\t\t\t\tnew_mouse_dict = {key : chess_flag}\n\t\t\t\t\tdraw_one_chess(background, position, chess_color)\t\t# un-normalize position of persons\n\t\t\t\t\tchess_dict_mouse.update(new_mouse_dict)\n\t\t\t\t\tchess_dict_all.update(new_mouse_dict)\n\t\t\telse:\n\t\t\t\tdraw_chess_flag = False\n\n\n\t# check_buffer = []\n\t# for item in mouse_position:\n\t# \tif [item[0]+1,item[1]] in mouse_position:\n\t# \t\tif [item[0]+2,item[1]] in mouse_position:\n\t# \t\t\tcheck_buffer.append(item)\n\t# \t\t\tcheck_buffer.append([item[0]+1,item[1]])\n\t# \t\t\tcheck_buffer.append([item[0]+2,item[1]])\n\t# \t\t\tprint(\"check success!\")\n\n\t# # 归一化 mouse_position\n\t# mouse_position = []\n\t# for item in 
chess_dict_mouse:\n\t# \tx = item.split(\",\", 1)\n\t# \tmouse_position.append([int(x[0]),int(x[1])])\n\t# # time.sleep(0.5)\t\t# where should the delay go? draw the player's stone right after the click, then let the machine place its stone after a delay\n\t\n\n\n\t# for item in mouse_position:\n\t# \tif len(mouse_position) >= 1:\n\t# \t\t# the random generator would pick a different number on every loop pass, so it cannot drive the selection directly, only indirectly\n\n\n\n# put chess down \ndef draw_one_chess(background, position, color):\n\tpygame.draw.circle(background, color, position, 20, 0)\n\n\nmouse_position = []\nmachine_position = []\nblack_position = []\nwhite_position = []\nend_flag = False\ndraw_chess_flag = False\nreset_flag = False\ninfo_flag = False\nmode_flag = False\t# True-->machine, False-->two persons\ngame_info = [total, white_win, black_win] = [0, 0, 0]\nfirst_chess = int(random.random()*5)\nsecond_chess = int(random.random()*5)\n\n# print(game_info[1])\n\nwhile True:\n\n\tquit_flag = False\n\n\t# event manage\n\tfor event in pygame.event.get():\n\t\t# quit\n\t\tif not quit_flag:\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tquit_flag = True\n\t\t\t\tbreak\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\t\tquit_flag = True\n\t\t\t\tif end_flag:\n\t\t\t\t\tif event.key == pygame.K_RETURN:\n\t\t\t\t\t\treset_flag = True\n\t\t\t\t\t\tinfo_flag = False\n\t\t\telif event.type == pygame.VIDEORESIZE:\n\t\t\t\tsize = width, height = event.size[0], event.size[1]\n\t\t\t\tscreen = pygame.display.set_mode(size, pygame.RESIZABLE)\n\t\t\t\tbackground = pygame.Surface(screen.get_size())\n\n\t\t\telif not end_flag:\n\t\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\t\tmouse_position.append([event.pos[0],event.pos[1]])\t# .pos --> tuple = (x_pos,y_pos)\n\n\tif quit_flag:\n\t\tgame_over(background, 0)\n\telse:\n\t\tpass\n\n\tif reset_flag:\n\t\tmouse_position = []\n\t\tmachine_position = []\n\t\tfirst_chess = int(random.random()*5)\n\t\tsecond_chess = int(random.random()*5)\n\t\treset_flag = False\n\n\trect_point = []\n\tbackground.fill(bg_color)\n\tdraw_chessboard_rect(background, rect_point, border)\n\tdraw_font(background, wlc_str, 20, BLACK, position=(10,20))\n\t\n\tchess_dict_all = {}\n\tchess_dict_mouse = {}\n\tchess_dict_machine = {}\n\n\tdraw_chess_position(not mode_flag, mouse_position, machine_position, chess_dict_all, chess_dict_mouse, chess_dict_machine)\n\tprint(\"chess_dict_all = \",chess_dict_all)\n\tprint(\"chess_dict_mouse = \",chess_dict_mouse)\n\tprint(\"chess_dict_machine = \",chess_dict_machine)\n\n\tdraw_font(background, \"total:{} white wins:{} black wins:{}\".format(game_info[0],game_info[1],game_info[2]), 20, BLACK, ((width-450),10))\n\tend_flag = success_judge(chess_dict_all)\n\tif end_flag:\n\t\tdraw_font(background, \"Congratulations! 
\"+successor+\" wins!\", 20, RED, (int((width-300)/2),30))\n\t\tdraw_font(background, \"Please press ENTER to restart!\", 40, WHITE, (int((width-650)/2),int((height-50)/2)))\n\t\tif not info_flag:\n\t\t\ttotal = total + 1\n\t\t\tif successor == \"White\":\n\t\t\t\twhite_win = white_win + 1\n\t\t\telif successor == \"Black\":\n\t\t\t\tblack_win = black_win + 1\n\t\t\tgame_info = [total, white_win, black_win]\n\t\t\tinfo_flag = not info_flag\n\n\tscreen.blit(background, (0,0))\n\tfclock.tick(fps)\n\tpygame.display.update()\n\n","sub_path":"gobang/Gobang_v2.1.py","file_name":"Gobang_v2.1.py","file_ext":"py","file_size_in_byte":14719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"370920283","text":"from PymoNNto.Exploration.UI_Base import *\nfrom PymoNNto.NetworkBehaviour.Recorder.Recorder import *\n\nclass Network_UI(UI_Base):\n\n def __init__(self, network, modules=[], label='SORN UI', group_tags=[], transmitters=[], storage_manager=None, group_display_count=None, reduced_layout=False):\n\n network.simulate_iteration()\n\n network.clear_recorder()\n\n self.render_every_x_frames=1\n\n #network.simulate_iteration()\n #self.recording = False\n #if self.recording:\n # label += ' rec.'\n\n for ng in network.NeuronGroups:\n if not hasattr(ng, 'color'):\n ng.color = (0, 0, 255, 255)\n\n if group_tags==[]:\n for ng in network.NeuronGroups:\n if ng.tags[0] not in group_tags:\n group_tags.append(ng.tags[0])\n\n if transmitters==[]:\n for sg in network.SynapseGroups:\n if sg.tags[0] not in transmitters:\n transmitters.append(sg.tags[0])\n\n self.reduced_layout=reduced_layout\n\n #for group in network[inh_group_name]:\n # network.add_behaviours_to_neuron_group({10000: Recorder(['np.mean(n.output)',\n # 'np.mean(n.TH)',\n # 'n.TH',\n # 'n.excitation',\n # 'n.inhibition',\n # 'n.input_act',\n # 'n.refractory_counter',\n # '[np.sum(s.slow_add) for s in n.afferent_synapses.get(\"All\")]',\n # '[np.sum(s.fast_add) for s in n.afferent_synapses.get(\"All\")]'], tag='UI_rec')}, group)\n\n super().__init__(network, label=label)\n\n self.main_window.keyPressEvent = self.keyPressEvent\n\n self.group_display_count = group_display_count\n\n #self.exc_group_name = exc_group_name\n #self.inh_group_name = inh_group_name\n self.group_tags = group_tags\n self.transmitters=transmitters\n self.pause = False\n self.update_without_state_change = False\n self.storage_manager = storage_manager\n\n self.neuron_select_x = 0\n self.neuron_select_y = 0\n self.neuron_select_id = 0\n self.neuron_select_group = group_tags[0]#exc_group_name\n self.neuron_visible_groups = []\n #self.ts_group = 0\n #self.x_steps = 500\n #self.group_sliders = []\n self.neuron_select_color = (0, 255, 0, 255)\n\n self.modules = modules\n\n for module in self.modules:\n print('Initialize:', module)\n module.initialize(self)\n\n for group_tag in group_tags:\n for group in network[group_tag]:\n\n group._rec_dict = {}\n\n #rec = Recorder([], tag='UI_rec')\n #network.add_behaviours_to_neuron_group({10000: rec}, group)\n\n for module in self.modules:\n module.add_recorder_variables(group, self)\n\n self.init_recoders()\n\n timer = QtCore.QTimer(self)\n timer.timeout.connect(self.on_timer)\n timer.start(40)\n\n def add_recording_variable(self, group, var, timesteps):\n\n try:\n n = group # for eval\n eval(var) #produce error when not evaluable\n\n old_ts=0\n if var in group._rec_dict:\n old_ts=group._rec_dict[var]\n\n group._rec_dict[var] = max(timesteps,old_ts)\n #recorder.add_varable('n.output')\n return 
True\n except:\n return False\n\n def init_recoders(self):\n for group_tag in self.group_tags:\n for group in self.network[group_tag]:\n\n rec_time_dict={}\n for variable in group._rec_dict:\n rec_length=group._rec_dict[variable]\n if rec_length not in rec_time_dict:\n rec_time_dict[rec_length]=[]\n rec_time_dict[rec_length].append(variable)\n\n for rec_length in rec_time_dict:\n rec = Recorder(rec_time_dict[rec_length] + ['n.iteration'], tag='UI_rec,rec_' + str(rec_length), max_length=rec_length)\n self.network.add_behaviours_to_object({10000+rec_length: rec}, group)\n\n #def rec(self, neuron_group, rec_length=-1):\n # return neuron_group[self.rec_tag(rec_length),0]\n\n #def rec_tag(self, rec_length=-1):\n # if rec_length==-1:\n # rec_length = self.default_rec_recording_length\n # return 'rec_'+str(rec_length)\n\n def static_update_func(self, event=None):\n if self.pause:\n self.update_without_state_change=True\n\n def get_selected_neuron_subgroup(self):\n syn_sgs = self.get_selected_synapses()\n if len(syn_sgs) > 0:\n return syn_sgs[0]\n else:\n return None\n\n def get_selected_synapses(self):\n result = []\n if len(self.network[self.neuron_select_group]) > 0:\n group = self.network[self.neuron_select_group, 0]\n synapse_groups = group.afferent_synapses['All']\n for i, s in enumerate(synapse_groups):\n if (type(s.dst.mask) == np.ndarray and s.dst.mask[self.neuron_select_id]) or (type(s.dst.mask) is bool and s.dst.mask == True):\n result.append(synapse_groups[i].dst)\n return result\n\n def get_combined_syn_mats(self, synapses, neuron_id=None, attr='W'):\n results = {}\n shapes = {}\n for s in synapses:\n base_src = s.src.group_without_subGroup()\n base_dst = s.dst.group_without_subGroup()\n key = ','.join(s.tags)\n if not key in results:\n results[key] = np.zeros((base_dst.size, base_src.size))\n shapes[key] = (base_src.height, base_src.width)\n try:\n syn_mat=eval('s.'+attr)\n if base_src == s.src and base_dst == s.dst:\n results[key] += syn_mat#.copy()\n else:\n mat_mask = s.dst.mask[:, None] * s.src.mask[None, :]\n results[key][mat_mask] += np.array(syn_mat).flatten() #np.array required if syn_mat is bool (enabled)\n except:\n print(attr, \"cannot be evaluated\")\n\n if neuron_id is not None:\n for key in results:\n results[key] = results[key][neuron_id].reshape(shapes[key])\n\n return results\n\n\n def on_timer(self):\n\n if not self.pause or self.step or self.update_without_state_change:\n self.step = False\n if not self.update_without_state_change:\n for i in range(self.render_every_x_frames):\n self.network.simulate_iteration()\n\n self.it = self.network.iteration\n\n for module in self.modules:\n module.update(self)\n\n #for rec in self.network['UI_rec']:\n # rec.cut_length(self.default_recorder_length)\n\n self.update_without_state_change = False\n\n\n def keyPressEvent(self, event):\n if event.key() == QtCore.Qt.Key_W:\n indx = self.tabs.currentIndex()\n if indx >= 0:\n widget=self.tabs.currentWidget()\n self.tabs.removeTab(indx)\n widget.setParent(None)\n widget.show()\n\n\n\n\n\n\n\ndef get_color(type_index, layer):\n dim_value = max(layer * 0.9, 1.0)\n\n if type_index == 0:\n return (0.0, 0.0, 255.0 / dim_value, 255.0)\n if type_index == 1:\n return (255.0 / dim_value, 0.0, 0.0, 255.0)\n if type_index == 2:\n return (255.0 / dim_value, 150.0 / dim_value, 0.0, 255.0)\n if type_index == 3:\n return (255.0 / dim_value, 80.0 / dim_value, 0.0, 255.0)\n if type_index == 4:\n return (255.0 / dim_value, 0.0 , 150.0/ dim_value, 255.0)\n\n\n'''\ndef get_color(type_index, layer):\n 
dim_value = max(layer * 1.0, 1.0)\n\n if type_index == 0:\n return (0.0, 0.0, 255.0 / dim_value, 255.0)\n if type_index == 1:\n return (255.0 / dim_value, 0.0, 0.0, 255.0)\n if type_index == 2:\n return (255.0 / dim_value, 150.0 / dim_value, 0.0, 255.0)\n if type_index == 3:\n return (255.0 / dim_value, 80.0 / dim_value, 0.0, 255.0)\n if type_index == 4:\n return (255.0 / dim_value, 0.0 , 150.0/ dim_value, 255.0)\n'''\n\n########################################################### Exception handling\n\n'''\n\nfrom io import StringIO\nimport traceback\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import *\n#import time\n\ndef excepthook(excType, excValue, tracebackobj):\n \"\"\"\n Global function to catch unhandled exceptions.\n\n @param excType exception type\n @param excValue exception value\n @param tracebackobj traceback object\n \"\"\"\n separator = '-' * 80\n logFile = \"simple.log\"\n notice = \\\n \"\"\"An unhandled exception occurred. Please report the problem\\n\"\"\" \\\n \"\"\"using the error reporting dialog or via email to <%s>.\\n\"\"\" \\\n \"\"\"A log has been written to \"%s\".\\n\\nError information:\\n\"\"\" % \\\n (\"yourmail at server.com\", \"\")\n versionInfo = \"0.0.1\"\n timeString = time.strftime(\"%Y-%m-%d, %H:%M:%S\")\n tbinfofile = StringIO()\n traceback.print_tb(tracebackobj, None, tbinfofile)\n traceback.print_stack()\n #traceback.print_exc()\n #tbinfofile.seek(0)\n #tbinfo = tbinfofile.read()\n #errmsg = '%s: \\n%s' % (str(excType), str(excValue))\n #sections = [separator, separator, errmsg, separator, tbinfo]\n #msg = '\\n'.join(sections)\n #try:\n # f = open(logFile, \"w\")\n # f.write(msg)\n # f.write(versionInfo)\n # f.close()\n #except IOError:\n # pass\n #errorbox = QMessageBox()\n #errorbox.setText(str(notice) + str(msg) + str(versionInfo))\n #errorbox.exec_()\n'''\n\ndef except_hook(cls, exception, traceback):\n sys.__excepthook__(cls, exception, traceback)\n\nsys.excepthook = except_hook\n\n\n\n\n#from Exploration.Visualization.Visualization_Helper import *\n\n#sys._excepthook = sys.excepthook\n#def exception_hook(exctype, value, traceback):\n# print(exctype, value, traceback)\n# sys._excepthook(exctype, value, traceback)\n# sys.exit(1)\n#sys.excepthook = exception_hook\n\n\n\n\n\n# def record_frame(self, item=None, key='frame', width=100):\n# if self.storage_manager is not None and item is not None:\n# exporter = pg.exporters.ImageExporter(item)\n# #exporter.parameters()['width'] = width # (note this also affects height parameter)\n\n# exporter.params.param('width').setValue(150, blockSignal=exporter.widthChanged)\n# exporter.params.param('height').setValue(120, blockSignal=exporter.heightChanged)\n\n# next=self.storage_manager.get_next_frame_name(key)\n# exporter.export(next)\n\n\n#screen = QApplication.primaryScreen()\n#p = screen.grabWindow(self.main_window.winId())\n#p.save('test{}.png'.format(self.network.iteration), 'png')\n\n'''\n def keyPressEvent(self, event):\n if event.key() == QtCore.Qt.Key_Left:\n self.ny-=1\n if event.key() == QtCore.Qt.Key_Right:\n self.ny+=1\n if event.key() == QtCore.Qt.Key_Up:\n self.nx+=1\n if event.key() == QtCore.Qt.Key_Down:\n self.nx-=1\n self.nx = np.clip(self.nx, 0, self.ng_width - 1)\n self.ny = np.clip(self.ny, 0, self.ng_height - 1)\n self.n_id=self.ny*self.ng_width+self.nx\n'''\n\n\n# if not self.update_without_state_change:\n# self.avg_big_synapses_data.append(np.average(np.sum(GLU_syn > (np.max(GLU_syn, axis=1) * (1 / 2))[:, None], axis=0)))\n# 
self.neuron_big_synapses_data.append(np.sum(GLU_syn[self.neuron_select_id] > (np.max(GLU_syn[self.neuron_select_id]) * (1 / 2))))\n\n# self.avg_big_synapses_curve.setData(np.arange(it - len(self.avg_big_synapses_data), it), self.avg_big_synapses_data)\n# self.neuron_big_synapses_curve.setData(np.arange(it-len(self.neuron_big_synapses_data), it), self.neuron_big_synapses_data)\n\n\n'''\nGLU_syn = self.get_combined_syn_mat(self.network[self.neuron_select_group, ts_group]['GLU'])[0]\n\nselected_GLU_syn = GLU_syn[self.neuron_select_id]\n\nGABA_syn = self.network[self.neuron_select_group, ts_group]['GABA']\nif len(GABA_syn) > 0:\n GABA_syn = self.get_combined_syn_mat(GABA_syn)[0]\n selected_GABA_syn = GABA_syn[self.neuron_select_id]\nelse:\n GABA_syn = None\n\n\n\n\n\nasfdsf\n\n\n\n#exc_shape = (self.network[self.exc_group_name, ts_group].height, self.network[self.exc_group_name, ts_group].width)\nw_img = np.reshape(selected_GLU_syn, exc_shape)\nself.weight_GLU_items[0].setImage(w_img)\n\nif GABA_syn is not None and selected_GABA_syn is not None:\n #inh_shape = (self.network[self.inh_group_name, ts_group].height, self.network[self.inh_group_name, ts_group].width)\n w_img = np.reshape(selected_GABA_syn, inh_shape)\n self.weight_GABA_items[0].setImage(w_img)\nelse:\n self.weight_GABA_items[0].clear()\n'''\n\n'''\n self.graph = pg.GraphItem()\n p=self.Add_plot('', True)\n p.addItem(self.graph)\n #self.Add_Sidebar_Element(self.graph)\n #self.inp_text_label.setText('test')\n\n # Define positions of nodes\n pos = np.array([\n [0, 0],\n [10, 0],\n [0, 10],\n [10, 10],\n [5, 5],\n [15, 5]\n ], dtype=float)\n\n # Define the set of connections in the graph\n adj = np.array([\n [0, 1],\n [1, 3],\n [3, 2],\n [2, 0],\n [1, 5],\n [3, 5],\n ])\n\n # Define the symbol to use for each node (this is optional)\n symbols = ['o', 'o', 'o', 'o', 'o', 'o']\n\n # Define the line style for each connection (this is optional)\n lines = np.array([\n (255, 255, 255, 255, 1),\n (255, 255, 255, 255, 2),\n (255, 255, 255, 255, 3),\n (255, 255, 255, 255, 2),\n (255, 255, 255, 255, 1),\n (255, 255, 255, 255, 4),\n ], dtype=[('red', np.ubyte), ('green', np.ubyte), ('blue', np.ubyte), ('alpha', np.ubyte), ('width', float)])\n\n # Define text to show next to each symbol\n texts = [\"Point %d\" % i for i in range(6)]\n\n # Update the graph\n self.graph.setData(pos=pos, adj=adj, pen=lines, size=1, symbol=symbols, pxMode=False, text=texts)\n #https://stackoverflow.com/questions/46868432/pyqtgraph-change-color-of-node-and-its-edges-on-click\n'''\n\n# def show_info(event):\n# self.info_window.show()\n\n# self.info_btn = QPushButton('info', self.main_window)\n# self.info_btn.clicked.connect(show_info)\n# self.Add_Sidebar_Element(self.info_btn)\n'''\nself.gaba=None\ndef click2(event):\n if self.gaba is not None:\n print('a')\n self.network.NeuronGroups[0].afferent_synapses['GABA'] += self.gaba\n self.gaba=None\n else:\n print('b')\n self.gaba=self.network.NeuronGroups[0].afferent_synapses['GABA']\n self.network.NeuronGroups[0].afferent_synapses['GABA']=[x for x in self.network.NeuronGroups[0].afferent_synapses['GABA'] if x not in self.gaba]\nself.btn2 = QPushButton('inhibition on/off', self.main_window)\nself.btn2.clicked.connect(click2)\nself.Add_Sidebar_Element(self.btn2)\n'''\n\n# canvas = pg.GraphicsLayoutWidget()\n# canvas.setBackground((255, 255, 255))\n# self.Add_Sidebar_Element(canvas)\n# self.plot_main = canvas.addPlot(row=0, col=0)\n# self.plot_main.hideAxis('left')\n# self.plot_main.hideAxis('bottom')\n# self.main_item = 
pg.ImageItem(np.random.rand(291, 291, 3))\n# self.plot_main.addItem(self.main_item)\n\n# p.addItem(pg.TextItem(text=, color=(0, 255, 0), anchor=(0, 0))) # , html='
'\n\n\n# for i,c in enumerate():\n# p.addItem(pg.TextItem(text=c,color=(0,255,0),anchor=(1, i/2)))#, html='
'\n\n# self.graph.show()\n# self.Add_Sidebar_Element(self.graph)\n# self.inp_text_label.setText('test')\n\n# self.network.NeuronGroups = [self.network.NeuronGroups[0]]\n# self.network.SynapseGroups = [self.network.SynapseGroups[0]]\n\n\n# self.network.simulate_iterations(1000, 100, measure_block_time=True)\n\n# self.ng_width = self.network[self.group_name].width\n# self.ng_height = self.network[self.group_name].height\n\n# self.inh_ng_width = self.network[self.inh_group_name].width\n# self.inh_ng_height = self.network[self.inh_group_name].height","sub_path":"Exploration/Network_UI/Network_UI.py","file_name":"Network_UI.py","file_ext":"py","file_size_in_byte":16334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"545434028","text":"import requests\nimport json\nimport pandas as pd\nfrom newsapi.newsapi_client import NewsApiClient\nimport re\n\n# # Extracting:\nnewsapi = NewsApiClient(api_key='af7eaa1b2a2f45bfa765b99def007d26')\n\nurl1 = newsapi.get_everything(q='Canada',\n language='en',\n sort_by='relevancy',\n page_size=100);\nurl2 = newsapi.get_everything(q='University',\n language='en',\n sort_by='relevancy',\n page_size=100)\nurl3 = newsapi.get_everything(q='Dalhousie+University',\n language='en',\n sort_by='relevancy',\n page_size=100)\nurl4 = newsapi.get_everything(q='Halifax',\n language='en',\n sort_by='relevancy',\n page_size=100)\nurl5 = newsapi.get_everything(q='Canada+Education',\n language='en',\n sort_by='relevancy',\n page_size=100)\n\nurl1 = url1['articles']\nurl2 = url2['articles']\nurl3 = url3['articles']\nurl4 = url4['articles']\nurl5 = url5['articles']\n\nurl = url1 + url2 + url3 + url4 + url5\n\ndata = json.dumps(url)\ndata = json.loads(data)\n\n#Cleaning function\ndef cleaning(inputString):\n inputString = re.sub(r'\\[[^\\]]*\\]','',inputString)\n inputString = re.sub(r'http\\S+', '', inputString)\n inputString = re.sub('[^A-Za-z0-9]+',' ', inputString)\n return inputString\n\nfor x in range(len(data)):\n data[x]['content'] = cleaning(str(data[x]['content']))\n\nwith open('newAPI_Data.json','w') as outfile:\n json.dump(data,outfile,indent=4)","sub_path":"newsAPIdataExtraction.py","file_name":"newsAPIdataExtraction.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"298627976","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport utils\nimport numpy as np\nimport argparse\nimport pandas as pd\nimport networkx as nx\n\nfrom tqdm import tqdm\nfrom collections import defaultdict\nfrom utils import create_list_nodes, construct_graph\n\n\ndef create_behavior_df(folder, l_functions):\n \"\"\"Create df containing the features from behavior files.\n \"\"\"\n d_out = defaultdict(dict)\n l_files = os.listdir(folder)\n l_files = [f for f in l_files if \"behavior\" in f]\n for file_2 in tqdm(l_files, desc=\"Computing behavior features\"):\n data = utils.read_behavior_data(os.path.join(folder, file_2))\n indice = utils.get_index(file_2)\n split = utils.get_split(file_2)\n for func in l_functions:\n d_out[indice].update(func(data))\n d_out[indice].update({\"split\": split})\n df = utils.convert_features_dic_to_df(d_out)\n df = df.sort_index()\n return df\n\n\ndef create_behavior_df_path(paths, l_functions):\n d_out = defaultdict(dict)\n for path in tqdm(paths, desc=\"Computing behavior features\"):\n data = utils.read_behavior_data(path)\n indice = utils.get_index(path)\n split = utils.get_split(path)\n for 
func in l_functions:\n d_out[indice].update(func(data))\n d_out[indice].update({\"split\": split})\n df = utils.convert_features_dic_to_df(d_out)\n df = df.sort_index()\n return df\n\n\ndef create_labels_df(labels_filepath):\n with open(labels_filepath, \"r\") as f:\n labels = list(f.read())\n labels = [int(label) for label in labels]\n return pd.DataFrame(labels)\n\n\ndef get_consecutive_same_behavior(df):\n def get_consecutive_ones(l):\n i = 0\n consecutive = []\n while i < len(l) :\n if l[i] == 1:\n j = 1\n while i + j < len(l) and l[i + j] == 1:\n j += 1\n consecutive.append(j + 1)\n i += j\n else:\n i += 1\n return consecutive\n is_equal_to_previous = list((df == df.shift()).apply(\n lambda row: row[\"subprocess_id\"] and row[\"rip\"] and row[\"api\"], axis=1\n ).astype(int))\n consecutive = get_consecutive_ones(is_equal_to_previous)\n return {\n \"consecutive_mean\": np.mean(consecutive) if consecutive else 0,\n \"consecutive_std\": np.std(consecutive) if consecutive else 0,\n \"consecutive_max\": np.max(consecutive) if consecutive else 0,\n \"consecutive_quantile_25\": np.percentile(consecutive, 25) if consecutive else 0,\n \"consecutive_quantile_75\": np.percentile(consecutive, 75) if consecutive else 0,\n \"consecutive_median\": np.median(consecutive) if consecutive else 0\n }\n\n\ndef get_nb_distinct_apis(df):\n \"\"\"Get the number of distinct apis per process.\"\"\"\n return {\"nb_unique_apis\": len(set(df.api))}\n\n\ndef get_nb_api_calls(df):\n \"\"\"Get the number of calls per api across the subprocesses.\"\"\"\n d = dict(df.groupby(\"api\")[\"api\"].count())\n d = {\"_\".join((\"num_calls\", k)): v for (k, v) in d.items()}\n return d\n\n\ndef get_bigram(df):\n bigrams = pd.DataFrame(df[\"api\"].cumsum().apply(lambda x: x[-16:])[1:])\n bigrams[\"count\"] = [1 for _ in range(bigrams.shape[0])]\n return bigrams.groupby(\"api\").count().sort_values(by=\"count\", ascending=False).to_dict()[\"count\"]\n\n\ndef get_nb_rip(df):\n nb_rip = len(set(df[\"rip\"]))\n return {\"nb_rip_unique\": nb_rip}\n\n\ndef get_nb_rip_by_subprocess(df):\n # number of counts\n group_by_rip = df[[\"subprocess_id\", \"rip\"]].groupby(\"subprocess_id\")\n l_counts = group_by_rip.count().values\n mean = l_counts.mean()\n std = l_counts.std()\n sum_var = l_counts.sum()\n median = np.median(l_counts)\n quantile_25 = np.percentile(l_counts, 25)\n quantile_75 = np.percentile(l_counts, 75)\n\n l_counts_unique = group_by_rip.nunique().values\n mean_unique = l_counts_unique.mean()\n std_unique = l_counts_unique.std()\n sum_var_unique = l_counts_unique.sum()\n median_unique = np.median(l_counts_unique)\n quantile_25_unique = np.percentile(l_counts_unique, 25)\n quantile_75_unique = np.percentile(l_counts_unique, 75)\n\n return {\n \"nb_rip_sub_median\": median,\n \"nb_rip_sub_mean\": mean,\n \"nb_rip_sub_std\": std,\n \"nb_rip_sub_quantile_25\": quantile_25,\n \"nb_rip_sub_quantile_75\": quantile_75,\n \"nb_unique_rip_sub_median\": median_unique,\n \"nb_unique_rip_sub_mean\": mean_unique,\n \"nb_unique_rip_sub_std\": std_unique,\n \"nb_unique_rip_sub_quantile_25\": quantile_25_unique,\n \"nb_unique_rip_sub_quantile_75\": quantile_75_unique,\n \"nb_rip_sum\": sum_var,\n \"sum_var_unique\": sum_var_unique,\n }\n\n\ndef get_nb_api_by_rip(df):\n # number of counts\n group_by_rip = df[[\"rip\", \"api\"]].groupby(\"rip\")\n l_counts = group_by_rip.count().values\n mean = l_counts.mean()\n std = l_counts.std()\n median = np.median(l_counts)\n quantile_25 = np.percentile(l_counts, 25)\n quantile_75 = 
np.percentile(l_counts, 75)\n\n # sum_var = l_counts.sum()\n\n l_counts_unique = group_by_rip.nunique().values\n mean_unique = l_counts_unique.mean()\n std_unique = l_counts_unique.std()\n sum_var_unique = l_counts_unique.sum()\n median_unique = np.median(l_counts_unique)\n quantile_25_unique = np.percentile(l_counts_unique, 25)\n quantile_75_unique = np.percentile(l_counts_unique, 75)\n\n return {\n \"nb_api_rip_median\": median,\n \"nb_api_rip_mean\": mean,\n \"nb_api_rip_std\": std,\n \"nb_api_rip_quantile_25\": quantile_25,\n \"nb_api_rip_quantile_75\": quantile_75,\n \"nb_unique_api_rip_median\": median_unique,\n \"nb_unique_api_rip_mean\": mean_unique,\n \"nb_unique_api_rip_std\": std_unique,\n \"nb_unique_api_rip_sum\": sum_var_unique,\n \"nb_unique_api_rip_quantile_25\": quantile_25_unique,\n \"nb_unique_api_rip_quantile_75\": quantile_75_unique,\n # \"nb_api_rip_sum\": sum_var\n }\n\n\ndef get_nb_api_by_subprocess(df):\n # number of counts\n group_by_rip = df[[\"subprocess_id\", \"api\"]].groupby(\"subprocess_id\")\n l_counts = group_by_rip.count().values\n mean = l_counts.mean()\n std = l_counts.std()\n sum_var = l_counts.sum()\n median = np.median(l_counts)\n quantile_25 = np.percentile(l_counts, 25)\n quantile_75 = np.percentile(l_counts, 75)\n\n l_counts_unique = group_by_rip.nunique().values\n mean_unique = l_counts_unique.mean()\n std_unique = l_counts_unique.std()\n sum_var_unique = l_counts_unique.sum()\n median_unique = np.median(l_counts_unique)\n quantile_25_unique = np.percentile(l_counts_unique, 25)\n quantile_75_unique = np.percentile(l_counts_unique, 75)\n\n return {\n \"nb_api_sub_mean\": mean,\n \"nb_api_sub_median\": median,\n \"nb_api_sub_std\": std,\n \"nb_api_sub_quantile_25\": quantile_25,\n \"nb_api_sub_quantile_75\": quantile_75,\n \"nb_unique_api_sub_median\": median_unique,\n \"nb_unique_api_sub_mean\": mean_unique,\n \"nb_unique_api_sub_std\": std_unique,\n \"nb_unique_api_sub_quantile_25\": quantile_25_unique,\n \"nb_unique_api_sub_quantile_75\": quantile_75_unique,\n \"nb_api_sum\": sum_var,\n \"sum_var_unique\": sum_var_unique\n }\n\n\ndef create_graph_features(df, features=[\"nb_cycles\", \"density\", \n \"betweenness_centrality\",\n \"in_degree_centrality\",\n \"out_degree_centrality\",\n \"closeness_centrality\"]):\n subprocess_to_features = defaultdict(list)\n subprocess_to_nodes = create_list_nodes(df)\n for subprocess in subprocess_to_nodes.keys():\n # Create graph\n G = nx.DiGraph()\n edges = construct_graph(subprocess_to_nodes[subprocess])\n G.add_edges_from(edges)\n\n # Compute features\n # Number of cycles\n if \"nb_cycles\" in features:\n nb_cyles = len(list(nx.simple_cycles(G)))\n subprocess_to_features[\"nb_cycles\"].append(nb_cyles)\n # Density\n if \"density\" in features:\n density = nx.density(G)\n subprocess_to_features[\"density\"].append(density)\n\n # Centralities\n if \"betweenness_centrality\" in features:\n betweenness_centrality = nx.betweenness_centrality(G)\n subprocess_to_features[\"betweenness_centrality\"].append(max(betweenness_centrality.values()))\n\n if \"in_degree_centrality\" in features:\n in_degree_centrality = nx.in_degree_centrality(G)\n subprocess_to_features[\"in_degree_centrality\"].append(max(in_degree_centrality.values()))\n\n if \"out_degree_centrality\" in features:\n out_degree_centrality = nx.out_degree_centrality(G)\n subprocess_to_features[\"out_degree_centrality\"].append(max(out_degree_centrality.values()))\n\n if \"closeness_centrality\" in features:\n closeness_centrality = 
nx.closeness_centrality(G)\n subprocess_to_features[\"closeness_centrality\"].append(max(closeness_centrality.values()))\n aggregations = [(np.mean, \"mean\"), (np.std, \"std\"), (np.max, \"max\")]\n features_out = {}\n for agg in aggregations:\n for feat in features:\n features_out[feat + \"_\" + agg[1]] = agg[0](np.array(subprocess_to_features[feat]))\n\n return features_out\n\n\nif __name__ == \"__main__\":\n L_FUNCTIONS = [\n get_nb_rip,\n get_nb_api_by_rip,\n get_nb_distinct_apis,\n get_nb_api_calls,\n get_consecutive_same_behavior,\n get_nb_api_by_subprocess,\n ]\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--input-folder\",\n type=str,\n help=\"Folder containing 'training_index_behavior_sequence'\")\n parser.add_argument(\n \"--output-folder\",\n type=str,\n help=\"Folder to store the resulting dataframe\")\n parser.add_argument(\n \"--output-filename\",\n type=str,\n help=\"Name for the output csv\",\n default=\"behavior_features\")\n parser.add_argument(\n \"--labels-file\",\n type=str,\n help=\"txt file with labels\")\n parser.add_argument(\n \"--labels-output-filename\",\n type=str,\n help=\"Name for the labels output csv\")\n args = parser.parse_args()\n\n # Create the output folder if it does not exist (fall back to \"data\" when --output-folder is not given)\n output_folder = args.output_folder or \"data\"\n if not os.path.isdir(output_folder):\n os.mkdir(output_folder)\n input_folder = args.input_folder\n if input_folder is not None:\n df = create_behavior_df(input_folder)\n df.to_csv(os.path.join(output_folder, \"{}.csv\".format(args.output_filename)))\n\n #labels_filepath = args.labels_file\n #if labels_filepath is not None:\n #df_labels = create_labels_df(labels_filepath)\n #df_labels.to_csv(os.path.join(output_folder, \"{}.csv\".format(args.labels_output_filename)))\n","sub_path":"features_behavior_file.py","file_name":"features_behavior_file.py","file_ext":"py","file_size_in_byte":10890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"52516727","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nfrom light_msgs.msg import UpcomingLight\n\nimport tf\nimport cv2\nimport yaml\nimport sys\nimport os\nimport math\nimport numpy as np\nimport glob\nimport datetime\n\nfrom tl_debug import TLDebug\n\n##### Model constants ############################\n\npath_to_models = os.path.dirname(os.path.realpath(__file__)) + '/light_classification/models'\n\n# Dictionary for model checkpoints, label maps and numbers of classes\nMODEL_DICT = {1: (path_to_models + '/graph_frcnn_resnet_sim_bosch.pb',\n path_to_models + '/label_map_bosch.pbtxt',\n 14),\n 2: (path_to_models + '/graph_frcnn_resnet_real_udacity.pb',\n path_to_models + '/label_map_udacity.pbtxt',\n 4),\n 3: (path_to_models + '/graph_ssd_mobilenet_sim.pb',\n path_to_models + '/label_map_udacity.pbtxt',\n 4)\n }\n\n##### Constants ###############################################################\n\n# Distance threshold to the next traffic light; beyond it, the image is not\n# processed to detect the color of the traffic light indication.\nVISIBLE_DISTANCE = 200\n\n# On/Off switch for classifier.\nCLF_ON = True\n\n# On/Off switch for enabling debug.\nDEBUG_ON = True\n\n# Use the predicted light state. 
\n# Otherwise, use true light state from TrafficLightArray message.\n# Note: set True only if CLF_ON is True.\nUSE_PREDICTION = True\n\n# Minimum score (confidence) for a light detection\nSCORE_THRESHOLD = 0.5\n\nSTATE_COUNT_THRESHOLD = 3\n\n# DISCARD_NUMBER_IMAGES = 8\n\n##### TLDetector Class ########################################################\n\nclass TLDetector(object):\n def __init__(self):\n # Initialization of the Node\n\n rospy.init_node('tl_detector')\n\n # Get configuration of the Node\n \n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n\n # Initialize properties\n\n self.pose = None\n self.waypoints = None\n self.camera_image = None\n\n self.state = TrafficLight.UNKNOWN\n self.last_state = TrafficLight.UNKNOWN\n self.last_wp = -1\n self.state_count = 0\n\n self.lights = []\n self.all_stop_line_wps = None\n self.stop_line_positions = self.config['stop_line_positions']\n\n # The closest waypoint to car\n self.car_position = None\n\n #self.discard_number_images = 0\n\n # Find whether simulator config or site config is introduced\n\n self.is_running_simulator = False # By default is not in simulation\n model_id = 2 # By default uses the udacity real model\n if len(self.config['stop_line_positions']) > 1:\n model_id = 1\n self.is_running_simulator = True\n\n # Subscribe to the Topics\n\n sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n '''\n /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and \n helps you acquire an accurate ground truth data source for the traffic light\n classifier by sending the current color state of all traffic lights in the\n simulator. When testing on the vehicle, the color state will not be available. 
You'll need to\n rely on the position of the light and the camera image to predict it.\n '''\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb) \n\n # Create the Publishers\n\n self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)\n\n self.upcoming_light_pub = rospy.Publisher('/upcoming_light', UpcomingLight, queue_size=1)\n\n # Create OpenCV bridge for ROS\n\n self.bridge = CvBridge()\n\n # Initialize classifier with specified parameters\n if CLF_ON is True:\n print(\"Loading inference graph ...\")\n ckpt, label_map, n_classes = MODEL_DICT[model_id]\n\n # Generate the model PB file\n self.prepare_model_file(ckpt)\n\n # Initialize Classifier\n self.light_classifier = TLClassifier(ckpt, label_map, n_classes, SCORE_THRESHOLD)\n self.light_classifier_on = True\n print(\"Light classifier is running\")\n\n # Set up the transform listener for coordinate transformation\n\n self.listener = tf.TransformListener()\n\n # Set up debugging for the detector\n\n if DEBUG_ON:\n self.debug = TLDebug()\n\n # Subscribe to the Car Camera Image\n\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n rospy.spin()\n\n\n def prepare_model_file(self, model_path):\n \"\"\"Check if the model is in a single file or split into several files.\n If the file is split into several files, it creates a single file with\n all the parts in the right order.\n\n Args:\n model_path (String): model filename\n\n \"\"\"\n if not os.path.exists(model_path): \n wildcard = model_path.replace('.pb','.*')\n files = sorted([file for file in glob.glob(wildcard)])\n\n join_command = 'cat {} > {}'.format(\" \".join(files), model_path)\n os.system(join_command)\n\n def pose_cb(self, msg):\n self.pose = msg\n\n\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints\n\n # Since we get the list of waypoints, now we can identify the waypoints\n # where the stop lines are\n if self.all_stop_line_wps == None and self.waypoints != None:\n self.all_stop_line_wps = self.get_all_stop_line_wps(self.stop_line_positions)\n\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n\n def image_cb(self, msg):\n \"\"\"Identifies red lights in the incoming camera image and publishes the index\n of the waypoint closest to the red light's stop line to /traffic_waypoint\n\n Args:\n msg (Image): image from car-mounted camera\n\n \"\"\"\n # TODO: This is for the problem that happens when the classifier takes too much time\n # and the images pile up. Verify if we need this with GPU.\n # if self.discard_number_images > 0:\n # self.discard_number_images -= 1\n # return\n\n self.has_image = True\n self.camera_image = msg\n\n stop_line_wp, state = self.process_traffic_lights()\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. 
Otherwise the previous stable state is\n used.\n '''\n if self.state != state:\n self.state_count = 0\n self.state = state\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n stop_line_wp = stop_line_wp if state == TrafficLight.RED else -1\n self.last_wp = stop_line_wp\n self.upcoming_red_light_pub.publish(Int32(stop_line_wp))\n else:\n self.upcoming_red_light_pub.publish(Int32(self.last_wp))\n self.state_count += 1\n\n\n def get_unsqrt_distance_between_poses(self, pose_1, pose_2):\n \"\"\"Calculate the unsquared root distance between two poses in a fast way\n To speed up calculations.\n Args:\n pose_1 (Pose): first pose\n pose_2 (Pose): second pose\n\n Returns:\n float: unsquared root distance between two poses\n\n \"\"\"\n\n diff_x = pose_1.position.x - pose_2.position.x\n diff_y = pose_1.position.y - pose_2.position.y\n\n return diff_x*diff_x + diff_y*diff_y\n\n\n def get_distance_between_poses(self, pose_1, pose_2):\n \"\"\"Calculate the distance between two poses\n Args:\n pose_1 (Pose): pose of 1st point\n pose_2 (Pose): pose of 2nd point\n\n Returns:\n float: distance between two poses\n\n \"\"\"\n\n diff_x = pose_1.position.x - pose_2.position.x\n diff_y = pose_1.position.y - pose_2.position.y\n diff_z = pose_1.position.z - pose_2.position.z\n\n return math.sqrt(diff_x*diff_x + diff_y*diff_y + diff_z*diff_z)\n\n\n def get_closest_waypoint(self, pose):\n \"\"\"Identifies the closest path waypoint to the given position\n https://en.wikipedia.org/wiki/Closest_pair_of_points_problem\n http://rosettacode.org/wiki/Closest-pair_problem#Python\n\n Args:\n pose (Pose): position to match a waypoint to\n\n Returns:\n int: index of the closest waypoint (in self.waypoints) to the pose\n\n \"\"\"\n\n min_distance = sys.maxsize\n nearest_waypoint_index = -1\n #print(\"Type of self.waypoints: %s\" % type(self.waypoints))\n\n if self.waypoints != None:\n for i in range(0, len(self.waypoints.waypoints)):\n waypoint = self.waypoints.waypoints[i].pose.pose\n posepoint = pose\n \n # It is not needed to use the sqrt distance, since we need only which waypoint is the nearest.\n # We can use no sqrt distance for fast calculation.\n distance = self.get_unsqrt_distance_between_poses(waypoint, posepoint) \n if distance < min_distance:\n min_distance = distance\n nearest_waypoint_index = i\n\n return nearest_waypoint_index\n\n\n def get_all_stop_line_wps(self, stop_line_positions):\n \"\"\"Find the closest waypoint for each stop line in front of a traffic light\n\n Args:\n stop_line_positions: list of 2D (x, y) position of all stop lines for traffic lights\n\n Returns:\n all_stop_line_wps: list of waypoint indices\n\n \"\"\"\n all_stop_line_wps = []\n pose = Pose()\n\n for i in range(len(stop_line_positions)):\n pose.position.x = stop_line_positions[i][0]\n pose.position.y = stop_line_positions[i][1]\n\n wp = self.get_closest_waypoint(pose)\n all_stop_line_wps.append(wp)\n\n #print(\"Waypoint indices of all stop lines in front of lights:\\n %s\" % all_stop_line_wps)\n return all_stop_line_wps\n\n\n def project_to_image_plane(self, point_in_world):\n \"\"\"Project point from 3D world coordinates to 2D camera image location\n\n Args:\n point_in_world (Point): 3D location of a point in the world\n\n Returns:\n x (int): x coordinate of target point in image\n y (int): y coordinate of target point in image\n\n \"\"\"\n\n #fx = self.config['camera_info']['focal_length_x']\n #fy = self.config['camera_info']['focal_length_y']\n #Current focal lengths are probably wrong, which leads 
incorrect pixel transformation.\n #https://discussions.udacity.com/t/focal-length-wrong/358568\n\n image_width = self.config['camera_info']['image_width']\n image_height = self.config['camera_info']['image_height']\n\n x, y = None, None\n\n # get transform between pose of camera and world frame\n trans, rot = None, None\n try:\n now = rospy.Time.now()\n self.listener.waitForTransform(\"/base_link\", \"/world\", now, rospy.Duration(1.0))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\", \"/world\", now)\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n # http://docs.ros.org/jade/api/tf/html/c++/classtf_1_1Transformer.html\n # https://w3.cs.jmu.edu/spragunr/CS354_S14/labs/tf_lab/html/tf.listener.TransformerROS-class.html\n # http://wiki.ros.org/tf/TfUsingPython\n # http://www.cse.psu.edu/~rtc12/CSE486/lecture12.pdf\n # http://www.cse.psu.edu/~rtc12/CSE486/lecture13.pdf\n # http://slideplayer.com/slide/4547175/\n # http://slideplayer.com/slide/4852283/\n # http://www.ics.uci.edu/~majumder/VC/classes/cameracalib.pdf\n # https://stackoverflow.com/questions/5288536/how-to-change-3d-point-to-2d-pixel-location?rq=1\n # ex. trans = [-1230.0457257142773, -1080.1731777599543, -0.10696510000000001]\n # ex. rot = [0.0, 0.0, -0.0436201197059201, 0.9990481896069084]\n # ex. matrix = [[ 9.96194570e-01 8.71572032e-02 0.00000000e+00 -1.23004573e+03]\n # [ -8.71572032e-02 9.96194570e-01 0.00000000e+00 -1.08017318e+03]\n # [ 0.00000000e+00 0.00000000e+00 1.00000000e+00 -1.06965100e-01]\n # [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 1.00000000e+00]]\n \n #### Forward Projection\n #TODO: Debug rot referenced before assignment\n # World to Camera Transformation (Rigid transformation = rotation + translation)\n if trans != None and rot != None:\n transformation_matrix = self.listener.fromTranslationRotation(trans, rot)\n point_in_world_vector = np.array([[point_in_world.x], [point_in_world.y], [point_in_world.z], [1.0]], dtype=float)\n camera_point = np.dot(transformation_matrix, point_in_world_vector)\n\n #print(\"Point in camera coords: %s\" % camera_point)\n\n # Perspective Correction\n # Instead of using the focal lengths in simulator config,\n # we use values in site config by hard coding.\n fx, fy = 1345.200806, 1353.838257\n x = int(-fx * camera_point[1] / camera_point[0] + image_width / 2)\n y = int(-fy * camera_point[2] / camera_point[0] + image_height / 2)\n\n return (x, y)\n\n\n def get_light_state(self, light):\n \"\"\"Determines the current color of the traffic light\n\n Args:\n light (TrafficLight): light to classify\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n if(not self.has_image):\n self.prev_light_loc = None\n return False\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"rgb8\")\n\n #x, y = self.project_to_image_plane(light.pose.pose.position)\n #print(\"Projected point: (%s, %s)\" %(x, y))\n \n # TODO: We need to get the distance to the stop line. 
Check if we need this apart for the crop method.\n distance_car_tl = self.get_distance_between_poses(self.pose.pose, light.pose.pose) # Need the real distance\n\n #TODO Prepare the image to be classified\n cv_image = self.crop_image(cv_image, distance_car_tl)\n\n #resized_image = cv2.resize(crop_img, (80, 150)) \n\n if DEBUG_ON: # and x != None and y != None:\n self.debug.publish_debug_image(cv_image, distance_car_tl) # Publishing in /debug/image_tl Use 'rqt' to visualize the image\n\n #Get classification\n if self.light_classifier_on is True:\n # Gives central position of the image. This simulates the planar projection method.\n x = int(cv_image.shape[0] / 2)\n y = int(cv_image.shape[1] / 2)\n return self.light_classifier.get_classification(cv_image, (x, y))\n\n return TrafficLight.UNKNOWN\n\n\n def get_upcoming_stop_line_wp(self, car_position, all_stop_line_wps):\n \"\"\"Find the waypoint of the upcoming stop line in front of a light\n\n Args:\n car_position (Int): the closest waypoint to the car\n all_stop_line_wps ([Int]): list of the closest waypoint for each stop line in front of a light\n\n Returns: \n int: the waypoint index of the upcoming stop line in front of a light\n int: the index of the upcoming light in [lights] \n\n \"\"\"\n #Find the interval in which the car is\n interval = 0\n if car_position == 0:\n pass\n else:\n for i in range(len(all_stop_line_wps)):\n if car_position <= all_stop_line_wps[i]:\n interval = i\n break\n #print(\"interval: %s\" % interval)\n\n #Find the upcoming light waypoint and index\n #Note: only go one way along an ascending sequence of waypoints\n stop_line_wp = all_stop_line_wps[interval]\n light_id = interval\n\n return stop_line_wp, light_id\n\n\n def generate_upcominglight_msg(self, waypoint, id, pose, state):\n \"\"\"Generate upcoming light message\n\n Args:\n waypoint: index of waypoint closest to the stop line in front of a traffic light\n id : index of the traffic light in TrafficLightArray\n pose : light pose obtained from /vehicle/traffic_lights\n state : true light state obtained from /vehicle/traffic_lights\n\n Returns:\n msg: message of UpcomingLight type \n\n \"\"\"\n msg = UpcomingLight()\n msg.waypoint = waypoint\n msg.index = id\n msg.pose = pose\n msg.state = state\n return msg\n\n\n def crop_image(self, image, distance):\n \"\"\"Crop the image based on distance\n\n Args:\n image : image to crop\n distance: distance to the stop line\n\n Returns:\n cropped image\n \"\"\"\n result = np.copy(image)\n #print(distance)\n if self.is_running_simulator:\n # calculate top and bottom crop\n top = 0\n bottom = 600\n if distance >= 150:\n top = 530\n bottom = 600\n elif distance >= 55:\n top = 340 + int((distance - 55.0) * ((530.0 - 340.0) / 95.0))\n bottom = 520 + int((distance - 55.0 ) * ((600.0 - 520.0) / 95.0))\n elif distance >= 27:\n top = 0 + int((distance - 27.0) * (340.0 / 28.0))\n bottom = 360 + int((distance - 27.0 ) * (520.0 - 360.0) / 28.0)\n else:\n top = 0\n bottom = 400\n\n result = result[top:bottom]\n \n else:\n # In real carla car, the image contains the front of the car at the bottom of the image,\n # so we can remove that part of the image.\n result = result[0:740]\n\n return result\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n\n Returns:\n int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n light = 
None\n\n reaching_traffic_light = False\n\n light_id = -1\n stop_line_wp_index = -1\n\n # List of positions that correspond to the line to stop in front of for a given intersection\n stop_line_positions = self.config['stop_line_positions']\n\n # Find the waypoint closest to car's current position\n if(self.pose):\n closest_waypoint_to_car = self.get_closest_waypoint(self.pose.pose)\n if closest_waypoint_to_car != -1:\n self.car_position = closest_waypoint_to_car\n\n # Find the closest waypoint for each traffic light (this is removed since we do it when we get the waypoints)\n # if self.all_stop_line_wps == None and self.waypoints != None:\n # self.all_stop_line_wps = self.get_all_stop_line_wps(stop_line_positions)\n\n ### Find the waypoint and index of the upcoming traffic light\n # 1 - Check that we have traffic lights waypoints and car location\n if self.all_stop_line_wps != None and self.car_position != None:\n # Get the location and index of the upcoming nearest traffic light\n stop_line_wp_index, light_id = self.get_upcoming_stop_line_wp(self.car_position, self.all_stop_line_wps)\n #print(\"Upcoming stop line waypoint and index: %s, %s\" % (stop_line_wp, light_id))\n\n # Find the distance between the car and the upcoming light\n if self.pose != None and stop_line_wp_index != None:\n distance_to_stop_line = self.get_distance_between_poses(self.pose.pose, self.waypoints.waypoints[stop_line_wp_index].pose.pose)\n \n # Check if the car is in the range of VISIBLE_DISTANCE in order to proceed with the classification\n if distance_to_stop_line < VISIBLE_DISTANCE:\n reaching_traffic_light = True\n light = self.lights[light_id]\n\n #print(distance_to_stop_line, VISIBLE_DISTANCE, reaching_traffic_light)\n \n # 2 - Check if we are reaching a traffic light, then try to identify the color\n if reaching_traffic_light:\n \n # Predict the light state\n if USE_PREDICTION:\n pred_state = self.get_light_state(light)\n \n upcoming_msg = self.generate_upcominglight_msg(stop_line_wp_index, light_id, self.lights[light_id].pose, pred_state)\n\n self.upcoming_light_pub.publish(upcoming_msg)\n\n #self.discard_number_images = DISCARD_NUMBER_IMAGES\n\n return stop_line_wp_index, pred_state\n\n # Use ground truth traffic light state to be send to the \n upcoming_msg = self.generate_upcominglight_msg(stop_line_wp_index, light_id, self.lights[light_id].pose, self.lights[light_id].state)\n self.upcoming_light_pub.publish(upcoming_msg)\n return stop_line_wp_index, self.lights[light_id].state\n\n return -1, TrafficLight.UNKNOWN\n\n##### Main ####################################################################\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic node.')\n","sub_path":"ros/src/tl_detector/tl_detector.py","file_name":"tl_detector.py","file_ext":"py","file_size_in_byte":22213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"158877797","text":"\nimport rlberry.agents.jax.nets.common as nets\nfrom rlberry.agents.jax.dqn.dqn import DQNAgent\nfrom rlberry.envs import gym_make\nfrom rlberry.stats import AgentStats, MultipleStats, plot_writer_data\n\n\nif __name__ == '__main__':\n\n # global params\n fit_budget = 10000\n n_fit = 2\n\n # env and algorithm params\n env = (gym_make, dict(id='CartPole-v0'))\n params = dict(\n chunk_size=8,\n batch_size=64,\n target_update_interval=500,\n eval_interval=200,\n gamma=0.975,\n lambda_=0.5,\n learning_rate=0.0015,\n 
net_constructor=nets.MLPQNetwork,\n net_kwargs=dict(\n num_actions=env[0](**env[1]).action_space.n,\n hidden_sizes=(64, 64)\n )\n )\n\n params_alternative = params.copy()\n params_alternative.update(\n dict(\n net_kwargs=dict(\n num_actions=env[0](**env[1]).action_space.n,\n hidden_sizes=(16, 16)\n )\n )\n )\n\n stats = AgentStats(\n DQNAgent,\n env,\n fit_budget=fit_budget,\n eval_env=env,\n init_kwargs=params,\n n_fit=n_fit,\n parallelization='process',\n agent_name='dqn',\n )\n\n stats_alternative = AgentStats(\n DQNAgent,\n env,\n fit_budget=fit_budget,\n eval_env=env,\n init_kwargs=params_alternative,\n n_fit=n_fit,\n parallelization='process',\n agent_name='dqn_smaller_net'\n )\n\n # fit everything in parallel\n mstats = MultipleStats()\n mstats.append(stats)\n mstats.append(stats_alternative)\n mstats.run()\n\n plot_writer_data(mstats.allstats, tag='episode_rewards', show=False)\n plot_writer_data(mstats.allstats, tag='dw_time_elapsed', show=False)\n plot_writer_data(mstats.allstats, tag='eval_rewards', show=False)\n plot_writer_data(mstats.allstats, tag='q_loss')\n\n stats.save()\n stats.clear_output_dir()\n","sub_path":"examples/demo_jax_dqn.py","file_name":"demo_jax_dqn.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"121608715","text":"import matplotlib.pyplot as plt\na = [24.84, 6.10, 3.92, 2.74,3.48]\nb = [18.22, 57.68, 9.78, 3.34, 5.26]\nc = [30.12, 77.34, 0.5, 1.94, 2.36]\nI = [0.0, 60.0, 45.0, 30.0, 22.5]\nae = [48.30, 36.24, 23.48, 28.46, 67.66]\nbe = [51.72, 7.78, 15.9, 23.36, 18.88]\nce = [47.74, 15.38, 28.48, 26.7, 22.86]\nplt.errorbar(x=I, y=a, yerr=ae, fmt='o', color='g', label = 'M70_q6_spin1x=0.5')\nplt.errorbar(x=I, y=b, yerr=be, fmt='>', color='b', label = 'M80_q7_spin1x=0.5')\nplt.errorbar(x=I, y=c, yerr=ce, fmt='x', color='r', label = 'M90_q8_spin1x=0.5')\n#plt.plot(I, a, linewidth=2,linestyle='dashed',color='g')\n#plt.plot(I, b, linewidth=2,linestyle='dashed',color='b')\n#plt.plot(I, c, linewidth=2,linestyle='dashed',color='r')\nplt.title(\"Estimation of chi_p with varying inclination\")\nplt.xlabel('Inclination (degrees)')\nplt.ylabel('Percentage error')\nplt.legend(loc='lower right', fontsize=10.5)\nplt.axis([-10, 70, -80, 100])\nplt.savefig(\"Inc_precent_error12.png\")\n","sub_path":"Inclination_scatter.py","file_name":"Inclination_scatter.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"414223980","text":"import sys\nimport os\nfrom time import strftime\n\nlArquivo = \"\"\nbIdent_tipo = \"\"\nbIdent = \"\"\nbNome = \"\"\nbDir = \"\"\nbAgenda = \"\"\nbTipo = \"\"\n\ndataAtual = strftime(\"%H:%M\")\n\n#diretorio_script = \"C:\\\\Reposit\"\ndiretorio_script = os.path.dirname(os.path.realpath(__file__))\n\nprint(\"Args \", len(sys.argv))\nif len(sys.argv) == 4:\n\t# parameters passed in when the script is invoked\n\tcliente_codigo = sys.argv[1]\n\tbackup_nome = sys.argv[2]\n\tbackup_dir = sys.argv[3]\n\tcliente_arquivo = cliente_codigo + \".info\"\n\tprint(cliente_codigo)\n\tprint(backup_nome)\n\tprint(backup_dir)\n\tprint(diretorio_script)\n\tprint(cliente_arquivo)\n\tif os.path.isfile(os.path.join(diretorio_script, cliente_arquivo)):\n\t\tprint(\"Arquivo existe\")\n\t\t# read the client's configuration file\n\t\tarquivo = open(os.path.join(diretorio_script, cliente_arquivo))\n\t\tlinhas = arquivo.readlines()\n\t\tfor l in linhas:\n\t\t\tif backup_nome in l and 
backup_dir in l:\n\t\t\t\tlArquivo = l.split('\\n')[0]\n\t\t\t\tprint(lArquivo)\n\t\t\t\tbIdent_tipo = lArquivo.split(';')[0]\n\t\t\t\tbIdent = lArquivo.split(';')[1]\n\t\t\t\t#bNome = lArquivo.split(';')[2]\n\t\t\t\t#bDir = lArquivo.split(';')[3]\n\t\t\t\tbAgenda = lArquivo.split(';')[4]\n\t\t\t\tbTipo = lArquivo.split(';')[5]\n\t\tif bIdent_tipo and bIdent and bAgenda and bTipo:\n\t\t\tif dataAtual == bAgenda:\n\t\t\t\tip = \"67.205.190.52\"\n\t\t\t\tporta = \"-P 9025 \"\n\t\t\t\tcomando_inicial = \"pscp -scp -r -C \"\n\t\t\t\tdiretorio_destino = \":/smartCloudDO/clientes/\" + cliente_codigo + \"/backups/\" + backup_nome + \"/descompactado/\"\n\n\t\t\t\tcomando = comando_inicial + porta + backup_dir + \" \" + cliente_codigo + \"@\" + ip + diretorio_destino\n\t\t\t\tprint(comando)\n\t\t\t\tos.system(comando)\n\t\t\telse:\n\t\t\t\tprint(\"Nao esta na hora!\")\nelse:\n\tprint(\"Número de argumentos invalido\")\n\nprint(\"\")\n","sub_path":"Digital Ocean/Outros/do-UpdateFiles_WIN.py","file_name":"do-UpdateFiles_WIN.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"642454269","text":"# ****************************************************************************#\n# Copyright (c) 2020 Wandemberg Gibaut #\n# All rights reserved. This program and the accompanying materials #\n# are made available under the terms of the MIT License #\n# which accompanies this distribution, and is available at #\n# https://opensource.org/licenses/MIT #\n# #\n# Contributors: #\n# W. Gibaut #\n# #\n# ****************************************************************************#\n\nimport sys\nimport configparser\n\nconfig = configparser.ConfigParser()\nconfig.read_file(sys.stdin)\n\nfor sec in config.sections():\n print(\"declare -A %s\" % sec)\n for key, val in config.items(sec):\n print('%s[%s]=\"%s\"' % (sec, key, val))\n","sub_path":"examples/docker_examples/docker_compose_tcp/nodes/node2/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"391434681","text":"from django.conf.urls import url\nfrom accounts.views import AccountsLoginView, AccountsLogoutView, RegisterView, AccountsIndexView, UpdateUserView, \\\n AddRealEstateView\n\nurlpatterns = [\n url(r'^minhaconta/$', AccountsIndexView.as_view(), name='index'),\n url(r'^minhaconta/editar/$', UpdateUserView.as_view(), name='update_user'),\n url(r'^minhaconta/anunciar/$', AddRealEstateView.as_view(), name='add_real_estate'),\n url(r'^login/$', AccountsLoginView.as_view(), name='login'),\n url(r'^logout/$', AccountsLogoutView.as_view(), name='logout'),\n url(r'^registrar/$', RegisterView.as_view(), name='register'),\n]\n","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"447589787","text":"import re\nfrom collections import Counter\n\nfrom mrjob.protocol import JSONProtocol\nfrom nltk.corpus import words\n\nfrom MRCount import MRCount\n\nWORD = re.compile(\"\\w+\")\n\n\nclass MRMessageWordCount(MRCount):\n INPUT_PROTOCOL = JSONProtocol\n\n def mapper_init(self):\n self.vocabulary = set(map(str.lower, words.words()))\n self.words = Counter()\n\n def mapper(self, _, email):\n _, message = email\n words = (term for term in WORD.findall(message.lower())\n if term in self.vocabulary)\n\n 
self.words.update(words)\n\n def mapper_final(self):\n for word, occurences in self.words.items():\n yield self.getKey(word), occurences\n\n def reducer(self, word, occurences):\n yield word, sum(occurences)\n\n\nif __name__ == \"__main__\":\n MRMessageWordCount.run()\n","sub_path":"mapreduce/wordoccurences/with_preprocessed_data.py","file_name":"with_preprocessed_data.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"92925472","text":"'''This script loads pre-trained word embeddings (GloVe embeddings)\ninto a frozen Keras Embedding layer, and uses it to\ntrain a text classification model on the 20 Newsgroup dataset\n(classification of newsgroup messages into 20 different categories).\nGloVe embedding data can be found at:\nhttp://nlp.stanford.edu/data/glove.6B.zip\n(source page: http://nlp.stanford.edu/projects/glove/)\n20 Newsgroup data can be found at:\nhttp://www.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.html\n'''\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport numpy as np\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical\n\n\n#jaox imports\nfrom gensim.models import KeyedVectors\nimport _pickle as cPickle\nimport sys, re\nimport xml.etree.ElementTree\nfrom collections import defaultdict\nfrom keras.utils import np_utils\n\n\n#keras\nfrom keras.models import Sequential,Model\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.layers import Input\nfrom keras.layers import Merge\nfrom keras.utils import np_utils\nfrom keras.layers import Conv1D, MaxPooling1D, Embedding, GlobalMaxPooling1D\nfrom keras.layers import Input, Embedding, LSTM, Dense, merge\n\n#functions---------------------------------------------------------------\n\ndef loadvecs(pathfile, vocab , binary=False):\n\tmodel = KeyedVectors.load_word2vec_format(pathfile, binary=binary)\n\tembeddings_index = {}\n\tfor word in vocab:\n\t\tif word in model.vocab:\n\t\t\tembeddings_index[word] = model[word]\n\tdel model\n\n\tprint('Found %s word vectors.' % len(embeddings_index))\n\treturn embeddings_index\n\ndef clean_str(string, TREC=False):\n\tstring = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \n\tstring = re.sub(r\"\\'s\", \" \\'s\", string) \n\tstring = re.sub(r\"\\'ve\", \" \\'ve\", string) \n\tstring = re.sub(r\"n\\'t\", \" n\\'t\", string) \n\tstring = re.sub(r\"\\'re\", \" \\'re\", string) \n\tstring = re.sub(r\"\\'d\", \" \\'d\", string) \n\tstring = re.sub(r\"\\'ll\", \" \\'ll\", string) \n\tstring = re.sub(r\",\", \" , \", string) \n\tstring = re.sub(r\"!\", \" ! \", string) \n\tstring = re.sub(r\"\\(\", \" \\( \", string) \n\tstring = re.sub(r\"\\)\", \" \\) \", string) \n\tstring = re.sub(r\"\\?\", \" \\? 
\", string) \n\tstring = re.sub(r\"\\s{2,}\", \" \", string) \n\treturn string.strip() if TREC else string.strip().lower()\n\n\ndef load_samples(pathfile, pathfiledev , pathfiletest, pathfiletestpolarity , cv=10, clean_string=True):\n\te = xml.etree.ElementTree.parse(pathfile).getroot()\n\trevs = []\n\tvocab = defaultdict(float)\n\n\tfor line in e.findall('tweet'):\n\t\tpolarity = line.find('sentiment').find('polarity').find('value').text\n\n\t\tif polarity == 'N':\n\t\t\tpolarity = 0\n\t\telif polarity == 'P':\n\t\t\tpolarity = 1\n\t\telif polarity == 'NEU':\n\t\t\tpolarity = 2\n\t\telse:\n\t\t\tpolarity = 3\n\n\t\trev = []\n\t\trev.append(line.find('content').text)\n\t\tif clean_string:\n\t\t\torig_rev = clean_str(\" \".join(rev))\n\t\telse:\n\t\t\torig_rev = \" \".join(rev).lower()\n\n\t\twords = set(orig_rev.split())\n\t\tfor word in words:\n\t\t\tvocab[word] += 1\n\n\t\tdatum = {\n\t\t\t\t\t\"is_train\": 1,\n\t\t\t\t\t\"y\":polarity,\n\t\t\t\t\t\"text\": orig_rev,\n\t\t\t\t\t\"num_words\": len(orig_rev.split()),\n\t\t\t\t\t\"split\": np.random.randint(0,cv)}\n\n\t\trevs.append(datum)\n\n\n\t#read the data dev\n\te = xml.etree.ElementTree.parse(pathfiledev).getroot()\n\n\tfor line in e.findall('tweet'):\n\t\tpolarity = line.find('sentiment').find('polarity').find('value').text\n\n\t\tif polarity == 'N':\n\t\t\tpolarity = 0\n\t\telif polarity == 'P':\n\t\t\tpolarity = 1\n\t\telif polarity == 'NEU':\n\t\t\tpolarity = 2\n\t\telse:\n\t\t\tpolarity = 3\n\n\t\trev = []\n\t\trev.append(line.find('content').text)\n\t\tif clean_string:\n\t\t\torig_rev = clean_str(\" \".join(rev))\n\t\telse:\n\t\t\torig_rev = \" \".join(rev).lower()\n\n\t\twords = set(orig_rev.split())\n\t\tfor word in words:\n\t\t\tvocab[word] += 1\n\n\t\tdatum = {\n\t\t\t\t\t\"is_train\": 1,\n\t\t\t\t\t\"y\":polarity,\n\t\t\t\t\t\"text\": orig_rev,\n\t\t\t\t\t\"num_words\": len(orig_rev.split()),\n\t\t\t\t\t\"split\": np.random.randint(0,cv)}\n\n\t\trevs.append(datum)\n\n\n\t#read the data set test\n\tf = open(pathfiletestpolarity)\n\ti = 0\n\tlst_polarity = []\n\tfor polarity in f.read().split():\n\t\tif(i%2 != 0):\n\t\t\tif polarity == 'N':\n\t\t\t\tpolarity = 0\n\t\t\telif polarity == 'P':\n\t\t\t\tpolarity = 1\n\t\t\telif polarity == 'NEU':\n\t\t\t\tpolarity = 2\n\t\t\telse:\n\t\t\t\tpolarity = 3\n\t\t\tlst_polarity.append(polarity)\n\t\ti = i+1\n\te = xml.etree.ElementTree.parse(pathfiletest).getroot()\n\ti = 0\n\tfor line in e.findall('tweet'):\n\t\trev = []\n\t\trev.append(line.find('content').text)\n\t\tif clean_string:\n\t\t\torig_rev = clean_str(\" \".join(rev))\n\t\telse:\n\t\t\torig_rev = \" \".join(rev).lower()\n\n\t\t\n\t\twords = set(orig_rev.split())\n\t\tfor word in words:\n\t\t\tvocab[word] += 1\n\t\t\n\t\tdatum = {\n\t\t\t\t\t\"is_train\": 0,\n\t\t\t\t\t\"y\":lst_polarity[i], \n\t\t\t\t\t\"text\": orig_rev,\n\t\t\t\t\t\"num_words\": len(orig_rev.split()),\n\t\t\t\t\t\"split\": np.random.randint(0,cv)}\n\n\t\trevs.append(datum)\n\t\ti = i + 1\n\n\treturn revs, vocab\n\ndef extract_label(texts , numbers_categories):\n\ttext = []\n\tlabels = []\n\tfor doc in texts:\n\t\tsentence = doc['text']\n\t\ttext.append(sentence)\n\t\tlabels.append(doc['y'])\n\tlabels = np_utils.to_categorical(labels, 4)\n\treturn text, labels\n\n\ndef extract_cv(x, y , cv, index):\n\t\n\tindex_cv = []\n\n\tx_train = []\n\tx_test = []\n\ty_test = []\n\ty_train = []\n\n\n\tfor i in range(len(x)):\n\t\tindex_cv.append(np.random.randint(0,cv))\n\n\n\tfor i in range(len(x)):\n\t\tif index_cv[i] == 
index:\n\t\t\tx_test.append(x[i])\n\t\t\ty_test.append(y[i])\n\t\telse:\n\t\t\tx_train.append(x[i])\n\t\t\ty_train.append(y[i])\n\n\n\treturn np.asarray(x_train), np.asarray(y_train), np.asarray(x_test), np.asarray(y_test)\n\n\ndef format_data_aspect (x_train_text, y_train_text, embedding_matrix1, embedding_matrix2 , embedding_matrix3 , EMBEDDING_DIM, MAX_SEQUENCE_LENGTH ):\n\n\tx_train1= []\n\tx_train2 = []\n\tx_train3 = []\n\n\n\tfor i in range ( len( x_train_text ) ):\n\t\t#for a, aspect in enumerate(x_train_aspect[i]):\n\n\t\tx_sentence1 = np.zeros((MAX_SEQUENCE_LENGTH, EMBEDDING_DIM))\n\t\tx_sentence2 = np.zeros((MAX_SEQUENCE_LENGTH, EMBEDDING_DIM))\n\t\tx_sentence3 = np.zeros((MAX_SEQUENCE_LENGTH, EMBEDDING_DIM))\n\n\t\tfor j,index in enumerate(x_train_text[i]): # iterate over the words in the sentence\n\t\t\tx_sentence1[j] = embedding_matrix1 [ index ]\n\t\t\tx_sentence2[j] = embedding_matrix2 [ index ]\n\t\t\tx_sentence3[j] = embedding_matrix3 [ index ]\n\n\t\tx_train1.append(x_sentence1)\n\t\tx_train2.append(x_sentence2)\n\t\tx_train3.append(x_sentence3)\n\n\treturn np.asarray(x_train1), np.asarray(x_train2), np.asarray(x_train3), np.asarray(y_train_text)\n\n\n#end---------------------------------------------------------------------\n\n\n\n#vars global-------------------------------------------------------------\n\n\n\nMAX_SEQUENCE_LENGTH = 35\nEMBEDDING_DIM = 300\nMAX_NUM_WORDS = 20000\nnum_validation_samples = 1899 \n#end---------------------------------------------------------------------\n\n#main -------------------------------------------------------------------\n\nprint('loading samples text tweets')\ntexts, vocab = load_samples('../datasets/intertass-train-tagged.xml','../datasets/intertass-development-tagged.xml', '../datasets/intertass-test.xml', '../datasets/intertass-sentiment.qrel.txt', cv=3, clean_string=True)\n\nprint('loading word vectors.')\nif False:\n\tprint('loading word vectors from files.')\n\tw2v = loadvecs('../pretrained/sbw_vectors.bin',vocab, binary=True)\n\tft = loadvecs('../pretrained/fasttext_sbwc.vec',vocab, binary=False)\n\tglv = loadvecs('../pretrained/glove_sbwc.vec',vocab, binary=False)\n\tcPickle.dump([w2v, ft, glv], open(\"mr.p\", \"wb\"))\nelse:\n\tprint('loading pre saved vectors.')\n\tx = cPickle.load(open(\"mr.p\",\"rb\"))\n\tw2v, ft, glv = x[0], x[1], x[2]\n\n\ntext, labels = extract_label(texts , 4)\n\n\n## remove words from the text that are not in the vocab\nn_nowords=0\nfor i , sentence in enumerate(text):\n\tsentence_aux = \"\"\n\tfor word in sentence.split():\n\t\tif (word in w2v) and (word in ft) and (word in glv) :\n\t\t\tsentence_aux += \" \" + word + \" \" \n\t\telse:\n\t\t\tn_nowords += 1\n\ttext[i] = sentence_aux\n\nprint('removed ', n_nowords, ' words not in the vocab')\n\n# finally, vectorize the text samples into a 2D integer tensor\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(text)\nsequences = tokenizer.texts_to_sequences(text)\n\n\n# select the most frequent tweet length\nhisto = np.zeros((35))\nfor s in sequences:\n\thisto[len(s)] += 1\n\nimax = 0\ntemp = 0\nfor i, h in enumerate(histo):\n\tif h>temp:\n\t\ttemp = h\n\t\timax = i\n\nMAX_SEQUENCE_LENGTH = 40#imax\n\n\nword_index = tokenizer.word_index\nprint('Found %s unique tokens.' 
% len(word_index))\n\ndata = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)\n\nprint('Shape of data tensor:', data.shape)\nprint('Shape of label tensor:', labels.shape)\n\n# split the data into a training set and a validation set\nx_train = data[:-num_validation_samples]\ny_train = labels[:-num_validation_samples]\nx_val = data[-num_validation_samples:]\ny_val = labels[-num_validation_samples:]\n\n\n\nprint('Preparing embedding matrix.')\n# prepare embedding matrix\nnum_words = min(MAX_NUM_WORDS, len(word_index)+1 ) \nembedding_matrix = np.zeros((num_words, EMBEDDING_DIM))\nembedding_matrix2 = np.zeros((num_words, EMBEDDING_DIM))\nembedding_matrix3 = np.zeros((num_words, EMBEDDING_DIM))\n\n\n\nfor word, i in word_index.items():\n\tif i >= MAX_NUM_WORDS:\n\t\tcontinue\n\tembedding_vector = w2v.get(word)\n\tif embedding_vector is not None:\n\t# words not found in embedding index will be all-zeros.\n\t\tembedding_matrix[i] = embedding_vector\n\t\n\tembedding_vector = ft.get(word)\n\tif embedding_vector is not None:\n\t# words not found in embedding index will be all-zeros.\n\t\tembedding_matrix2[i] = embedding_vector\n\n\t\n\tembedding_vector = glv.get(word)\n\tif embedding_vector is not None:\n\t# words not found in embedding index will be all-zeros.\n\t\tembedding_matrix3[i] = embedding_vector\n\nimport random\nrandom.seed(12345)\n\n\n#define a model\ninp = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\nembeddings_layer = Embedding(\n\t\t\t\tnum_words, # due to mask_zero\n\t\t\t\tEMBEDDING_DIM,\n\t\t\t\tinput_length=MAX_SEQUENCE_LENGTH,\n\t\t\t\tweights=[embedding_matrix3],\n\t\t\t\ttrainable=False\n\t\t\t)(inp)\n\nembeddings_layer_t = Embedding(\n\t\t\t\tnum_words, # due to mask_zero\n\t\t\t\tEMBEDDING_DIM,\n\t\t\t\tinput_length=MAX_SEQUENCE_LENGTH,\n\t\t\t\tweights=[embedding_matrix],\n\t\t\t\ttrainable=True\n\t\t\t)(inp)\n\nembeddings_layer_t2 = Embedding(\n\t\t\t\tnum_words, # due to mask_zero\n\t\t\t\tEMBEDDING_DIM,\n\t\t\t\tinput_length=MAX_SEQUENCE_LENGTH,\n\t\t\t\tweights=[embedding_matrix2],\n\t\t\t\ttrainable=True\n\t\t\t)(inp)\n\n#Convolution\n\nfilter_sizes = [2,2,2]\nfilter_numbers = [100, 100 , 100]\nfilter_pool_lengths = [2,2,2]\n\nconvolution_features_list = []\nfor filter_size,pool_length,num_filters in zip(filter_sizes, filter_pool_lengths, filter_numbers):\n\tconv_layer = Conv1D(nb_filter=num_filters, filter_length=filter_size, activation='relu')(embeddings_layer)\n\tpool_layer = GlobalMaxPooling1D()(conv_layer)\n\t#pool_layer = MaxPooling1D(pool_length=pool_length)(conv_layer)\n\t#flatten = Flatten()(pool_layer)\n\tconvolution_features_list.append(pool_layer)\n\nfor filter_size,pool_length,num_filters in zip(filter_sizes, filter_pool_lengths, filter_numbers):\n\tconv_layer = Conv1D(nb_filter=num_filters, filter_length=filter_size, activation='relu')(embeddings_layer_t)\n\tpool_layer = GlobalMaxPooling1D()(conv_layer)#MaxPooling1D(pool_length=pool_length)(conv_layer)\n\t#flatten = Flatten()(pool_layer)\n\tconvolution_features_list.append(pool_layer)\n\nfor filter_size,pool_length,num_filters in zip(filter_sizes, filter_pool_lengths, filter_numbers):\n\tconv_layer = Conv1D(nb_filter=num_filters, filter_length=filter_size, activation='relu')(embeddings_layer_t2)\n\tpool_layer = GlobalMaxPooling1D()(conv_layer)\n\t#pool_layer = MaxPooling1D(pool_length=pool_length)(conv_layer)\n\t#flatten = Flatten()(pool_layer)\n\tconvolution_features_list.append(pool_layer)\n\n\nout1 = Merge(mode='concat')(convolution_features_list) \nnetwork = Model(input=inp, output=out1)\n\n# 
Model\t\nmodel = Sequential()\nmodel.add(network)\n\n#Add dense layer to complete the model\nmodel.add(Dense(16,init='uniform',activation='relu'))\n#model.add(Dropout(0.2))\nmodel.add( Dense(4, init='uniform', activation='softmax') )\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n\nhistory = model.fit(x_train, y_train, validation_data=(x_val, y_val), nb_epoch=6, batch_size=32)\n\n#from keras.utils.vis_utils import plot_model\n\nimport matplotlib.pyplot as plt\n\n# summarize history for accuracy\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n# summarize history for loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\n#plot_model(model, to_file=\"model_plot.png\", show_shapes=True, show_layer_names=True)","sub_path":"trabajo final/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":12729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"409218263","text":"# -*- coding: utf-8 -*-\nimport json\nimport logging\n\nfrom luckyapi.model.activity import Revealed\nfrom luckyapi.model.show import ShowLite, ShowDetail\nfrom luckyapi.logic.crowdfunding import get_revealed\n\nfrom luckycommon.cache import redis_cache\nfrom luckycommon.account.db import account as account_db\nfrom luckycommon.model.show import SHOW_STATUS\nfrom luckycommon.db.show import (get_user_shows, get_show_by_id,\n get_user_verified_shows, get_latest_highlight_shows,\n get_shows_by_time, get_show_by_template)\nfrom luckycommon.db.activity import get_activitys_by_ids, get_activity, get_activity_win\n\nfrom luckycommon.utils import id_generator\nfrom luckycommon.utils.tz import utc_to_local\nfrom luckycommon.utils.exceptions import AuthenticateError\n\nfrom future.utils import raise_with_traceback\n\n\n_LOGGER = logging.getLogger('lucky')\n\n_DEFAULT_PAGE_SIZE = 20\n\n\ndef _create_show_lite_list(show_list):\n id_list = []\n for s in show_list:\n a_id = id_generator.generate_uuid(\n 'activity:%s:%s' % (s.template_id, s.term_number))\n id_list.append(a_id)\n activitys = get_activitys_by_ids(id_list)\n a_dict = dict([('%s:%s' % (a.template_id, a.term_number), a)\n for a in activitys])\n lite_list = []\n id_set = set()\n for show in show_list:\n lite = ShowLite()\n lite.id = show.id\n if lite.id in id_set:\n continue\n id_set.add(lite.id)\n lite.gid = show.template_id\n lite.term = show.term_number\n activity = a_dict.get('%s:%s' % (lite.gid, lite.term))\n lite.unit = activity.unit\n lite.pk = activity.pk\n lite.goods = {\n 'name': activity.name,\n 'desc': activity.desc,\n 'cover': activity.cover or activity.images.split(',')[0]\n }\n if show.status == SHOW_STATUS.WAIT_SHOW:\n revealed_dict = get_revealed(activity, use_cache=True)\n revealed = Revealed()\n revealed.activity_id = activity.id\n revealed.term = activity.term_number\n revealed.pk = activity.pk\n revealed.target_amount = activity.target_amount\n revealed.name = activity.name\n revealed.lucky_number = revealed_dict['lucky_number']\n revealed.reveal_time = revealed_dict['reveal_time']\n revealed.winner = revealed_dict['winner']\n lite.revealed = revealed\n lite.order_id = show.order_id\n else:\n cached_winner = 
redis_cache.get_activity_winner(activity.id)\n if cached_winner:\n winner_info = json.loads(cached_winner)\n account = account_db.get_account(winner_info.get('uid'), use_cache=True)\n lite.winner = {\n 'uid': winner_info.get('uid'),\n 'avatar': account.avatar_id,\n 'nick_name': account.nick_name,\n 'time': utc_to_local(activity.announced_at).strftime('%Y-%m-%d %H:%M:%S'),\n 'num_count': winner_info.get('num_count') or len(winner_info.get('numbers'))\n }\n else:\n win = get_activity_win(activity.id)\n if win:\n try:\n info = {} if not win.announce_info else json.loads(win.announce_info)\n except Exception:\n _LOGGER.info('#bug# activity id: %s' % activity.id)\n info = {}\n if info.get('winner'):\n winner_info = info['winner']\n number_count = winner_info.get(\n 'num_count') or len(winner_info.get('numbers'))\n account = account_db.get_account(winner_info.get('uid'), use_cache=True)\n lite.winner = {\n 'uid': winner_info.get('uid'),\n 'avatar': account.avatar_id,\n 'nick_name': account.nick_name,\n 'time': utc_to_local(activity.announced_at).strftime('%Y-%m-%d %H:%M:%S'),\n 'num_count': number_count\n }\n lite.title = show.title\n lite.content = show.content\n lite.images = show.images\n lite.status = show.status\n if show.status == SHOW_STATUS.VERIFY_PRE:\n lite.status = SHOW_STATUS.WAIT_VERIFY\n lite.highlight = show.highlight\n lite.show_time = utc_to_local(\n show.updated_at).strftime('%Y-%m-%d %H:%M:%S')\n lite.verified_at = show.verified_at\n lite_list.append(lite)\n\n return lite_list\n\n\ndef view_my_shows(user_id, page, size):\n limit = _DEFAULT_PAGE_SIZE if not size or size > _DEFAULT_PAGE_SIZE else size\n if not page or page < 1:\n page = 1\n offset = 0 if not page else (page - 1) * limit\n show_list = get_user_shows(user_id, limit, offset)\n return _create_show_lite_list(show_list)\n\n\ndef view_other_shows(user_id, page, size):\n limit = _DEFAULT_PAGE_SIZE if not size or size > _DEFAULT_PAGE_SIZE else size\n if not page or page < 1:\n page = 1\n offset = 0 if not page else (page - 1) * limit\n show_list = get_user_verified_shows(user_id, limit, offset)\n return _create_show_lite_list(show_list)\n\n\ndef view_shows_timeline(start_ts, max_ts, template_id=None):\n show_list = []\n if not template_id and start_ts == 0 and max_ts == -1:\n show_list.extend(get_latest_highlight_shows())\n s_list = get_shows_by_time(start_ts, max_ts, template_id)\n show_list.extend(s_list)\n return _create_show_lite_list(show_list)\n\n\ndef _create_show_detail(show):\n detail = ShowDetail()\n detail.id = show.id\n detail.gid = show.template_id\n detail.term = show.term_number\n activity = get_activity(None, detail.gid, detail.term)\n detail.unit = activity.unit\n detail.price = activity.target_amount\n detail.goods = {\n 'name': activity.name,\n 'desc': activity.desc,\n 'cover': activity.cover or activity.images.split(',')[0]\n }\n detail.title = show.title\n detail.content = show.content\n detail.images = show.images\n detail.status = show.status\n if show.status == SHOW_STATUS.VERIFY_PRE:\n detail.status = SHOW_STATUS.WAIT_VERIFY\n detail.show_time = utc_to_local(\n show.updated_at).strftime('%Y-%m-%d %H:%M:%S')\n detail.verified_at = show.verified_at\n detail.highlight = show.highlight\n detail.verify_comment = show.verify_comment\n revealed_dict = get_revealed(activity, use_cache=True)\n revealed = Revealed()\n revealed.activity_id = activity.id\n revealed.term = activity.term_number\n revealed.name = activity.name\n revealed.pk = activity.pk\n revealed.lucky_number = revealed_dict['lucky_number']\n 
revealed.reveal_time = revealed_dict['reveal_time']\n revealed.winner = revealed_dict['winner']\n detail.revealed = revealed\n return detail\n\n\ndef view_show_detail(user_id, show_id):\n show = get_show_by_id(show_id)\n if show.status != SHOW_STATUS.VERIFY_SUCCESS and\\\n show.user_id != user_id:\n raise AuthenticateError('not access')\n return _create_show_detail(show)\n\n\ndef view_my_show_detail(user_id, template_id, term_number):\n show = get_show_by_template(template_id, term_number)\n if show.user_id != user_id:\n raise AuthenticateError('not access')\n return _create_show_detail(show)\n","sub_path":"luckyapi/logic/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":7512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"575730940","text":"import calendar\nimport datetime\nimport json\n\nfrom pyramid.authentication import (\n Authenticated,\n Everyone,\n)\nfrom pyramid.authorization import Allow\nfrom sqlalchemy import (\n Column,\n Integer,\n Text,\n)\nfrom sqlalchemy.dialects.postgresql import ARRAY\n\nfrom perpetualfailure.db import Base\n\n\nclass ObjectEncoder(json.JSONEncoder):\n def default(self, object):\n if hasattr(object, \"__json__\"):\n return object.__json__()\n elif isinstance(object, datetime):\n return calendar.timegm(object.utctimetuple())\n return json.JSONEncoder.default(self, object)\n\n\nclass LS_Background(Base):\n __tablename__ = \"gmod_loadscreen_background\"\n\n id = Column(Integer, primary_key=True, nullable=False)\n map = Column(Text, nullable=False)\n gamemode = Column(Text, nullable=True)\n url = Column(Text, nullable=False)\n\n @property\n def __acl__(self):\n return [\n (Allow, \"acl.p:gmod.background.edit\", \"edit\"),\n (Allow, \"acl.p:gmod.background.create\", \"create\"),\n (Allow, \"acl.p:gmod.background.delete\", \"delete\"),\n ]\n\n\nclass LS_Gamemode(Base):\n __tablename__ = \"gmod_loadscreen_gamemode\"\n\n id = Column(Integer, primary_key=True, nullable=False)\n name = Column(Text, nullable=False)\n title = Column(Text, nullable=False)\n rules = Column(ARRAY(Text), nullable=False)\n extrainfo = Column(ARRAY(Text), nullable=False)\n\n @property\n def __acl__(self):\n return [\n (Allow, \"acl.p:gmod.gamemode.edit\", \"edit\"),\n (Allow, \"acl.p:gmod.gamemode.create\", \"create\"),\n (Allow, \"acl.p:gmod.gamemode.delete\", \"delete\"),\n ]\n\n def __json__(self):\n return {\n \"gamemode\": self.name,\n \"title\": self.title,\n \"rules\": self.rules,\n \"extrainfo\": self.extrainfo,\n }\n","sub_path":"temporals_web/gmod/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"117658584","text":"#!/usr/bin/python\n#-*-coding:utf-8-*-\nfrom youdao import YouDaoAPI\nfrom bsbdj import BsAPI\nfrom qiubai import QbAPI\nfrom qulishi import QulishiAPI\nfrom baiduting import BaiDuTingAPI\nfrom mjtt import MjttAPI\nimport os\n\nclass RobotAPI():\n def __init__(self):\n pass\n\n def responder(self, msgbody):\n res = {'msgtype': '', 'content': ''}\n try:\n if msgbody['msgtype'] == 'text':\n res['msgtype'], res['content'] = RobotService.translator(msgbody['content'])\n if msgbody['msgtype'] == 'event':\n if msgbody['event'] == 'subscribe':\n res['msgtype'], res['content'] = RobotService.welcome()\n elif msgbody['eventkey'] == 'MY_JOKE':\n #res['msgtype'], res['content'] = RobotService.bsjoke()\n res['msgtype'], res['content'] = RobotService.qbjoke()\n elif 
msgbody['eventkey'] == 'MY_HISTORY':\n res['msgtype'], res['content'] = RobotService.qulishi()\n elif msgbody['eventkey'] == 'MY_MUSIC':\n res['msgtype'], res['content'] = RobotService.ting()\n elif msgbody['eventkey'] == 'MY_MEIJU':\n res['msgtype'], res['content'] = RobotService.meiju()\n except:\n res['msgtype'], res['content'] = 'text', u'系统繁忙,请稍后重试'\n if os.getenv('APP_DEBUG') == 'True':\n raise\n return res\n\nclass RobotService():\n def __init__(self):\n pass\n\n # print welcome message\n @classmethod\n def welcome(cls):\n return 'text', u'欢迎关注!'\n\n # translator from youdao API\n @classmethod\n def translator(cls, word):\n youdao = YouDaoAPI()\n return youdao.translator(word)\n\n # get joke from budejie\n @classmethod\n def bsjoke(cls):\n bs = BsAPI()\n return bs.get()\n\n # get QiuBai\n @classmethod\n def qbjoke(cls):\n qb = QbAPI()\n return qb.get()\n\n # get history from qulishi\n @classmethod\n def qulishi(cls):\n qulishi = QulishiAPI()\n return qulishi.get()\n\n # get music from baidu ting\n @classmethod\n def ting(cls):\n bdapi = BaiDuTingAPI()\n return bdapi.get()\n\n # get meiju intro from meijutt\n @classmethod\n def meiju(cls):\n mjapi = MjttAPI()\n return mjapi.get()\n","sub_path":"transaction/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"18255226","text":"\n\ndef countStrokes(A:list):\n strokeSum = 0\n lastHeight = 0\n for skyLine in A:\n heightDiff = skyLine - lastHeight\n if heightDiff > 0:\n strokeSum += heightDiff\n lastHeight = skyLine\n if strokeSum > 1000000000:\n raise MaximumHeightExceeds\n return strokeSum\n\nclass MaximumHeightExceeds(Exception):\n pass\n\n\ndef solution(A):\n try:\n result = countStrokes(A)\n except MaximumHeightExceeds:\n return -1\n return result\n\n\nif __name__ == \"__main__\":\n a = [1, 3, 2, 1, 2, 1, 5, 3, 3, 4, 2]\n b = [5, 8]\n c = [2, 15, 1000000001]\n print(solution(a))\n print(solution(b))\n print(solution(c))","sub_path":"Online Judge/CodingTest/zum01.py","file_name":"zum01.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"487007785","text":"from .helpers import *\nfrom liberouterapi.modules.nemea.supervisor.models import nemea_module as nm_model\n\n\ndef get_modules_names():\n names = set()\n names.add(NEMEA_SR_PREFIX)\n for nemea_module in nm_model.get_all():\n if 'sr-model-prefix' in nemea_module:\n names.add(nemea_module['sr-model-prefix'])\n\n return list(names)\n\n\ndef get_module_by_name(name, model_type=None):\n modules = get_modules_names()\n if name not in modules:\n raise NotFoundException(\"Module '{}' is not installed in sysrepo.\".format(name))\n\n if model_type not in [None, 'YIN', 'YANG', 'TREE']:\n raise InvalidRequest(\"Invalid model type '{}'\".format(model_type))\n\n module = yanglint_get_sr_module(name)\n\n if model_type is not None:\n return {model_type: module[model_type]}\n\n return module\n\n\ndef direct_insert(sysrepo_module_name, data):\n available_models = get_modules_names()\n if sysrepo_module_name not in available_models:\n raise InvalidRequest(\"Sysrepo module '{}' was not found in list of modules this API has access to.\".format(sysrepo_module_name))\n sysrepocfg_merge(sysrepo_module_name, 
data)\n\n","sub_path":"Nemea/backend/supervisor/models/sysrepo.py","file_name":"sysrepo.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"119496386","text":"from __future__ import absolute_import\n\nimport re\nimport unittest\nimport datetime\nfrom mock import patch, Mock\nimport time\n\nfrom mozregression import config, fetch_build_info, fetch_configs, errors\nfrom .test_fetch_configs import create_push\n\n\nclass TestInfoFetcher(unittest.TestCase):\n def setUp(self):\n fetch_config = fetch_configs.create_config('firefox', 'linux', 64,\n 'x86_64')\n self.info_fetcher = fetch_build_info.InfoFetcher(fetch_config)\n\n @patch('requests.get')\n def test__fetch_txt_info(self, get):\n response = Mock(text=\"20141101030205\\nhttps://hg.mozilla.org/\\\nmozilla-central/rev/b695d9575654\\n\")\n get.return_value = response\n expected = {\n 'repository': 'https://hg.mozilla.org/mozilla-central',\n 'changeset': 'b695d9575654',\n }\n self.assertEqual(self.info_fetcher._fetch_txt_info('http://foo.txt'),\n expected)\n\n @patch('requests.get')\n def test__fetch_txt_info_old_format(self, get):\n response = Mock(text=\"20110126030333 e0fc18b3bc41\\n\")\n get.return_value = response\n expected = {\n 'changeset': 'e0fc18b3bc41',\n }\n self.assertEqual(self.info_fetcher._fetch_txt_info('http://foo.txt'),\n expected)\n\n\nclass TestNightlyInfoFetcher(unittest.TestCase):\n def setUp(self):\n fetch_config = fetch_configs.create_config('firefox', 'linux', 64,\n 'x86_64')\n self.info_fetcher = fetch_build_info.NightlyInfoFetcher(fetch_config)\n\n @patch('mozregression.fetch_build_info.url_links')\n def test__find_build_info_from_url(self, url_links):\n url_links.return_value = [\n 'file1.txt.gz',\n 'file2.txt',\n 'firefox01linux-x86_64.txt',\n 'firefox01linux-x86_64.tar.bz2',\n ]\n expected = {\n 'build_txt_url': 'http://foo/firefox01linux-x86_64.txt',\n 'build_url': 'http://foo/firefox01linux-x86_64.tar.bz2',\n }\n builds = []\n self.info_fetcher._fetch_build_info_from_url('http://foo', 0, builds)\n self.assertEqual(builds, [(0, expected)])\n\n @patch('mozregression.fetch_build_info.url_links')\n def test__find_build_info_incomplete_data_raises_exception(self, url_links):\n # We want to find a valid match for one of the build file regexes,\n # build_info_regex. But we will make the build filename regex fail. 
This\n # could happen if, for example, the name of the build file changed in\n # the archive but our tool is still searching with the old build file\n # regex.\n url_links.return_value = [\n \"validinfofilename.txt\",\n \"invalidbuildfilename.tar.bz2\"\n ]\n # build_regex doesn't match any of the files in the web directory.\n self.info_fetcher.build_regex = re.compile(\"xxx\")\n # But build_info_regex does match one file in the directory.\n self.info_fetcher.build_info_regex = re.compile(\"validinfofilename.txt\")\n\n with self.assertRaises(errors.BuildInfoNotFound):\n self.info_fetcher._fetch_build_info_from_url(\"some-url\", 1, [])\n\n @patch('mozregression.fetch_build_info.url_links')\n def test__get_url(self, url_links):\n url_links.return_value = [\n '2014-11-01-03-02-05-mozilla-central/',\n '2014-11-01-03-02-05-foo/',\n 'foo',\n 'bar/'\n ]\n urls = self.info_fetcher._get_urls(datetime.date(2014, 11, 0o1))\n self.assertEqual(\n urls[0],\n fetch_configs.ARCHIVE_BASE_URL +\n '/firefox/nightly/2014/11/2014-11-01-03-02-05-mozilla-central/')\n urls = self.info_fetcher._get_urls(datetime.date(2014, 11, 0o2))\n self.assertEqual(urls, [])\n\n def test_find_build_info(self):\n get_urls = self.info_fetcher._get_urls = Mock(return_value=[\n 'https://archive.mozilla.org/pub/mozilla.org/\\\nbar/nightly/2014/11/2014-11-15-08-02-05-mozilla-central/',\n 'https://archive.mozilla.org/pub/mozilla.org/\\\nbar/nightly/2014/11/2014-11-15-04-02-05-mozilla-central/',\n 'https://archive.mozilla.org/pub/mozilla.org/\\\nbar/nightly/2014/11/2014-11-15-03-02-05-mozilla-central',\n 'https://archive.mozilla.org/pub/mozilla.org/\\\nbar/nightly/2014/11/2014-11-15-02-02-05-mozilla-central/',\n 'https://archive.mozilla.org/pub/mozilla.org/\\\nbar/nightly/2014/11/2014-11-15-01-02-05-mozilla-central/',\n ])\n\n def my_find_build_info(url, index, lst):\n # say only the last build url is invalid\n if url in get_urls.return_value[:-1]:\n return\n lst.append((index, {\n 'build_txt_url': url,\n 'build_url': url,\n }))\n self.info_fetcher._fetch_build_info_from_url = Mock(\n side_effect=my_find_build_info\n )\n self.info_fetcher._fetch_txt_info = Mock(return_value={})\n result = self.info_fetcher.find_build_info(datetime.date(2014, 11, 15))\n # we must have found the last build url valid\n self.assertEqual(result.build_url, get_urls.return_value[-1])\n\n def test_find_build_info_no_data(self):\n self.info_fetcher._get_urls = Mock(return_value=[])\n with self.assertRaises(errors.BuildInfoNotFound):\n self.info_fetcher.find_build_info(datetime.date(2014, 11, 15))\n\n\nclass TestIntegrationInfoFetcher(unittest.TestCase):\n def setUp(self):\n fetch_config = fetch_configs.create_config('firefox', 'linux', 64,\n 'x86_64')\n self.info_fetcher = fetch_build_info.IntegrationInfoFetcher(fetch_config)\n\n @patch('taskcluster.Index')\n @patch('taskcluster.Queue')\n def test_find_build_info(self, Queue, Index):\n Index.return_value.findTask.return_value = {'taskId': 'task1'}\n Queue.return_value.status.return_value = {\n \"status\": {\"runs\": [{\n \"state\": \"completed\",\n \"runId\": 0,\n \"resolved\": '2015-06-01T22:13:02.115Z'\n }]}\n }\n Queue.return_value.listArtifacts.return_value = {\n \"artifacts\": [\n # return two valid artifact names\n {'name': 'firefox-42.0a1.en-US.linux-x86_64.tar.bz2'},\n {'name': 'firefox-42.0a1.en-US.linux-x86_64.txt'},\n ]\n }\n Queue.return_value.buildUrl.return_value = (\n 'http://firefox-42.0a1.en-US.linux-x86_64.tar.bz2'\n )\n self.info_fetcher._fetch_txt_info = \\\n Mock(return_value={'changeset': 
'123456789'})\n\n # test that we start searching using the correct tc root url\n for push_timestamp in [\n 0,\n time.mktime(\n config.TC_ROOT_URL_MIGRATION_FLAG_DATE.timetuple()) + 100\n ]:\n result = self.info_fetcher.find_build_info(\n create_push('123456789', push_timestamp))\n if push_timestamp == 0:\n Index.assert_called_with({'rootUrl': config.OLD_TC_ROOT_URL})\n else:\n Index.assert_called_with({'rootUrl': config.TC_ROOT_URL})\n self.assertEqual(result.build_url,\n 'http://firefox-42.0a1.en-US.linux-x86_64.tar.bz2')\n self.assertEqual(result.changeset, '123456789')\n self.assertEqual(result.build_type, \"integration\")\n\n @patch('taskcluster.Index')\n def test_find_build_info_no_task(self, Index):\n Index.findTask = Mock(\n side_effect=fetch_build_info.TaskclusterFailure\n )\n with self.assertRaises(errors.BuildInfoNotFound):\n self.info_fetcher.find_build_info(\n create_push('123456789', 1))\n\n @patch('taskcluster.Index')\n @patch('taskcluster.Queue')\n def test_get_valid_build_no_artifacts(self, Queue, Index):\n def find_task(route):\n return {'taskId': 'task1'}\n\n def status(task_id):\n return {\"status\": {\"runs\": [{\n \"state\": \"completed\",\n \"runId\": 0,\n \"resolved\": '2015-06-01T22:13:02.115Z'\n }]}}\n\n def list_artifacts(taskid, run_id):\n return {\"artifacts\": []}\n\n Index.findTask = find_task\n Queue.status = status\n Queue.listArtifacts = list_artifacts\n\n with self.assertRaises(errors.BuildInfoNotFound):\n self.info_fetcher.find_build_info(\n create_push('123456789', 1))\n\n @patch('mozregression.json_pushes.JsonPushes.push')\n def test_find_build_info_check_changeset_error(self, push):\n push.side_effect = errors.MozRegressionError\n with self.assertRaises(errors.BuildInfoNotFound):\n self.info_fetcher.find_build_info('123456789',)\n push.assert_called_with('123456789')\n","sub_path":"tests/unit/test_fetch_build_info.py","file_name":"test_fetch_build_info.py","file_ext":"py","file_size_in_byte":8970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"446589557","text":"import re\nimport operator\n\nXSize = 400\nYSize = 400\n\ndef getCoords():\n raw = \"\"\"78, 335\n74, 309\n277, 44\n178, 286\n239, 252\n118, 354\n170, 152\n75, 317\n156, 318\n172, 45\n138, 162\n261, 195\n306, 102\n282, 67\n53, 141\n191, 237\n352, 180\n95, 247\n353, 357\n201, 327\n316, 336\n57, 43\n119, 288\n299, 328\n125, 327\n187, 186\n121, 151\n121, 201\n43, 67\n76, 166\n238, 148\n326, 221\n219, 207\n237, 160\n345, 244\n321, 346\n48, 114\n304, 80\n265, 216\n191, 92\n54, 75\n118, 260\n336, 249\n81, 103\n290, 215\n300, 246\n293, 59\n150, 274\n296, 311\n264, 286\n\"\"\".splitlines()\n\n return [tuple(map(int, coord)) for coord in [re.match(r'(\\d+)\\, (\\d+)', line).groups() for line in raw]]\n\ndef getDistance(x1, y1, x2, y2):\n return abs(abs((x2 - x1)) + abs((y2 - y1)))\n\ndef isInfinite(x,y):\n if x == 0 or y == 0 or x == XSize-1 or y == YSize-1:\n return True\n return False\n\nnames = iter(\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789\")\ncoords = {next(names):coord for coord in getCoords()}\n\nfield = {}\ninfinites = []\n\nfor y in range(0, XSize):\n for x in range(0, YSize):\n # print(x, \" - \" , y)\n shortestDistance = []\n multipleDistances = []\n for key in coords.keys():\n distance = getDistance(coords[key][0], coords[key][1], x, y)\n if shortestDistance:\n if distance < shortestDistance[1]:\n shortestDistance.clear()\n shortestDistance.append(key)\n shortestDistance.append(distance)\n 
multipleDistances.clear()\n elif distance == shortestDistance[1]:\n multipleDistances.append((x,y))\n else:\n shortestDistance.append(key)\n shortestDistance.append(distance)\n # print(shortestDistance)\n if (x,y) in coords.values():\n key = [key for key,value in coords.items() if value == (x,y)][0]\n if(key in field.keys()):\n field[key] += 1 \n else:\n field[key] = 1\n if isInfinite(x,y):\n if key not in infinites:\n infinites.append(key)\n # print(key, end=\" \")\n elif (x,y) in multipleDistances:\n # print(\".\", end=\" \")\n pass\n else:\n # print(shortestDistance[0], end=\" \")\n if(shortestDistance[0] in field.keys()):\n field[shortestDistance[0]] += 1\n else:\n field[shortestDistance[0]] = 1\n if isInfinite(x,y):\n if shortestDistance[0] not in infinites:\n infinites.append(shortestDistance[0])\n # print()\nsorted_by_value = sorted(field.items(), key=lambda kv: kv[1])\n[print(item) for item in sorted_by_value if item[0] not in infinites]","sub_path":"2018/06_01.py","file_name":"06_01.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"217589415","text":"import pytest\nimport networkx as nx\nimport numpy as np\n\nfrom functools import reduce\nfrom itertools import product\n\nlproduct = lambda *args, **kwds: list(product(*args, **kwds))\n\n## d-separation ##\nfrom .d_separation import d_separates, path_d_separates\n\n# Kite graph\n# 0 = season\n# 1 = rain\n# 2 = sprinkler\n# 3 = wet\n# 4 = slippery\nkite = nx.DiGraph()\nkite.add_nodes_from(range(5))\nkite.add_edges_from([\n (0,1), (0,2), (1,3), (2,3), (3,4)\n])\n\n# Basic motifs\nchain = nx.DiGraph()\nchain.add_nodes_from(range(3))\nchain.add_edges_from([\n (0,1), (1,2)\n])\n\nfork = nx.DiGraph()\nfork.add_nodes_from(range(3))\nfork.add_edges_from([\n (1,0), (1,2)\n])\n\ncoll = nx.DiGraph()\ncoll.add_nodes_from(range(3))\ncoll.add_edges_from([\n (0,1), (2,1)\n])\n\n@pytest.mark.parametrize(\n 'g, path, z, blocking_node',\n [\n (kite, [0,1,3], [1,2], 1), # 1 is chain and in [1,2]\n (kite, [0,2,3], [1,2], 2), # 2 is chain and in [1,2]\n (kite, [1,0,2], [0], 0), # 0 is fork and in [0]\n (kite, [1,3,4], [3], 3), # 3 is chain and in [3]\n (kite, [2,3,4], [3], 3), # 3 is chain and in [3]\n (kite, [1,3,2], [0], 3), # 3 is collider and not in [0]\n ]\n)\ndef test_path_blocks(g, path, z, blocking_node):\n blocks, blocking_node_ = path_d_separates(g, path, z, return_node=True)\n assert blocks\n assert blocking_node_ == blocking_node\n\n@pytest.mark.parametrize(\n 'g, path, z',\n [\n (kite, [0,1,3], []), # chain\n (kite, [1,3,2], [3]), # 3 is collider and in [3]\n (kite, [1,3,2], [4]), # 4 descends from collider 3 and is in [4]\n (kite, [1,3,2], [3,4]), # both conditions above\n (kite, [1,3,2], [0,3,4]), # adding a fork (0) doesn't fix it\n ]\n)\ndef test_path_not_blocks(g, path, z):\n blocks, blocking_node_ = path_d_separates(g, path, z, return_node=True)\n assert not blocks\n assert blocking_node_ == None\n\n@pytest.mark.parametrize(\n 'g, x, y, z',\n [\n (chain, [0], [2], [1]),\n (fork, [0], [2], [1]),\n (coll, [0], [2], []),\n (kite, [0], [3], [1,2]), # 2 chains\n (kite, [0], [4], [1,2]), # 2 chains\n (kite, [1], [4], [3]), # 1 chain\n (kite, [2], [4], [3]), # 1 chain\n (kite, [1], [2], [0]), # 1 fork\n ]\n)\ndef test_blocks(g, x, y, z):\n assert d_separates(g, x, y, z)\n\n@pytest.mark.parametrize(\n 'g, x, y, z',\n [\n (chain, [0], [2], []),\n (fork, [0], [2], []),\n (coll, [0], [2], [1]),\n (kite, [1], [2], [3]), # 1 collider\n (kite, [1], 
[2], [4]), # 1 desc of collider\n (kite, [1], [2], [0,3]), # 1 fork + 1 collider\n (kite, [1], [2], [0,3,4]), # 1 fork + 1 collider + 1 desc of collider\n ]\n)\ndef test_not_blocks(g, x, y, z):\n assert not d_separates(g, x, y, z)\n\n\n## random variables ##\nfrom .causal_model import RandomVariable as RV\nfrom .causal_model import RandomVector as RVV\n\ndomains = {\n '_bool' : (False, True),\n '_cat' : ('a', 'b', 'c'),\n '_set' : set(range(10)),\n '_list' : [x**2 for x in range(5)],\n '_interval' : np.linspace(0, 1, 100),\n}\n\npmfs = {\n 'unif_n': lambda n: lambda arg: 1/n,\n 'delta': lambda x0: lambda arg: 1 if arg==x0 else 0,\n}\n\neps = 1e-10\n\n@pytest.mark.parametrize(\n 'domain, name, pmf',\n [(dom, name, pmfs['unif_n']) for name, dom in domains.items()],\n)\ndef test_unif_rv_normalized(domain, name, pmf, tol=eps):\n rv = RV(domain, pmf(len(domain)), name)\n assert abs(sum(rv.pmf(x) for x in rv.domain)-1) < tol\n\n@pytest.mark.parametrize(\n 'domain, name, pmf',\n [(dom, name, pmfs['delta']) for name, dom in domains.items()],\n)\ndef test_delta_rv_normalized(domain, name, pmf, tol=eps):\n for x0 in domain:\n rv = RV(domain, pmf(x0), name)\n assert abs(sum(rv.pmf(x) for x in rv.domain)-1) < tol\n\n@pytest.mark.parametrize(\n 'n',\n range(1, 4)\n)\ndef test_unif_vectors(n, tol=eps):\n doms = lproduct(domains.values(), repeat=n)\n\n for ds in doms:\n rvs = [RV(d, pmfs['unif_n'](len(d))) for d in ds]\n rvv = RVV.from_list(rvs)\n \n _len = lambda it: sum(1 for _ in it)\n _prod = lambda it: reduce(lambda x, y: x*y, it)\n \n assert _len(rvv.domain) == _prod(map(_len, [rv.domain for rv in rvs]))\n assert abs(sum(rvv.pmf(*x) for x in rvv.domain) - 1) < tol\n \n for x in rvv.domain:\n assert rvv.pmf(*x) == _prod(rv.pmf(xx) for xx, rv in zip(x, rvs))\n\n# TODO test CausalModel and CausalMechanism","sub_path":"code/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"566478782","text":"import re\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnfl_draft = [\n './nfl_draft_data/draft_data_cumulative.tsv'\n]\n\nteam_df = pd.concat(pd.read_table(i) for i in nfl_draft)\npositions = team_df.groupby(['Year', 'Round', 'Team'])['Position'].value_counts().index.tolist()\ncounts = team_df.groupby(['Year', 'Round', 'Team'])['Position'].value_counts().tolist()\ncount_df = pd.DataFrame(columns=['Round','Year','Position','Count','Team'])\ncount_df['Position'] = [item[3] for item in positions]\ncount_df['Team'] = [item[2] for item in positions]\ncount_df['Round'] = [item[1] for item in positions]\ncount_df['Year'] = [item[0] for item in positions]\ncount_df['Count'] = counts\n\ncount_df.to_csv('draft_data_cumulative_sums.tsv', sep='\\t', index=False)\n\n# for i in nfl_draft:\n # team_df = pd.read_table(i)\n # title = re.search('(?<=./nfl_draft_data/)....', i)\n # Bootleg Regex \n # team_df.index = team_df['Pick #']\n # team_df['Position'].value_counts().plot.bar()\n # positions = team_df['Position'].value_counts().index.tolist()\n # counts = team_df['Position'].value_counts().tolist()\n # count_df = pd.DataFrame(columns=['Position','Count'])\n # count_df['Position'] = positions\n # count_df['Count'] = counts\n # team_df['Team'] = [\n # \"Offensive\" if ele == \"C\" or ele == \"G\" \n # or ele == \"OT\" or ele == \"OL\" or ele == \"QB\"\n # or ele == \"RB\" or ele == \"WR\" or ele == \"TE\" \n # or ele == \"TB\" or ele == \"FB\" or ele == \"HB\" else 
\"Defensive\" \n # if ele == \"CB\" or ele == \"DB\" or ele == \"DE\" or ele == \"DL\"\n # or ele == \"DT\" or ele == \"FS\" or ele == \"LB\" or ele == \"NT\" \n # or ele == \"S\" or ele == \"SS\" else \"Special\" if isinstance(ele, str) else \"N/A\" for ele in team_df[\"Position\"]]\n # team_df['Player'] = team_df['Player'].str.replace('†', '')\n # team_df['Year'] = title[0]\n # team_df.to_csv(i, sep='\\t', index=False)","sub_path":"football.py","file_name":"football.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"59788550","text":"class Solution:\n def equationsPossible(self, equations: List[str]) -> bool:\n unequal_equations = list()\n group = dict()\n i = 0\n for e in equations:\n if e[0] == e[-1] and e[1] == '!':\n return False\n if e[1] == '=':\n if not e[0] in group and not e[-1] in group:\n group[e[0]] = i\n group[e[-1]] = i\n i = i + 1\n elif e[0] in group and not e[-1] in group:\n group[e[-1]] = group[e[0]]\n elif not e[0] in group and e[-1] in group:\n group[e[0]] = group[e[-1]]\n \n elif group[e[0]] != group[e[-1]]:\n min_index = min(group[e[0]], group[e[-1]])\n max_index = max(group[e[0]], group[e[-1]])\n for g in group:\n if group[g] == max_index:\n group[g] = min_index\n else:\n unequal_equations.append(e)\n \n conflict = list()\n print(group)\n print(unequal_equations)\n for u in unequal_equations:\n if not u[0] in group and not u[-1] in group:\n group[u[0]] = i\n i = i + 1\n group[u[-1]] = i\n i = i + 1\n conflict.append((group[u[0]], group[u[-1]]))\n elif u[0] in group and not u[-1] in group:\n group[u[-1]] = i\n i = i + 1\n conflict.append((group[u[0]], group[u[-1]]))\n elif not u[0] in group and u[-1] in group:\n group[u[0]] = i\n i = i + 1\n conflict.append((group[u[0]], group[u[-1]]))\n elif u[0] in group and u[-1] in group:\n if group[u[0]] == group[u[-1]]:\n return False\n else:\n conflict.append((group[u[0]], group[u[-1]]))\n\n return True\n","sub_path":"2020_02_13/runorz_990.py","file_name":"runorz_990.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"215732410","text":"items = [1, 2, 3, 4, 5]\nsquared = []\nfor x in items:\n\tsquared.append(x ** 2)\nprint (squared)\n\n\n## Custom map function\ndef mymap(aFunc, aSeq):\n\tresult = []\n\tfor x in aSeq: result.append(aFunc(x))\n\treturn result\n\ndef sqr(x): return x ** 2\n\nsquared = mymap(sqr, items)\nprint (squared)\n\n# Since it's a built-in, map is always available and always works the same way.\n# It also has some performance benefit because it is usually faster than a manually coded for loop.\n# On top of those, map can be used in more advance way. 
For example, given multiple sequence arguments, \n# it sends items taken form sequences in parallel as distinct arguments to the function:\n\nsquared = list (map (sqr, items))\nprint (squared)\n\nsquared = list(map((lambda x: x **2), items))\nprint (squared)\n\n# The syntax of map function looks like this\n# map(aFunction, aSequence)\n\ndef square(x):\n return (x**2)\ndef cube(x):\n return (x**3)\n\nfuncs = [square, cube]\nfor r in range(5):\n value = list(map(lambda x: x(r), funcs))\n print (value)\n\n# Advanced usage of map\npowered = list(map(pow,[2, 3, 4], [10, 11, 12]))\nprint (powered)\n\nfiltered = list( filter((lambda x: x < 0), range(-5,5)))\nprint (filtered)\n\nfrom functools import reduce\n\n# Custom version of reduce\ndef myreduce(fnc, seq):\n\ttally = seq[0]\n\tfor next in seq[1:]:\n\t\ttally = fnc(tally, next)\n\treturn tally\n\nresult = myreduce( (lambda x, y: x * y), items)\nprint (result)\nresult = reduce ((lambda x, y : x * y) , items)\nprint (result)\n\nL = ['Testing ', 'shows ', 'the ', 'presence', ', ','not ', 'the ', 'absence ', 'of ', 'bugs']\nprint (' '.join(L))","sub_path":"pythonsite/map-reduce.py","file_name":"map-reduce.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"282229600","text":"# Copyright 2019 XCI JV, LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom flask import Flask, jsonify, render_template, redirect, request\nfrom flask.helpers import url_for\nfrom flask_pyoidc.flask_pyoidc import OIDCAuthentication\nfrom flask_pyoidc.provider_configuration import ProviderConfiguration, ClientMetadata, ProviderMetadata\nfrom flask_pyoidc.pyoidc_facade import PyoidcFacade\nfrom flask_pyoidc.user_session import UserSession\nimport flask\nimport json\nimport logging\nimport os\nimport requests\nimport binascii\nimport urllib.parse\n\nPORT = os.getenv('PORT')\nBASE_URL_SUBDOMAIN = os.getenv('BASE_URL_SUBDOMAIN')\nBASE_URL_DOMAIN = os.getenv('BASE_URL_DOMAIN')\nstate_key = ''\nport_is_valid = not bool(PORT == None)\ncombo = '%s.%s' % (BASE_URL_SUBDOMAIN, BASE_URL_DOMAIN)\nBASE_URL = BASE_URL_DOMAIN if BASE_URL_SUBDOMAIN == None else combo\nif port_is_valid:\n BASE_URL += ':%s' % (PORT)\n\n# no session cookies on localhost\nsession_cookie_domain = None if BASE_URL_DOMAIN == 'localhost' else BASE_URL_DOMAIN\n\napplication = Flask(__name__)\napplication.config.update({'SERVER_NAME': BASE_URL,\n 'SESSION_COOKIE_DOMAIN': session_cookie_domain,\n 'SECRET_KEY': os.getenv('CLIENT_SECRET_BASE')})\n\nauth_params = {'scope': ['openid', 'profile']}\nPROVIDER_NAME = 'xci'\n\nopenid = OIDCAuthentication({})\n\n\n@application.route('/')\ndef index():\n return render_template('home.html')\n\n\n@application.route('/auth')\ndef discovery():\n client_id = os.getenv('CLIENT_ID')\n url = url_for('login', _external=True, _scheme='https')\n global state_key\n state_key = urllib.parse.quote(binascii.b2a_hex(os.urandom(20)))\n return 
redirect('https://discoveryui.myzenkey.com/ui/visual-code?client_id=%s&redirect_uri=%s&state=%s' % (client_id,\n url, state_key))\n\n\n@application.route('/login')\ndef login():\n login_hint_token = request.args.get('login_hint_token')\n state = request.args.get('state')\n mccmnc = request.args.get('mccmnc')\n\n name = getNameForCurrentSession()\n if state != state_key:\n return redirect('/')\n elif name != None:\n return redirect('/logged-in')\n else:\n provider_metadata = get_provider_metadata(login_hint_token, mccmnc)\n client_metadata = get_client_metadata()\n if provider_metadata != None and client_metadata != None:\n # configure provider (i.e. MNO) and client\n config = ProviderConfiguration(\n client_metadata=client_metadata,\n auth_request_params=auth_params,\n provider_metadata=provider_metadata)\n # TODO: once dynamic discovery with XCI providers is working, touch this up / ensure this works to manage logging in with different providers\n url = url_for('login', _external=True)\n # use pyoidc internals to add provider / client to session, no need to store\n UserSession(flask.session, PROVIDER_NAME)\n # instantiate OIDC through facade\n client = PyoidcFacade(config, url)\n # authenticate\n return openid._authenticate(client)\n return render_template('home.html', error='Could not build a valid configuration for the selected provider. Error detail: MCC=%s, MNC=%s' % (mcc, mnc))\n\n\n@application.route('/logged-in')\ndef loginGoogle():\n name = getNameForCurrentSession()\n if name != None:\n return render_template('logged-in.html', name=name)\n else:\n return redirect('/')\n\n\n@application.route('/logout')\ndef logout():\n try:\n openid._logout()\n return redirect('/')\n except:\n # already logged out\n return redirect('/')\n\n\ndef getNameForCurrentSession():\n try:\n user_session = UserSession(flask.session, PROVIDER_NAME)\n if (user_session):\n info = user_session.userinfo\n if info:\n name = info['name']\n return name\n return None\n except Exception as e:\n logging.log(40, e)\n return None\n\n\ndef get_provider_metadata(login_hint_token, mccmnc):\n client_id = os.getenv('CLIENT_ID')\n d_url = os.getenv('DISCOVERY_URL')\n url = '%s?config=true&client_id=%s&login_hint_token=%s&mccmnc=%s' % (\n d_url, client_id, login_hint_token, mccmnc)\n response = requests.get(url)\n response_json = response.json()\n if (response_json == {} or response_json['issuer'] == None):\n return None\n\n metadata = ProviderMetadata(\n issuer=response_json['issuer'],\n authorization_endpoint=response_json['authorization_endpoint'],\n jwks_uri=response_json['jwks_uri'],\n token_endpoint=response_json['token_endpoint'],\n userinfo_endpoint=response_json['userinfo_endpoint'])\n return metadata\n\n\ndef get_client_metadata():\n client_id = os.getenv('CLIENT_ID')\n client_secret = os.getenv('CLIENT_SECRET')\n if client_id == None or client_secret == None:\n return None\n else:\n return ClientMetadata(client_id, client_secret)\n","sub_path":"Examples/Python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"65274701","text":"def get_file(file_name):\r\n matrix=[]\r\n with open(file_name,\"r\") as file:\r\n f=file.readlines()\r\n for i in f:\r\n a=i.split(\"\\n\")[0].split(\" \")\r\n matrix.append([float(j) for j in a])\r\n return matrix\r\n#partial_pivot_swap swaps t row and s row and does not check zero at any position\r\ndef partial_pivot_swap(ar,t,s,b=None):\r\n n = len(ar)\r\n if t<0 or 
t>=n: return\r\n if s<0 or s>=n: return\r\n ar[s], ar[t] = ar[t], ar[s]\r\n if b != None:\r\n b[s], b[t] = b[t], b[s]\r\n#partial_pivot swap only when the diagonal element is zero to a below row when same column element is non-zero\r\ndef partial_pivot(ar,b):\r\n n=len(ar)\r\n zero=[0 for _ in range(n)]\r\n for i in range(n):\r\n if ar[i][i]==0:\r\n if ar[i]==zero:\r\n return False\r\n j=i+1\r\n while j len(self.result) - 1:\n self.result.append([])\n\n self.result[depth].append(root.val)\n\n self.calc(root.left, depth + 1)\n self.calc(root.right, depth + 1)\n\n def levelOrder(self, root):\n if root is None:\n return []\n self.calc(root, 1)\n\n del self.result[0]\n self.result.reverse()\n return self.result\n\n\nroot = TreeNode(3)\nroot.left = TreeNode(9)\nroot.right = TreeNode(20)\nroot.right.left = TreeNode(15)\nroot.right.right = TreeNode(7)\n#root = TreeNode(1)\n#root.left = TreeNode(2)\ns = Solution()\nprint(s.levelOrder(root))\n \n\n","sub_path":"BinaryTreeLevelOrderTraversalII.py","file_name":"BinaryTreeLevelOrderTraversalII.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"535214096","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/xdis/opcodes/opcode_27pypy.py\n# Compiled at: 2020-04-18 17:55:45\n\"\"\"\nPYPY 2.7 opcodes\n\nThis is a like Python 2.7's opcode.py with some classification\nof stack usage.\n\"\"\"\nimport xdis.opcodes.opcode_27 as opcode_27\nfrom xdis.opcodes.base import def_op, finalize_opcodes, init_opdata, jrel_op, name_op, nargs_op, update_pj3\nversion = 2.7\nl = locals()\ninit_opdata(l, opcode_27, version, is_pypy=True)\nname_op(l, 'LOOKUP_METHOD', 201, 1, 2)\nnargs_op(l, 'CALL_METHOD', 202, -1, 1)\nl['hasnargs'].append(202)\ndef_op(l, 'BUILD_LIST_FROM_ARG', 203)\njrel_op(l, 'JUMP_IF_NOT_DEBUG', 204, conditional=True)\nimport sys\nif sys.version_info[:3] >= (2, 7, 13) and sys.version_info[4] >= 42:\n def_op(l, 'LOAD_REVDB_VAR', 205)\nupdate_pj3(globals(), l)\nfinalize_opcodes(l)","sub_path":"pycfiles/xdis-4.4.0-py2.4/opcode_27pypy.py","file_name":"opcode_27pypy.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"348014764","text":"import twitter\n\nif __name__ == \"__main__\":\n\timport sys\n\tfrom t import CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET\n\n\t# Create an Api instance.\n\tapi = twitter.Api(consumer_key=CONSUMER_KEY,\n\t\t\t\t\tconsumer_secret=CONSUMER_SECRET,\n\t\t\t\t\taccess_token_key=ACCESS_TOKEN,\n\t\t\t\t\taccess_token_secret=ACCESS_TOKEN_SECRET,\n\t\t\t\t\tsleep_on_rate_limit=True)\n\t\n\tcursor = -1\n\tif len(sys.argv) == 2:\n\t\tcursor = int(sys.argv[1])\n\n\twhile True:\n\t\tprint(\"Getting blocks. 
Cursor: {}\".format(cursor))\n\t\tnext_cursor, previous_cursor, my_blocks = api.GetBlocksIDsPaged(cursor = cursor)\n\t\tprint(\"Got {} blocks\".format(len(my_blocks)))\n\n\t\tif next_cursor:\n\t\t\tcursor = next_cursor\n\n\t\tfor user_id in my_blocks:\n\t\t\tuser = api.DestroyBlock(user_id = user_id)\n\t\t\tprint(\"Unblocked {} ({})\".format(user.name, user.screen_name))\n\n\t\tif next_cursor == 0 or next_cursor == previous_cursor:\n\t\t\tbreak\n\n\tprint(\"Unblocking complete\")\n","sub_path":"unblock.py","file_name":"unblock.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"105308250","text":"# Create your views here.# Create your views here.\nfrom django.template import Context, loader\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.forms import ModelForm\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponseRedirect\nfrom models import Post, Comment \n\n\ndef post_list(request):\n posts = Post.objects.all()\n \"\"\"html=''\n for i in posts:\n html+= str(i) + '
'\n return HttpResponse(html )\n \"\"\"\n t=loader.get_template('blog/post_list.html')\n c=Context ({'posts':posts})\n return HttpResponse(t.render(c))\n\nclass CommentForm(ModelForm):\n class Meta:\n model = Comment\n exclude = ['post']\n\n@csrf_exempt\ndef post_detail(request, id, showComments):\n post=Post.objects.get(pk=id)\n if request.method == 'POST':\n comment = Comment(post=post)\t\n form = CommentForm(request.POST, instance = comment)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(request.path)\n else:\n form = CommentForm()\n\t\n comment=\"\"\n if showComments != None:\n comment=Comment.objects.filter(post=id)\n \"\"\"if (showComments):\n\tout='

'+post.title+'

'+'
'+post.body\n else:\n\tout=post.title+'
'\n return HttpResponse(out)\"\"\"\n \n return render_to_response('blog/post_detail.html',{'posts':post, 'comments':comment, 'form': form})\n\n@csrf_exempt\ndef edit_comment(request, id,):\n post=Post.objects.get(pk=id)\n if request.method == 'POST':\t\n form = CommentForm(request.POST, instance = post)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(request.path)\n else:\n form = CommentForm()\n return render_to_response('blog/edit_comment.html',{'editcomment':post , 'form':form})\n \t\ndef post_search(request, term):\n\tposts=Post.objects.filter(title__contains=term)\n\treturn render_to_response('blog/post_search.html',{'posts':post, 'term':term})\n\t\n\ndef home(request):\n \"\"\"print 'it works'\n return HttpResponse('hello world. Ete zene?')\"\"\"\n return render_to_response('blog/base.html',{}) \n\n\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"642199828","text":"import cv2\n\nclass VideoCapture:\n def __init__(self, cameraNumber):\n self.cap = cv2.VideoCapture(cameraNumber)\n self.frame = 0\n self.gray = 0\n \n def reading(self):\n _, self.frame = self.cap.read();\n \n def grayConversor(self):\n self.gray = cv2.cvtColor(self.frame, cv2.COLOR_RGB2GRAY)\n \n def showCamera(self, cameraNames):\n # cameraNames are send as an array of names\n for camera in cameraNames:\n if camera == \"gray\":\n if(self.gray.any()):\n cv2.imshow(\"Gray camera\", self.gray)\n else:\n # on debug mode\n print(\"This camera is not initialized.\")\n \n def stop(self):\n self.cap.release();\n cv2.destroyAllWindows()\n\n## end VideoCapture class\n\ndef mainLoop():\n #initializing opencv2\n videocap = VideoCapture(0)\n \n # camera capture loop\n while(True):\n #capturing frame by frame\n videocap.reading()\n \n # converting to gray\n videocap.grayConversor()\n \n #display\n camerasAvailable = [\"gray\"]\n videocap.showCamera(camerasAvailable)\n \n #listening to close event\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break;\n\n videocap.stop()\n\nif __name__==\"__main__\":\n mainLoop()\n \n \n \n","sub_path":"visionSystem/simpleVision/visionModule.py","file_name":"visionModule.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"496416697","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches\n\nimport skimage\nimport skimage.measure\nimport skimage.color\nimport skimage.restoration\nimport skimage.io\nimport skimage.filters\nimport skimage.morphology\nimport skimage.segmentation\n\nfrom nn import *\nfrom q4 import *\n# do not include any more libraries here!\n# no opencv, no sklearn, etc!\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.simplefilter(action='ignore', category=UserWarning)\nfor img in os.listdir('../images'):\n im1 = skimage.img_as_float(skimage.io.imread(os.path.join('../images',img)))\n bboxes, bw = findLetters(im1)\n\n plt.imshow(bw)\n for bbox in bboxes:\n minr, minc, maxr, maxc = bbox\n rect = matplotlib.patches.Rectangle((minc, minr), maxc - minc, maxr - minr,\n fill=False, edgecolor='red', linewidth=2)\n plt.gca().add_patch(rect)\n plt.show()\n\n # find the rows using..RANSAC, counting, clustering, etc.\n y_vals = [bbox[2]-bbox[0] for bbox in bboxes]\n avg_h = sum(y_vals)/len(y_vals) # average Height of boxes\n\n centers = [[(bbox[3] + 
bbox[1])//2, (bbox[2] + bbox[0])//2, bbox[3]-bbox[1],bbox[2]-bbox[0]] for bbox in bboxes]\n centers.sort(key = lambda i:i[1]) # Sort based on x1 values\n x_init = centers[0][1]\n\n row_val = []\n row_list= []\n\n for coord in centers:\n if coord[1] > x_init + avg_h:\n row_val = sorted(row_val, key=lambda coord: coord[0]) #sort based on y values\n row_list.append(row_val)\n x_init = coord[1]\n row_val = [coord]\n\n else:\n row_val.append(coord)\n\n\n row_val = sorted(row_val, key=lambda coord:coord[0]) #sort based on y values\n\n row_list.append(row_val) # coordinates of rectangle rows\n\n im_mat = []\n\n for row in row_list:\n im_row = []\n\n for x_cord, y_cord, w, h in row:\n im_crop = bw[y_cord - h//2 : y_cord + h//2, x_cord - w//2 : x_cord + w//2] # crop the bounding boxes\n\n # note.. before you flatten, transpose the image (that's how the dataset is!)\n # consider doing a square crop, and even using np.pad() to get your images looking more like the dataset\n # pad_h = (np.abs(h-w)//2) +(np.max([h,w])//20)\n if h > w:\n pad_h = h//20\n pad_w = (h-w)//2 + pad_h\n\n else:\n pad_w = w//20\n pad_h = (w-h)//2 + pad_h\n\n im_crop = np.pad(im_crop,((pad_h, pad_h),(pad_w,pad_w)),'constant',constant_values=(1,1))\n im_crop = skimage.transform.resize(im_crop, (32,32))\n im_crop = (skimage.morphology.erosion(im_crop)).T\n crop_flat = im_crop.flatten()\n im_row.append(crop_flat)\n\n im_arr = np.asarray(im_row)\n im_mat.append(im_arr)\n\n\n # load the weights\n # run the crops through your neural network and print them out\n import pickle\n import string\n letters = np.array([_ for _ in string.ascii_uppercase[:26]] + [str(_) for _ in range(10)])\n params = pickle.load(open('q3_weights.pickle','rb'))\n\n for im_arr in im_mat:\n h1 = forward(im_arr,params,'layer1')\n probs = forward(h1,params,'output',softmax)\n pred_loc = np.argmax(probs, axis=1)\n pred_val = ''\n for i in pred_loc:\n pred_val += letters[i]\n print(pred_val)","sub_path":"Neural_Network_Letter_Recognition/hw5/akshitm/run_q4.py","file_name":"run_q4.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"207615001","text":"\n__all__ = []\n\ncommands = {}\n\nextended_commands = []\n\ndef update_helps(commands):\n\tif commands:\n\t\timport lixian_help\n\t\tlixian_help.extended_usage = '''\\nExtended commands:\n''' + lixian_help.join_commands([(x[0], x[1]) for x in commands])\n\n\tfor name, usage, doc in commands:\n\t\tsetattr(lixian_help, name, doc)\n\ndef register_command(command):\n\textended_commands.append(command)\n\tglobal commands\n\tcommands = dict((x.command_name, x) for x in extended_commands)\n\tupdate_helps(sorted((x.command_name, x.command_usage, x.command_help) for x in extended_commands))\n\n\ndef command(name='', usage='', help=''):\n\tdef as_command(f):\n\t\tassert usage, 'missing command usage: ' + f.func_name\n\t\tf.command_name = name or f.func_name.replace('_', '-')\n\t\tf.command_usage = usage\n\t\tf.command_help = help or f.func_doc\n\t\timport textwrap\n\t\tif f.command_help:\n\t\t\tf.command_help = textwrap.dedent(f.command_help)\n\t\tregister_command(f)\n\t\treturn f\n\treturn as_command\n\ndef load_commands():\n\timport os\n\timport os.path\n\timport re\n\tcommand_dir = os.path.dirname(__file__)\n\tcommands = os.listdir(command_dir)\n\tcommands = [re.sub(r'\\.py$', '', p) for p in commands if p.endswith('.py') and not p.startswith('_')]\n\tfor p in commands:\n\t\t__import__('lixian_plugins.commands.' 
+ p)\n\n\n","sub_path":"lixian_plugins/commands/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"385952046","text":"# coding: utf-8\n\n\"\"\"\n Payment Gateway API Specification.\n\n The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501\n\n The version of the OpenAPI document: 21.1.0.20210122.001\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass FraudSettings(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'blocked_items': 'BlockedItems',\n 'maximum_purchase_amount': 'list[MaximumPurchaseAmount]',\n 'lockout_time': 'LockoutTime',\n 'country_profile': 'CountryProfile'\n }\n\n attribute_map = {\n 'blocked_items': 'blockedItems',\n 'maximum_purchase_amount': 'maximumPurchaseAmount',\n 'lockout_time': 'lockoutTime',\n 'country_profile': 'countryProfile'\n }\n\n def __init__(self, blocked_items=None, maximum_purchase_amount=None, lockout_time=None, country_profile=None): # noqa: E501\n \"\"\"FraudSettings - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._blocked_items = None\n self._maximum_purchase_amount = None\n self._lockout_time = None\n self._country_profile = None\n self.discriminator = None\n\n if blocked_items is not None:\n self.blocked_items = blocked_items\n if maximum_purchase_amount is not None:\n self.maximum_purchase_amount = maximum_purchase_amount\n if lockout_time is not None:\n self.lockout_time = lockout_time\n if country_profile is not None:\n self.country_profile = country_profile\n\n @property\n def blocked_items(self):\n \"\"\"Gets the blocked_items of this FraudSettings. # noqa: E501\n\n\n :return: The blocked_items of this FraudSettings. # noqa: E501\n :rtype: BlockedItems\n \"\"\"\n return self._blocked_items\n\n @blocked_items.setter\n def blocked_items(self, blocked_items):\n \"\"\"Sets the blocked_items of this FraudSettings.\n\n\n :param blocked_items: The blocked_items of this FraudSettings. # noqa: E501\n :type: BlockedItems\n \"\"\"\n\n self._blocked_items = blocked_items\n\n @property\n def maximum_purchase_amount(self):\n \"\"\"Gets the maximum_purchase_amount of this FraudSettings. # noqa: E501\n\n\n :return: The maximum_purchase_amount of this FraudSettings. # noqa: E501\n :rtype: list[MaximumPurchaseAmount]\n \"\"\"\n return self._maximum_purchase_amount\n\n @maximum_purchase_amount.setter\n def maximum_purchase_amount(self, maximum_purchase_amount):\n \"\"\"Sets the maximum_purchase_amount of this FraudSettings.\n\n\n :param maximum_purchase_amount: The maximum_purchase_amount of this FraudSettings. # noqa: E501\n :type: list[MaximumPurchaseAmount]\n \"\"\"\n\n self._maximum_purchase_amount = maximum_purchase_amount\n\n @property\n def lockout_time(self):\n \"\"\"Gets the lockout_time of this FraudSettings. # noqa: E501\n\n\n :return: The lockout_time of this FraudSettings. 
# noqa: E501\n :rtype: LockoutTime\n \"\"\"\n return self._lockout_time\n\n @lockout_time.setter\n def lockout_time(self, lockout_time):\n \"\"\"Sets the lockout_time of this FraudSettings.\n\n\n :param lockout_time: The lockout_time of this FraudSettings. # noqa: E501\n :type: LockoutTime\n \"\"\"\n\n self._lockout_time = lockout_time\n\n @property\n def country_profile(self):\n \"\"\"Gets the country_profile of this FraudSettings. # noqa: E501\n\n\n :return: The country_profile of this FraudSettings. # noqa: E501\n :rtype: CountryProfile\n \"\"\"\n return self._country_profile\n\n @country_profile.setter\n def country_profile(self, country_profile):\n \"\"\"Sets the country_profile of this FraudSettings.\n\n\n :param country_profile: The country_profile of this FraudSettings. # noqa: E501\n :type: CountryProfile\n \"\"\"\n\n self._country_profile = country_profile\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, FraudSettings):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"openapi_client/models/fraud_settings.py","file_name":"fraud_settings.py","file_ext":"py","file_size_in_byte":5917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"512048749","text":"#!/usr/bin/env python\r\n# coding:utf-8\r\n\r\nimport sys\r\nimport os\r\nimport glob\r\n\r\nsys.path += glob.glob('%s/*.egg'%os.path.dirname(os.path.abspath(__file__)))\r\n\r\nDummyGeventObject = type('DummyGeventObject', (object,), {'__init__':lambda *a,**kw:sys.stdout.write('*** WARNING: Please install python-gevent 1.0 ***\\n\\n')})\r\ntry:\r\n import gevent\r\n import gevent.core\r\n import gevent.queue\r\n import gevent.monkey\r\n import gevent.coros\r\n import gevent.server\r\n import gevent.pool\r\n import gevent.event\r\n import gevent.timeout\r\n gevent.monkey.patch_all(dns=gevent.version_info[0]>=1)\r\nexcept ImportError:\r\n import platform\r\n sys.stderr.write('WARNING: python-gevent not installed. 
Please ')\r\n if sys.platform.startswith('linux'):\r\n sys.stderr.write('`wget --no-check-certificate --header=\"Host: goagent.googlecode.com\" http://www.google.cn/files/gevent-1.0dev-linux-x86.egg`\\n')\r\n elif sys.platform == 'darwin' and platform.processor() == 'i386':\r\n sys.stderr.write('`wget --no-check-certificate --header=\"Host: goagent.googlecode.com\" http://www.google.cn/files/gevent-1.0dev-macosx-intel.egg`\\n')\r\n elif os.name == 'nt':\r\n sys.stderr.write('visit `https://github.com/SiteSupport/gevent/downloads`\\n')\r\n else:\r\n sys.stderr.write('`sudo easy_install gevent`\\n')\r\n import threading\r\n Queue = __import__('queue') if sys.version[0] == '3' else __import__('Queue')\r\n SocketServer = __import__('socketserver') if sys.version[0] == '3' else __import__('SocketServer')\r\n\r\n def GeventImport(name):\r\n import sys\r\n sys.modules[name] = type(sys)(name)\r\n return sys.modules[name]\r\n def GeventSpawn(target, *args, **kwargs):\r\n return threading._start_new_thread(target, args, kwargs)\r\n def GeventSpawnLater(seconds, target, *args, **kwargs):\r\n def wrap(*args, **kwargs):\r\n import time\r\n time.sleep(seconds)\r\n return target(*args, **kwargs)\r\n return threading._start_new_thread(wrap, args, kwargs)\r\n class GeventServerStreamServer(SocketServer.ThreadingTCPServer):\r\n allow_reuse_address = True\r\n def finish_request(self, request, client_address):\r\n self.RequestHandlerClass(request, client_address)\r\n class GeventServerDatagramServer(SocketServer.ThreadingUDPServer):\r\n allow_reuse_address = True\r\n def __init__(self, server_address, *args, **kwargs):\r\n SocketServer.ThreadingUDPServer.__init__(self, server_address, self.__class__.RequestHandlerClass, *args, **kwargs)\r\n self._writelock = threading.Semaphore()\r\n def sendto(self, *args):\r\n self._writelock.acquire()\r\n try:\r\n self.socket.sendto(*args)\r\n finally:\r\n self._writelock.release()\r\n @staticmethod\r\n def RequestHandlerClass(request, client_addr, server):\r\n data, server_socket = request\r\n return server.handle(data, client_addr)\r\n def handle(self, data, address):\r\n raise NotImplemented()\r\n class GeventPoolPool(object):\r\n def __init__(self, size):\r\n self._lock = threading.Semaphore(size)\r\n def __target_wrapper(self, target, args, kwargs):\r\n t = threading.Thread(target=target, args=args, kwargs=kwargs)\r\n try:\r\n t.start()\r\n t.join()\r\n except Exception as e:\r\n logging.error('threading.Thread target=%r error:%s', target, e)\r\n finally:\r\n self._lock.release()\r\n def spawn(self, target, *args, **kwargs):\r\n self._lock.acquire()\r\n return threading._start_new_thread(self.__target_wrapper, (target, args, kwargs))\r\n\r\n gevent = GeventImport('gevent')\r\n gevent.queue = GeventImport('gevent.queue')\r\n gevent.coros = GeventImport('gevent.coros')\r\n gevent.server = GeventImport('gevent.server')\r\n gevent.pool = GeventImport('gevent.pool')\r\n gevent.sleep = __import__('time').sleep\r\n\r\n gevent.queue.Queue = Queue.Queue\r\n gevent.queue.PriorityQueue = Queue.PriorityQueue\r\n gevent.queue.Empty = Queue.Empty\r\n gevent.coros.Semaphore = threading.Semaphore\r\n gevent.getcurrent = threading.currentThread\r\n gevent.spawn = GeventSpawn\r\n gevent.spawn_later = GeventSpawnLater\r\n gevent.server.StreamServer = GeventServerStreamServer\r\n gevent.server.DatagramServer = GeventServerDatagramServer\r\n gevent.pool.Pool = GeventPoolPool\r\n\r\n gevent.version_info = (1, 0, 0, 'fake')\r\n\r\n del GeventImport, GeventSpawn, GeventSpawnLater,\\\r\n 
GeventServerStreamServer, GeventServerDatagramServer, GeventPoolPool\r\n\r\nimport re\r\nimport time\r\nimport socket\r\nimport struct\r\nimport random\r\ntry:\r\n import ctypes\r\nexcept ImportError:\r\n ctypes = None\r\n\r\nclass Logging(type(sys)):\r\n CRITICAL = 50\r\n FATAL = CRITICAL\r\n ERROR = 40\r\n WARNING = 30\r\n WARN = WARNING\r\n INFO = 20\r\n DEBUG = 10\r\n NOTSET = 0\r\n def __init__(self, *args, **kwargs):\r\n self.level = self.__class__.INFO\r\n if self.level > self.__class__.DEBUG:\r\n self.debug = self.dummy\r\n self.__write = __write = sys.stderr.write\r\n self.isatty = getattr(sys.stderr, 'isatty', lambda:False)()\r\n self.__set_error_color = lambda:None\r\n self.__set_warning_color = lambda:None\r\n self.__reset_color = lambda:None\r\n if self.isatty:\r\n if os.name == 'nt':\r\n SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute\r\n GetStdHandle = ctypes.windll.kernel32.GetStdHandle\r\n self.__set_error_color = lambda:SetConsoleTextAttribute(GetStdHandle(-11), 0x04)\r\n self.__set_warning_color = lambda:SetConsoleTextAttribute(GetStdHandle(-11), 0x06)\r\n self.__reset_color = lambda:SetConsoleTextAttribute(GetStdHandle(-11), 0x07)\r\n elif os.name == 'posix':\r\n self.__set_error_color = lambda:__write('\\033[31m')\r\n self.__set_warning_color = lambda:__write('\\033[33m')\r\n self.__reset_color = lambda:__write('\\033[0m')\r\n @classmethod\r\n def getLogger(cls, *args, **kwargs):\r\n return cls(*args, **kwargs)\r\n def basicConfig(self, *args, **kwargs):\r\n self.level = kwargs.get('level', self.__class__.INFO)\r\n if self.level > self.__class__.DEBUG:\r\n self.debug = self.dummy\r\n def log(self, level, fmt, *args, **kwargs):\r\n self.__write('%s - - [%s] %s\\n' % (level, time.ctime()[4:-5], fmt%args))\r\n def dummy(self, *args, **kwargs):\r\n pass\r\n def debug(self, fmt, *args, **kwargs):\r\n self.log('DEBUG', fmt, *args, **kwargs)\r\n def info(self, fmt, *args, **kwargs):\r\n self.log('INFO', fmt, *args)\r\n def warning(self, fmt, *args, **kwargs):\r\n self.__set_warning_color()\r\n self.log('WARNING', fmt, *args, **kwargs)\r\n self.__reset_color()\r\n def warn(self, fmt, *args, **kwargs):\r\n self.warning(fmt, *args, **kwargs)\r\n def error(self, fmt, *args, **kwargs):\r\n self.__set_error_color()\r\n self.log('ERROR', fmt, *args, **kwargs)\r\n self.__reset_color()\r\n def exception(self, fmt, *args, **kwargs):\r\n self.error(fmt, *args, **kwargs)\r\n traceback.print_exc(file=sys.stderr)\r\n def critical(self, fmt, *args, **kwargs):\r\n self.__set_error_color()\r\n self.log('CRITICAL', fmt, *args, **kwargs)\r\n self.__reset_color()\r\nlogging = sys.modules['logging'] = Logging('logging')\r\n\r\nclass DNSUtil(object):\r\n \"\"\"\r\n http://gfwrev.blogspot.com/2009/11/gfwdns.html\r\n http://zh.wikipedia.org/wiki/域名服务器缓存污染\r\n http://support.microsoft.com/kb/241352\r\n \"\"\"\r\n blacklist = set([\r\n '1.1.1.1', '255.255.255.255', # for ipv6\r\n '74.125.127.102', '74.125.155.102', '74.125.39.113', '209.85.229.138', # for google+\r\n '128.121.126.139', '159.106.121.75', '169.132.13.103', '192.67.198.6',\r\n '202.106.1.2', '202.181.7.85', '203.161.230.171', '203.98.7.65',\r\n '207.12.88.98', '208.56.31.43', '209.145.54.50', '209.220.30.174',\r\n '209.36.73.33', '211.94.66.147', '213.169.251.35', '216.221.188.182',\r\n '216.234.179.13', '243.185.187.39', '37.61.54.158', '4.36.66.178',\r\n '46.82.174.68', '59.24.3.173', '64.33.88.161', '64.33.99.47',\r\n '64.66.163.251', '65.104.202.252', '65.160.219.113', '66.45.252.237',\r\n '72.14.205.104', 
'72.14.205.99', '78.16.49.15', '8.7.198.45', '93.46.8.89',\r\n ])\r\n max_retry = 1\r\n max_wait = 3\r\n\r\n @staticmethod\r\n def _reply_to_iplist(data):\r\n assert isinstance(data, basestring)\r\n iplist = ['.'.join(str(ord(x)) for x in s) for s in re.findall('\\xc0.\\x00\\x01\\x00\\x01.{6}(.{4})', data) if all(ord(x)<=255 for x in s)]\r\n return iplist\r\n\r\n @staticmethod\r\n def is_bad_reply(data):\r\n assert isinstance(data, basestring)\r\n iplist = ['.'.join(str(ord(x)) for x in s) for s in re.findall('\\xc0.\\x00\\x01\\x00\\x01.{6}(.{4})', data) if all(ord(x)<=255 for x in s)]\r\n iplist += ['.'.join(str(ord(x)) for x in s) for s in re.findall('\\x00\\x01\\x00\\x01.{6}(.{4})', data) if all(ord(x)<=255 for x in s)]\r\n return any(x in DNSUtil.blacklist for x in iplist)\r\n\r\n @staticmethod\r\n def _remote_resolve(dnsserver, qname, timeout=None):\r\n if isinstance(dnsserver, tuple):\r\n dnsserver, port = dnsserver\r\n else:\r\n port = 53\r\n for i in xrange(DNSUtil.max_retry):\r\n host = ''.join(chr(len(x))+x for x in qname.split('.'))\r\n seqid = os.urandom(2)\r\n data = '%s\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00%s\\x00\\x00\\x01\\x00\\x01' % (seqid, host)\r\n address_family = socket.AF_INET6 if ':' in dnsserver else socket.AF_INET\r\n sock = None\r\n try:\r\n if i < DNSUtil.max_retry-1:\r\n # UDP mode query\r\n sock = socket.socket(family=address_family, type=socket.SOCK_DGRAM)\r\n sock.settimeout(timeout)\r\n sock.sendto(data, (dnsserver, port))\r\n for i in xrange(DNSUtil.max_wait):\r\n data = sock.recv(512)\r\n if data and not DNSUtil.is_bad_reply(data):\r\n return data[2:]\r\n else:\r\n logging.warning('DNSUtil._remote_resolve(dnsserver=%r, %r) return position udp data=%r', qname, dnsserver, data)\r\n else:\r\n # TCP mode query\r\n sock = socket.socket(family=address_family, type=socket.SOCK_STREAM)\r\n sock.settimeout(timeout)\r\n sock.connect((dnsserver, port))\r\n data = struct.pack('>h', len(data)) + data\r\n sock.send(data)\r\n rfile = sock.makefile('r', 512)\r\n data = rfile.read(2)\r\n if not data:\r\n logging.warning('DNSUtil._remote_resolve(dnsserver=%r, %r) return bad tcp header data=%r', qname, dnsserver, data)\r\n data = rfile.read(struct.unpack('>h', data)[0])\r\n if data and not DNSUtil.is_bad_reply(data):\r\n return data[2:]\r\n else:\r\n logging.warning('DNSUtil._remote_resolve(dnsserver=%r, %r) return bad tcp data=%r', qname, dnsserver, data)\r\n except socket.error as e:\r\n if e[0] in (10060, 'timed out'):\r\n continue\r\n except Exception as e:\r\n raise\r\n finally:\r\n if sock:\r\n sock.close()\r\n\r\n @staticmethod\r\n def remote_resolve(dnsserver, qname, timeout=None):\r\n data = DNSUtil._remote_resolve(dnsserver, qname, timeout)\r\n iplist = DNSUtil._reply_to_iplist(data or '')\r\n return iplist\r\n\r\n\r\nclass DNSServer(getattr(gevent.server, 'DatagramServer', DummyGeventObject)):\r\n \"\"\"DNS Proxy over TCP to avoid DNS poisoning\"\"\"\r\n dnsservers = ['8.8.8.8', '8.8.4.4']\r\n max_wait = 1\r\n max_retry = 2\r\n max_cache_size = 2000\r\n timeout = 6\r\n\r\n def __init__(self, *args, **kwargs):\r\n gevent.server.DatagramServer.__init__(self, *args, **kwargs)\r\n self.cache = {}\r\n def handle(self, data, address):\r\n reqid = data[:2]\r\n domain = data[12:data.find('\\x00', 12)]\r\n if len(self.cache) > self.max_cache_size:\r\n self.cache.clear()\r\n if domain not in self.cache:\r\n qname = re.sub(r'[\\x01-\\x29]', '.', domain[1:])\r\n try:\r\n dnsserver = random.choice(self.dnsservers)\r\n logging.info('DNSServer resolve domain=%r by 
dnsserver=%r to iplist', qname, dnsserver)\r\n data = DNSUtil._remote_resolve(dnsserver, qname, self.timeout)\r\n if not data:\r\n logging.warning('DNSServer resolve domain=%r return data=%s', qname, data)\r\n return\r\n iplist = DNSUtil._reply_to_iplist(data)\r\n self.cache[domain] = data\r\n logging.info('DNSServer resolve domain=%r return iplist=%s', qname, iplist)\r\n except socket.error as e:\r\n logging.error('DNSServer resolve domain=%r to iplist failed:%s', qname, e)\r\n return self.sendto(reqid + self.cache[domain], address)\r\n\r\n\r\ndef main():\r\n logging.basicConfig(level=logging.DEBUG, format='%(levelname)s - %(asctime)s %(message)s', datefmt='[%b %d %H:%M:%S]')\r\n address = ('', 53)\r\n server = DNSServer(address)\r\n logging.info('serving at %r', address)\r\n server.serve_forever()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"dnsproxy.py","file_name":"dnsproxy.py","file_ext":"py","file_size_in_byte":14058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"208050537","text":"import sys, os\nimport numpy as np\nimport pandas as pd\nimport json\n\n\n#This program takes a \"data\" file, and cleans it into the infomration we are concened with\n\ndef sortMtoL(e):\n if(e[13] == 0):\n return 1\n return e[12]/e[13]\ndef sortMLtoT(e):\n if(e[11] == 0):\n return 1\n return e[2]/e[11]\n\ninfile = \"data.csv\" \nsort = input('Type 1 for sorting method 1, 2 for method 2 ')\nprint(sort)\n\nif (not (sort == 1 or sort == 2)):\n exit()\n \nclassifications = pd.read_csv(infile)\n# write the header line for each of the files\n# each has the basic classification information + the mark information\n# including sanity check stuff + stuff we may never need, like the tool number\n# and the frame the user drew the mark on, respectively\n\n# the other/interesting marker is an ellipse+tag: {(x, y), (rx, ry), angle, text}\n\n\n\ni = 0\n\ndata = []\n\nfor i, row in enumerate(classifications.iterrows()):\n # row[0] is the index, [1] is the classification info\n cl = row[1]\n \n ra = 0.0 + cl['ra']\n dec = 0.0 + cl['dec']\n disk = 0.0 + cl['t01_smooth_or_features_a02_features_or_disk_weighted_fraction']\n faceon = 0.0 + cl['t02_edgeon_a05_no_weighted_fraction']\n spiral = 0.0 + cl['t04_spiral_a08_spiral_weighted_fraction']\n spiral_W = disk * faceon * spiral + 0.0\n bulge_1 = 0.0 + cl['t05_bulge_prominence_a10_no_bulge_weighted_fraction']\n bulge_2 = 0.0 + cl['t05_bulge_prominence_a11_just_noticeable_weighted_fraction']\n bulge_3 = 0.0 + cl['t05_bulge_prominence_a12_obvious_weighted_fraction']\n bulge_4 = 0.0 + cl['t05_bulge_prominence_a13_dominant_weighted_fraction']\n arms_tight = 0.0 + cl['t10_arms_winding_a28_tight_weighted_fraction']\n arms_medium = 0.0 + cl['t10_arms_winding_a29_medium_weighted_fraction']\n arms_loose = 0.0 + cl['t10_arms_winding_a30_loose_weighted_fraction']\n arms_1 = 0.0 + cl['t11_arms_number_a31_1_weighted_fraction']\n arms_2 = 0.0 + cl['t11_arms_number_a32_2_weighted_fraction']\n arms_3 = 0.0 + cl['t11_arms_number_a33_3_weighted_fraction']\n arms_4 = 0.0 + cl['t11_arms_number_a34_4_weighted_fraction']\n arms_5 = 0.0 + cl['t11_arms_number_a36_more_than_4_weighted_fraction']\n arms_6 = 0.0 + cl['t11_arms_number_a37_cant_tell_weighted_fraction']\n irregular = 0.0 + cl['t08_odd_feature_a22_irregular_weighted_fraction']\n LplusM = arms_loose + arms_medium\n \n\n # the image which was classified\n #print (subject_id)\n #print(\"%f\" % faceon)\n #and not (bulge_1+bulge_2 > .67 and arms_tight > .67) and not 
(bulge_4 > .67 and arms_loose > .67)\n \n \n \n if(spiral_W > .67 and LplusM > .67 and (1-irregular) > .67 and not (bulge_1+bulge_2 > .67 and arms_tight > .67) and not (bulge_4 > .67 and arms_loose > .67)):\n data.append([ra, dec, LplusM, disk, faceon, spiral, spiral_W, bulge_1, bulge_2, bulge_3, bulge_4, arms_tight, arms_medium, arms_loose, arms_1, arms_2, arms_3, arms_4, arms_5, arms_6, irregular])\n #print(i)\n i = i+1\n \nif (sort == 1):\n data.sort(key=sortMtoL)\nif (sort == 2):\n data.sort(key=sortMLtoT)\n \nfoth = open(\"SelectedSpirals.csv\", \"w\") \n\nfoth.write(\"Dr, RA, DA, spiral_weighted, loose_plus_medium, arms_tight, arms_medium, arms_loose, irregular \\n\")\n\n\nfor list in data:\n foth.write(\"%f, %f, %f, %f, %f, %f, %f, %f, %f\\n\" % (0.0, list[0], list[1], list[6], list[2], list[11], list[12], list[13], list[20]))\n\n\nfoth.close()\n\n\nprint(\"Saved %d marks from %d classifications to clean.csv.\" % (i, len(classifications)))\n\n\n\n\n#\n","sub_path":"ZooniverseSpiralData/snd.py","file_name":"snd.py","file_ext":"py","file_size_in_byte":3541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"522177782","text":"# path of the tcpdump\nimport os\nimport subprocess\n\ntcpdump = '/usr/sbin/tcpdump'\n\nclass TcpDump:\n def __init__(self,pcap_file):\n self.pcap_file = pcap_file\n self.proc = None\n\n if not os.path.isfile(tcpdump):\n raise 'Cannot find tcpdump in ' + tcpdump\n\n def start(self,duration,iface):\n pargs = [tcpdump,'-i',iface,'-G',duration]\n pargs.extend(['-w',self.pcap_file])\n self.proc = subprocess.Popen(pargs)\n\n def stop(self):\n if self.proc != None and self.proc.poll() == None :\n self.proc.terminate()\n\n def read(self):\n proc = subprocess.Popen([tcpdump,'-r',self.pcap_file],stdout=subprocess.PIPE)\n return proc.communicate()[0]\n\n\nif __name__ == \"__main__\":\n Dump = TcpDump(\"init.pcap\")\n Dump.start(\"60\",\"wlp2s0\")\n Dump.stop()\n print(Dump.read())\n","sub_path":"RT-NIDS-Malicous-URL-Detection-master/scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"26733982","text":"\"\"\"\n@brief test log(time=1s)\n\nYou should indicate a time in seconds. 
The program ``run_unittests.py``\nwill sort all test files by increasing time and run them.\n\"\"\"\n\n\nimport sys\nimport os\nimport unittest\nfrom pyquickhelper.pycode import ExtTestCase\n\ntry:\n import src\nexcept ImportError:\n path = os.path.normpath(\n os.path.abspath(\n os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"..\")))\n if path not in sys.path:\n sys.path.append(path)\n import src\n\nfrom src.python3_module_template.subproject.myexampleb import myclassb\nfrom src.python3_module_template.subproject2.myexample2 import myclass2\nfrom src.python3_module_template import _setup_hook\n\n\nclass TestExampleExt(ExtTestCase):\n \"\"\"Second example of a test.\"\"\"\n\n def test_src(self):\n \"skip pylint\"\n self.assertFalse(src is None)\n\n def test_static(self):\n self.assertTrue(myclass2.static_example() is not None)\n cl = myclass2(1)\n self.assertTrue(cl.property_example is not None)\n\n def test_hook(self):\n _setup_hook()\n\n def test_myclassb(self):\n b = myclassb(1)\n c = b.method_napoleon(1, 2)\n self.assertEqual(c, 4)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"_unittests/ut_example/test_example_ext.py","file_name":"test_example_ext.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"25564981","text":"#!/usr/bin/env python\n# -*- mode: python; indent-tabs-mode: nil; -*- coding: utf-8 -*-\n\n\"\"\"\nConnectionPool.py\n\nCopyright 2010 by Marcello Perathoner\n\nDistributable under the GNU General Public License Version 3 or newer.\n\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nimport psycopg2\nimport sqlalchemy.pool as pool\nimport cherrypy\nfrom cherrypy.process import plugins\n\n\nclass ConnectionCreator ():\n \"\"\" Creates connections for the connection pool. \"\"\"\n\n def __init__ (self, params):\n self.params = params\n\n def __call__ (self):\n cherrypy.log (\n \"Connecting to database '%(database)s' on '%(host)s:%(port)d' as user '%(user)s'.\"\n % self.params, context = 'POSTGRES', severity = logging.INFO)\n conn = psycopg2.connect (**self.params)\n conn.cursor ().execute ('SET statement_timeout = 5000')\n return conn\n\n\nclass ConnectionPool (plugins.SimplePlugin):\n \"\"\"A WSPBus plugin that controls a SQLAlchemy engine/connection pool.\"\"\"\n\n def __init__ (self, bus, params = None):\n plugins.SimplePlugin.__init__ (self, bus)\n self.params = params\n self.name = 'sqlalchemy'\n self.pool = None\n\n\n def _start (self):\n \"\"\" Init the connection pool. \"\"\"\n\n pool_size = cherrypy.config.get ('sqlalchemy.pool_size', 5)\n max_overflow = cherrypy.config.get ('sqlalchemy.max_overflow', 10)\n timeout = cherrypy.config.get ('sqlalchemy.timeout', 30)\n recycle = cherrypy.config.get ('sqlalchemy.recycle', 3600)\n\n self.bus.log (\"... pool_size = %d, max_overflow = %d\" % (pool_size, max_overflow))\n\n return pool.QueuePool (ConnectionCreator (self.params),\n pool_size = pool_size,\n max_overflow = max_overflow,\n timeout = timeout,\n recycle = recycle,\n use_threadlocal = True)\n\n\n def connect (self):\n \"\"\" Return a connection. \"\"\"\n\n return self.pool.connect ()\n\n\n def start (self):\n \"\"\" Called on engine start. \"\"\"\n\n if self.pool is None:\n self.bus.log (\"Creating the SQL connection pool ...\")\n self.pool = self._start ()\n else:\n self.bus.log (\"An SQL connection pool already exists.\")\n # start.priority = 80\n\n\n def stop (self):\n \"\"\" Called on engine stop. 
\"\"\"\n\n        if self.pool is not None:\n            self.bus.log (\"Disposing the SQL connection pool.\")\n            self.pool.dispose ()\n            self.pool = None\n\n\n    def graceful (self):\n        \"\"\" Called on engine restart. \"\"\"\n\n        if self.pool is not None:\n            self.bus.log (\"Restarting the SQL connection pool ...\")\n            self.pool.dispose ()\n            self.pool = self._start ()\n\n\ncherrypy.process.plugins.ConnectionPool = ConnectionPool\n","sub_path":"ConnectionPool.py","file_name":"ConnectionPool.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"399263558","text":"# coding=gbk\nimport random\n\nfrom login.templates.admin.account.adminlogin import login_admin\nfrom login.templates.platform.common.Add_CopyRight import add_AudioBookCopyright, add_ReadBookCopyright, \\\n    add_ComicCopyright\nfrom login.templates.platform.common.Create_Number import create_PhoneNum, create_IDNumber, create_CreditCardNumbers\nfrom login.templates.platform.common.Create_PartnerName import partnerName\nfrom login.templates.platform.common.operate_mysql import select_billing\nfrom login.templates.utils import httputils, confutils\nfrom login.templates.utils.confutils import getAdminName\nimport json\n\nfrom login.templates.utils.httputils import getadmin\n\n\ndef add_CopyrightPartner(business, partnerType):\n    '''Add a copyright partner.\n    :param business 0 = all businesses, 1 = paid listening, 2 = e-reading, 3 = VIP membership, 4 = comics\n    :param partnerType 1 = personal account, 2 = company account\n    '''\n    admintoken = login_admin()  # log in to admin and get a token\n    admin_api = getAdminName('partnerEdit')  # get the partnerEdit API endpoint\n    print(admin_api)\n    # prepare the request data\n    data = {}\n    PartnerName_Record = partnerName(2)  # generate a copyright partner name\n    PartnerName = PartnerName_Record[0]  # the new copyright partner name\n    PartnerName_Number = PartnerName_Record[1]  # the number suffix of the new partner name\n    # personal account or company account?\n    if partnerType == 1:\n        data['contactPerson'] = PartnerName + '(个人账户)'\n    elif partnerType == 2:\n        data['fullName'] = PartnerName + '(公司账户)'\n        data['shortName'] = PartnerName + '(公司账户)'\n        data['contactPerson'] = PartnerName + '(公司账户)'\n        data['deductTaxRate'] = random.randint(2, 20)\n    else:\n        print('账户类型输入有误!!!')\n    if business == 0:\n        book_cp = add_AudioBookCopyright()  # get audiobook copyright info\n        book_cp_id = book_cp[0]  # audiobook copyright id\n        book_cp_name = book_cp[1]  # audiobook copyright name\n        readbook_cp = add_ReadBookCopyright()  # get e-reading copyright info\n        readbook_cp_id = str(readbook_cp[0])  # e-reading copyright id\n        readbook_cp_name = readbook_cp[1]  # e-reading copyright name\n        comic_cp = add_ComicCopyright()  # get comic copyright info\n        comic_cp_id = str(comic_cp[0])  # comic copyright id\n        comic_cp_name = comic_cp[1]  # comic copyright name\n        data['spTypeBook'] = 1\n        data['spTypeReadBook'] = 1\n        data['spTypeVIP'] = 1\n        data['spTypeComic'] = 1\n        data['readbookEntityId'] = readbook_cp_id\n        data['readbookEntityName'] = readbook_cp_name\n        data['comicEntityId'] = comic_cp_id\n        data['comicEntityName'] = comic_cp_name\n        data['bookEntityId'] = book_cp_id\n        data['bookEntityName'] = book_cp_name\n    elif business == 1:\n        book_cp = add_AudioBookCopyright()  # get copyright info\n        book_cp_id = book_cp[0]  # copyright id\n        book_cp_name = book_cp[1]  # copyright name\n        data['spTypeBook'] = 1\n        data['spTypeReadBook'] = 0\n        data['spTypeVIP'] = 1\n        data['spTypeComic'] = 0\n        data['readbookEntityId'] = ''\n        data['readbookEntityName'] = ''\n        data['comicEntityId'] = ''\n        data['comicEntityName'] = ''\n        data['bookEntityId'] = book_cp_id\n        data['bookEntityName'] = book_cp_name\n    elif business == 2:\n        readbook_cp = add_ReadBookCopyright()  # get copyright info\n        readbook_cp_id = str(readbook_cp[0])  # copyright id\n        readbook_cp_name = readbook_cp[1]  # copyright name\n        data['spTypeBook'] = 0\n        data['spTypeReadBook'] = 1\n        data['spTypeVIP'] = 0\n        data['spTypeComic'] = 0\n        data['bookEntityId'] = ''\n        data['bookEntityName'] = ''\n        data['comicEntityId'] = ''\n        data['comicEntityName'] = ''\n        data['readbookEntityId'] = readbook_cp_id\n        data['readbookEntityName'] = readbook_cp_name\n    elif business == 3:\n        book_cp = add_AudioBookCopyright()  # get copyright info\n        book_cp_id = book_cp[0]  # copyright id\n        book_cp_name = book_cp[1]  # copyright name\n        data['spTypeBook'] = 0\n        data['spTypeReadBook'] = 0\n        data['spTypeVIP'] = 1\n        data['spTypeComic'] = 0\n        data['readbookEntityId'] = ''\n        data['readbookEntityName'] = ''\n        data['comicEntityId'] = ''\n        data['comicEntityName'] = ''\n        data['bookEntityId'] = book_cp_id\n        data['bookEntityName'] = book_cp_name\n    elif business == 4:\n        comic_cp = add_ComicCopyright()  # get copyright info\n        comic_cp_id = str(comic_cp[0])  # copyright id\n        comic_cp_name = comic_cp[1]  # copyright name\n        data['spTypeBook'] = 0\n        data['spTypeReadBook'] = 0\n        data['spTypeVIP'] = 0\n        data['spTypeComic'] = 1\n        data['readbookEntityId'] = ''\n        data['readbookEntityName'] = ''\n        data['bookEntityId'] = ''\n        data['bookEntityName'] = ''\n        data['comicEntityId'] = comic_cp_id\n        data['comicEntityName'] = comic_cp_name\n    else:\n        print('传参错误,请重新输入!!!')\n    # common request parameters\n    data['id'] = ''\n    data['channelEntityId'] = ''\n    data['channelEntityName'] = ''\n    data['annoucerEntityId'] = ''\n    data['annoucerEntityName'] = ''\n    data['partnerStatus'] = 2\n    data['canLogin'] = 0\n    data['identityCode'] = create_IDNumber()\n    data['phone'] = create_PhoneNum()\n    data['qq'] = ''\n    data['email'] = ''\n    data['address'] = '深圳市南山区粤海街道'\n    data['postcode'] = ''\n    data['bankAccount'] = create_CreditCardNumbers()\n    data['bankType'] = 1  # 1 = Bank of China\n    data['bankProvince'] = 7\n    data['bankCity'] = 152\n    data['bankName'] = '深圳市中国银行南山分行'\n    data['taxType'] = 0\n    data['billType'] = 1\n    data['billCode'] = ''\n    data['buttMan'] = ''\n    data['loginName'] = 'zhoushichuanCP' + str(PartnerName_Number)\n    data['password'] = 123456\n    data['passwordConfirm'] = 123456\n    data['partnerType'] = partnerType  # 1 = personal account, 2 = company account\n    data['cooperatorType'] = 2  # 1 = channel, 2 = copyright, 3 = announcer\n    data['spTypeAnnoucer'] = 0\n    data['bankAccountName'] = '周同学01'\n\n    # send the POST request\n    r = httputils.postadmin(admin_api, data, admintoken, confutils.getcurrentPath('PartnerEdit'))\n    res = json.loads(r.text)\n    print(res)\n    if res['status'] == 0:\n        cp_partner = select_billing(\"SELECT * from billing.p_partner order by id desc LIMIT 1;\")\n        cp_partnerId = cp_partner[0]['id']\n        cp_partnerName = cp_partner[0]['full_name']\n        print('-----------版权合作方:' + cp_partnerName + '(id:' + str(cp_partnerId) + ')' + '添加成功!!!---------------------')\n        return [cp_partnerId, cp_partnerName]\n    else:\n        print('---------添加失败!!!-----------')\n\n\ndef check_information(business, partnerType):\n    '''Review the revenue-share partner's information.\n    :param business 1 = paid listening, 2 = e-reading, 3 = VIP membership, 4 = comics\n    :param partnerType 1 = personal account, 2 = company account\n    '''\n    admintoken = login_admin()\n    admin_api = getAdminName('partnerConfirmEdit')  # get the API endpoint\n    # request parameters\n    copyright_partner = add_CopyrightPartner(business, partnerType)  # get the copyright partner id and name\n    copyright_partnerId = copyright_partner[0]  # the copyright partner id\n    copyright_partnerName = copyright_partner[1]  # the copyright partner name\n    data = {}\n    data['id'] = str(copyright_partnerId)\n    data['partnerStatus'] = '0'\n    # send the GET request\n    res = getadmin(admin_api, data, admintoken, confutils.getcurrentPath('PartnerList'))\n    r = json.loads(res.text)\n    print(r)\n    if r['status'] == 0:\n        print(\n            '--------版权合作方:' + copyright_partnerName + '(id:' + str(copyright_partnerId) + ')' + '资源审核通过!!-----------')\n    else:\n        print('--------版权合作方资源审核失败!!-----------')\n\nif __name__=='__main__':\n    add_CopyrightPartner(1,2)","sub_path":"login/templates/platform/add_Partner/Add_CopyrightPartner.py","file_name":"Add_CopyrightPartner.py","file_ext":"py","file_size_in_byte":8143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"15300416","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.core.urlresolvers import reverse\nfrom ..notifications.emails import email_send\nfrom intranet import settings\n\n\ndef event_approval_request(request, event):\n    subject = \"Event Approval Request from {}\".format(event.user)\n    emails = [settings.APPROVAL_EMAIL]\n\n    base_url = request.build_absolute_uri(reverse('index'))\n    data = {\n        \"event\": event,\n        \"info_link\": request.build_absolute_uri(reverse(\"event\", args=[event.id])),\n        \"base_url\": base_url\n    }\n\n    email_send(\"events/emails/admin_approve.txt\",\n               \"events/emails/admin_approve.html\",\n               data, subject, emails)\n","sub_path":"intranet/apps/events/notifications.py","file_name":"notifications.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"371131126","text":"#import hashlib\nimport os.path\nimport subprocess as sp\nimport config\nimport vote\n\nHEXSTRING_LEN_GUAR = 55\nHEXSTRING_LEN_CAND = 48\n\nMOCK = True\n#MOCK = False\n\ndef string2hex(key, key_length):\n    if key is None:\n        return None\n    s = key.ljust(key_length)\n    result = ''\n    for c in s:\n        result = \"%s%0.2X\" % (result, ord(c))\n    return result\n\ndef hash_string(s):\n    cp1 = sp.run([os.path.join(config.BINPATH,\"Hash\"), s], stdout=sp.PIPE)\n    return cp1.stdout[15:-2]\n\ndef election_dir(votation_id):\n    election_dir = os.path.join(config.ELECTIONPATH, \"election_majority{}\".format(votation_id))\n    return election_dir\n\n\ndef create_election(votation_id, candidates_n,words_n):\n    \"\"\"Backend program Creation\"\"\"\n    if MOCK: return True\n    cp = sp.run([os.path.join(config.BINPATH, \"Creation\"), election_dir(votation_id), str(candidates_n), str(words_n)], stdout=sp.PIPE)\n    control_string = \"Election successfully created with {} candidates and {} words\".format(candidates_n, words_n)\n    if cp.stdout.decode('utf-8').startswith(control_string):\n        return True\n    else:\n        return False\n\ndef election_state(votation_id):\n    if MOCK: return [\"Lorem ipsum dolor sit amet, consectetur adipiscing elit,\",\"sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\",\"Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\"]\n    p1 = election_dir(votation_id)\n    cp = sp.run([os.path.join(config.BINPATH, \"State\"), p1], stdout=sp.PIPE)\n    s = cp.stdout.decode('utf-8')\n    ar = s.split('\\n')\n    return ar\n\ndef election_pipe_in(votation_id):\n    election_pipe = os.path.join(config.ELECTIONPATH, \"election_majority_in{}\".format(votation_id))\n    return election_pipe\n\ndef election_pipe_out(votation_id):\n    election_pipe = os.path.join(config.ELECTIONPATH, \"election_majority_out{}\".format(votation_id))\n    return election_pipe\n\ndef election_vote_sec(votation_id, user_id, vote_array):\n    # read from a named pipe\n    result = None\n    pipe_in = open(election_pipe_in(votation_id),\"w\") # write votes here\n    pipe_out = open(election_pipe_out(votation_id),\"r\") # read results from here\n    vote_line = str(user_id)\n    successful_message = \"{} OK\".format(user_id)\n    for n in vote_array:\n        vote_line = (\"{} 
{}\".format(vote_line,n))\n print(vote_line, file=pipe_in)\n vote_result = pipe_out.readline()\n pipe_in.close()\n pipe_out.close()\n if MOCK:\n result = True\n else:\n result = vote_result == successful_message\n return result\n\n","sub_path":"frontend/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"54288390","text":"from collections import Counter, defaultdict, Iterable, namedtuple, OrderedDict\nfrom utils.error import ParamError\n\n\n\nclass Collections:\n\n\n def __init__(self, test_data=None):\n self.test_data = test_data\n self.cnt = Counter()\n\n\n def __copy__(self):\n return self\n\n\n def counter_num(self):\n \"\"\"\n 1.suitable for all types of data\n 2.no nested data exists,TODO how to adjusted?\n :return:\n \"\"\"\n if not isinstance(self.test_data, Iterable):\n raise ParamError\n for word in self.test_data:\n self.cnt[word] += 1\n return self.cnt\n\n\n def counter_most_common(self):\n \"\"\"\n filter num most\n :return:\n \"\"\"\n if not isinstance(self.test_data, Iterable):\n # print(ParamError.msg)\n raise ParamError\n cnt = Counter(self.test_data).most_common(1)\n return cnt\n\n\n @staticmethod\n def counter_opration(a, b):\n \"\"\"\n egg: +\n opration: + - * /\n :return:\n \"\"\"\n return a + b\n\n\n def deque(self):\n \"\"\"\n\n :return:\n \"\"\"\n pass\n\n\n def default_dict(self):\n \"\"\"\n statistics => dict\n :return:\n \"\"\"\n if not isinstance(self.test_data, Iterable):\n raise ParamError\n d_dict = defaultdict(list)\n for k, v in self.test_data:\n d_dict[k].append(v)\n return d_dict\n\n\n def default_str_dict(self):\n \"\"\"\n statistics => add after dict\n :return:\n \"\"\"\n if not isinstance(self.test_data, Iterable):\n raise ParamError\n str_count = defaultdict(int)\n for k in self.test_data:\n str_count[k] += 1\n return str_count.items()\n\n\n @staticmethod\n def named_tuple():\n \"\"\"\n name tuple\n :return:\n \"\"\"\n Point = namedtuple('Point', ['x', 'y'])\n p = Point(11, y=22)\n # return OrderedDict\n return p._asdict()\n\n\n def order_dict(self):\n \"\"\"\n prevent dict unordered when use hash\n :return:\n \"\"\"\n if not isinstance(self.test_data, dict):\n raise ParamError\n re = OrderedDict(sorted(self.test_data.items()), key=lambda x: x[0])\n return re\n\n\n\nif __name__ == '__main__':\n # collections = Collections('anasaha')\n # aa = Counter(a=3, b=1)\n # bb = Counter(a=1, b=5)\n # print(collections.counter_opration(aa, bb))\n c = Collections([('aa', 1), ('aa', 2), ('bb', 3)])\n print(c.order_dict())\n # c = Collections('ansasgasda')\n # print(c.default_str_dict())\n","sub_path":"utils/module/collections.py","file_name":"collections.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"236706300","text":"'''\r\n2.Вводиться дійсне число R. 
Обчислити і вивести довжину окружності, площу кола і об'єм кулі одного радіуса R.\r\n(0 ≤ R ≤ 100)\r\n'''\r\nwhile True:\r\n R = float(input('Введіть R: '))\r\n if 0 <= R <= 100:\r\n break\r\n\r\nfrom math import pi\r\n\r\nprint(f'Довжина кола: {2 * pi * R}')\r\nprint(f'Площа кола: {pi * R * R}')\r\nprint(f'Об\\'єм кулі: {(4 / 3) * (pi * R ** 3)}')\r\n","sub_path":"kr.py","file_name":"kr.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"653991219","text":"import numpy as np\nimport pandas as pd\nimport sys\nsys.path.append('../../modules/')\n\nfrom Analysis.analysis_utils import get_grids, analysis_specs, plot_specs, avg_performance_over_envs, avg_perf_over_envs_lines\nfrom Analysis.analysis_utils import no_rm_avg_std\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\n# import csv data summary\nparent_path = '../../Data/'\n#df = pd.read_csv(parent_path+'random_forget_ec.csv')\ndf = pd.read_csv(parent_path+'throttled_ec_allreps_chebyshev.csv')\n\ndef structured_unstructured(df_element):\n map = {'analytic successor':'structured',\n 'place_cell':'structured',\n 'onehot':'unstructured',\n 'random':'unstructured',\n 'conv_latents':'structured'}\n new_element = map[df_element]\n return new_element\n\ndf['representation'] = df['representation'].apply(structured_unstructured)\ngroups_to_split = ['env_name','representation','EC_cache_limit']\ngb = df.groupby(groups_to_split)[\"save_id\"]\n\nLINCLAB_COLS = {\"blue\" : \"#50a2d5\", # Linclab blue\n \"red\" : \"#eb3920\", # Linclab red\n \"grey\" : \"#969696\", # Linclab grey\n \"green\" : \"#76bb4b\", # Linclab green\n \"purple\": \"#9370db\",\n \"orange\": \"#ff8c00\",\n \"pink\" : \"#bb4b76\",\n \"yellow\": \"#e0b424\",\n \"brown\" : \"#b04900\",\n }\n\ncolor_map = {'structured':LINCLAB_COLS['red'], 'unstructured':LINCLAB_COLS['blue']}\nlabels = {'structured':'structured','unstructured':'unstructured'}\nenvs_to_plot = ['gridworld:gridworld-v11','gridworld:gridworld-v41','gridworld:gridworld-v31','gridworld:gridworld-v51']\nreps_to_plot = ['unstructured','structured']\npcts_to_plot = [100,75,50,25]\ngrids = get_grids(envs_to_plot)\navg_performance_over_envs(gb,envs_to_plot,reps_to_plot,pcts_to_plot,grids,colors=color_map,labels=labels,legend='pcts',savename='str_unstr_relative_to_unrestricted_bars',save=True)\navg_perf_over_envs_lines(gb,envs_to_plot,reps_to_plot,pcts_to_plot,grids,colors=color_map,labels=labels,legend='reps',savename='str_unstr_relative_to_unrestricted_lines',save=True)","sub_path":"basic/Analysis/CH2/scratchpad/06_str_unstr_compared_to_unrestricted.py","file_name":"06_str_unstr_compared_to_unrestricted.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"42509225","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import *\nfrom django.contrib.auth.decorators import permission_required\nfrom ikwen_shavida.sales.views import confirm_order, cancel_order, confirm_processed, OrderDetail, PartnerDashboard, \\\n PartnerWalletList\n\nfrom ikwen_shavida.sales.views import choose_retail_bundle, choose_vod_bundle, choose_temp_bundle\n\nurlpatterns = patterns(\n '',\n url(r'^choose_retail_bundle$', choose_retail_bundle, name='choose_retail_bundle'),\n url(r'^choose_vod_bundle$', choose_vod_bundle, name='choose_vod_bundle'),\n url(r'^choose_temp_bundle$', choose_temp_bundle, name='choose_temp_bundle'),\n 
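# NOTE: patterns() was removed in Django 1.10; on newer versions a plain list is assigned to urlpatterns\n    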
url(r'^confirm_order$', confirm_order, name='confirm_order'),\n    url(r'^cancel_order$', cancel_order, name='cancel_order'),\n    url(r'^confirm_processed$', confirm_processed, name='confirm_processed'),\n    url(r'^orderDetail/(?P[-\w]+)/$', OrderDetail.as_view(), name='order_detail'),\n    url(r'^partnerDashboard/$', permission_required('sales.ik_access_partner_dashboard')(PartnerDashboard.as_view()), name='partner_dashboard'),\n    url(r'^partnerWalletList/$', permission_required('sales.ik_view_partner_wallets')(PartnerWalletList.as_view()), name='partner_wallet_list'),\n)\n","sub_path":"sales/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"67469734","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy \n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///fdb.db'  # 'mysql://root:xT1VXp8KRZIe1BR5@localhost/'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\nclass User(db.Model):\n    id = db.Column( db.Integer, primary_key=True)\n    name = db.Column( db.String(20))\n\n\ndb.create_all()\n\n\n\"\"\"mysql uri: 'mysql://username:password@host/dbname'\"\"\"\n","sub_path":"Flask/Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"174706633","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\nimport moustache\n\ntry:\n    import pypandoc\n    long_description = pypandoc.convert('README.md', 'rst')\nexcept(IOError, ImportError):\n    long_description = open('README.md').read()\n\n\nsetup(\n    name='Moustache.beta',\n    version=moustache.__version__,\n    python_requires='>=3',\n    packages=find_packages(),\n    author=\"Libriciel SCOP\",\n    author_email=\"hackathon@libriciel.coop\",\n    description=\"Module Moustache pour fusion ODT\",\n    
long_description=long_description,\n    install_requires=[\n        'flask',\n        'secretary',\n        'Pillow',\n        'jinja2',\n        'babel',\n        'num2words',\n        'python-dateutil',\n        'requests',\n        'python-magic'\n    ],\n    include_package_data=True,\n    url='https://gitlab.libriciel.fr/libriciel/hackathon-2018-01/moustache',\n    entry_points={\n        'console_scripts': [\n            'moustache = moustache:launch'\n        ],\n    },\n    license=\"CeCILL v2\",\n)\n","sub_path":"pypi_install_script/Moustache.beta-0.0.1.post18555.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"171334055","text":"import re\nimport urllib.request\nimport urllib.error\n\n\ndef download(url, user_agent='wswp', num_retries=2):\n    print(\"Download:\", url)\n    headers = {\"User-agent\": user_agent}\n    request = urllib.request.Request(url, headers=headers)\n    try:\n        html = urllib.request.urlopen(request).read().decode('utf-8')\n    except urllib.error.URLError as e:\n        print(\"Download error:\", e.reason)\n        html = None\n\n        if num_retries > 0:\n            if hasattr(e, \"code\") and 500 <= e.code < 600:\n                return download(url, user_agent, num_retries-1)\n    return html\n\ndef link_crawler(seed_url, link_regex):\n    \"\"\"\n    Crawl from the given seed URL following links matched by link_regex\n    \"\"\"\n    crawl_queue = [seed_url]\n    while crawl_queue:\n        url = crawl_queue.pop()\n        html = download(url)\n        if html is None:\n            continue\n\n        for link in get_links(html):\n            if re.match(link_regex, link):\n                crawl_queue.append(link)\n\ndef get_links(html):\n    webpage_regex = re.compile(\"<a[^>]+href=['\\\"](.*?)['\\\"]\", re.IGNORECASE)\n    return webpage_regex.findall(html)","sub_path":"helloLinkCrawler.py","file_name":"helloLinkCrawler.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"417516495","text":"import tensorflow as tf\nimport numpy as np\n\n\nclass JumpModelFine:\n    def __init__(self):\n        self.img_shape = (320, 320)\n        self.batch_size = 8\n        self.input_channel = 3\n        self.out_channel = 2\n\n    @staticmethod\n    def conv2d(name, in_para, ks, stride):\n        with tf.name_scope(name):\n            with tf.variable_scope(name):\n                w = tf.get_variable('%s-w' % name, shape=ks, initializer=tf.truncated_normal_initializer())\n                b = tf.get_variable('%s-b' % name, shape=[ks[-1]], initializer=tf.constant_initializer())\n                out_para = tf.nn.conv2d(in_para, w, strides=[1, stride, stride, 1], padding='SAME',\n                                        name='%s-conv' % name)\n                out_para = tf.nn.bias_add(out_para, b, name='%s-bias_add' % name)\n                return out_para\n\n    def make_conv_bn_relu(self, name, in_para, ks, stride, is_training):\n        out_para = self.conv2d('%s-conv' % name, in_para, ks, stride)\n        out_para = tf.layers.batch_normalization(out_para, name='%s-bn' % name, training=is_training)\n        out_para = tf.nn.relu(out_para, name='%s-relu' % name)\n        return out_para\n\n    @staticmethod\n    def make_fc(name, in_para, ks):\n        with tf.name_scope(name):\n            with tf.variable_scope(name):\n                w = tf.get_variable('%s-w' % name, shape=ks, initializer=tf.truncated_normal_initializer())\n                b = tf.get_variable('%s-b' % name, shape=[ks[-1]], initializer=tf.constant_initializer())\n                out_para = tf.matmul(in_para, w, name='%s-mat' % name)\n                out_para = tf.nn.bias_add(out_para, b, name='%s-bias_add' % name)\n                # out_para = tf.nn.dropout(out_para, keep_prob, name='%s-drop' % name)\n                return out_para\n\n    def forward(self, img, is_training, name='fine'):\n        # print(name)\n        with tf.name_scope(name):\n            with tf.variable_scope(name):\n                out_para = 
self.conv2d('conv1', img, [3, 3, self.input_channel, 16], 2)\n                # out_para = tf.layers.batch_normalization(out_para, name='bn1', training=is_training)\n                out_para = tf.nn.relu(out_para, name='relu1')\n\n                out_para = self.make_conv_bn_relu('conv2', out_para, [3, 3, 16, 64], 1, is_training)\n                out_para = tf.nn.max_pool(out_para, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')\n\n                out_para = self.make_conv_bn_relu('conv3', out_para, [5, 5, 64, 128], 1, is_training)\n                out_para = tf.nn.max_pool(out_para, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')\n\n                out_para = self.make_conv_bn_relu('conv4', out_para, [7, 7, 128, 256], 1, is_training)\n                out_para = tf.nn.max_pool(out_para, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')\n\n                out_para = self.make_conv_bn_relu('conv5', out_para, [9, 9, 256, 512], 1, is_training)\n                out_para = tf.nn.max_pool(out_para, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')\n\n                out_para = tf.reshape(out_para, [-1, 512 * 10 * 10])\n                out_para = self.make_fc('fc1', out_para, [512 * 10 * 10, 512])\n                out_para = self.make_fc('fc2', out_para, [512, 2])\n\n        return out_para\n\n\nif __name__ == '__main__':\n    model = JumpModelFine()\n    # print(tf.zeros((1, 320, 320, 3)))\n    # the second argument must be a bool placeholder\n    out = model.forward(tf.zeros((1, 320, 320, 3)), tf.placeholder(np.bool, name='is_training'))\n    print(out.get_shape().as_list())\n","sub_path":"model_fine.py","file_name":"model_fine.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"237640159","text":"\nanswer = []\narray = [1,5,2,6,3,7,4]\ncomm = [[2, 5, 3], [4, 4, 1], [1, 7, 3]]\n\n\nlist_str2=[]\nfor i in range(len(comm)):\n    list_str2 += list(map(str,comm[i]))\n    print(list_str2)\n\n\n\n\ndef solution(array, commands):\n    \n    answer = []\n    \n    for i in range(len(commands)):\n        \n        tem =[]\n        \n        if commands[i][0]-1 != commands[i][1]:\n            tem = array[commands[i][0]-1:commands[i][1]]\n            print(i,\"번째 :\",tem)\n        else :\n            tem = [array[commands[i][1]-1]]\n        tem.sort()\n        \n        num = tem.pop(commands[i][2]-1)\n        answer.append(num)\n        \n    return answer\n\n\n\ndef solution2(array, commands):\n    \n    answer = []\n    tem = array[commands[0][0]-1:commands[0][1]-1]\n    tem.sort()\n    print(\"1 : \", array)\n    print(\"1 : \", tem)\n    \n    #num1 = tem.pop(commands[0][2]-1)\n    #answer.append(num1)\n    \n    \n    if commands[1][0]-1 != commands[1][1]-1:\n        tem = array[commands[1][0]-1:commands[1][1]-1]\n    else :\n        tem = [array[commands[1][1]-1]]\n    tem.sort()\n    \n    num = tem.pop(commands[1][2]-1)\n    answer.append(num)\n    \n    tem = array[commands[2][0]-1:commands[2][1]-1]\n    tem.sort()\n    print(\"3 : \", array)\n    print(\"3 : \", tem)\n    \n    #num1 = tem.pop(commands[2][2]-1)\n    #answer.append(num1)\n\n    #answer.sort()  \n    return answer\n\n\naaa = solution(array, comm)\n\nprint(aaa)\n","sub_path":"basic_python/HELLOPYTHON/python_300/pro_pro.py","file_name":"pro_pro.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"504264205","text":"# MIT License\n\n# Copyright (c) 2023-present K. S. 
Ernest (iFire) Lee\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nfrom ludwig.api import LudwigModel\nimport gradio as gr\nimport yaml\n\nconfig_str = \"\"\"\nmodel_type: llm\nbase_model: openlm-research/open_llama_3b_v2\ninput_features:\n - name: star_rating\n type: text\n - name: game_genre\n type: text\n - name: platform\n type: text\n - name: num_ratings\n type: text\noutput_features:\n - name: game_text\n type: text\nquantization:\n bits: 4\nadapter:\n type: lora\ntrainer:\n type: finetune\n learning_rate: 0.0003\n batch_size: 1\n gradient_accumulation_steps: 8\n epochs: 3\n learning_rate_scheduler:\n warmup_fraction: 0.01\nbackend:\n type: local\n\"\"\"\n\nconfig = yaml.safe_load(config_str)\n\n\nimport os\nfrom ludwig.api import LudwigModel\nimport gradio as gr\n\n# Define the enumeration lists\nplatform_options = [\n \"\",\n \"Download for Linux,Download for macOS,Download for Windows\",\n \"Download for Windows\",\n \"Download for macOS,Download for Windows\",\n \"Download for Linux,Download for macOS,Download for Android,Download for Windows\",\n \"Download for Linux,Download for Windows\",\n \"Download for Android,Download for Windows\",\n \"Download for Android,Download for macOS,Download for Windows\",\n \"Download for Android\",\n \"Download for Linux,Download for Android,Download for Windows\",\n \"Download for macOS\",\n \"Download for Linux,Download for macOS\",\n \"Download for Linux,Download for macOS,Download for Android\"\n]\n\ngame_genre_options = [\n \"\",\n \"Visual Novel\",\n \"Adventure\",\n \"Interactive Fiction\",\n \"Puzzle\",\n \"Action\",\n \"Platformer\",\n \"Role Playing\",\n \"Simulation\",\n \"Strategy\",\n \"Shooter\",\n \"Survival\",\n \"Rhythm\",\n \"Educational\",\n \"Card Game\",\n \"Fighting\",\n \"Racing\",\n \"Sports\"\n]\n\nmodel_dir = 'results/api_experiment_run_12/model'\nimport logging\nmodel = LudwigModel(config=config, logging_level=logging.INFO)\n\nif not os.path.exists(model_dir):\n # Train the model if it does not exist\n train_stats = model.train(dataset=\"items_games_5.csv\") # trust_remote_code=True\nelse:\n # Load the model and run Gradio if the model exists\n model.load(model_dir)\n \n def predict(platform, game_genre, num_ratings, star_rating):\n return model.predict({\n \"platform\": platform,\n \"game_genre\": game_genre,\n \"num_ratings\": num_ratings,\n \"star_rating\": star_rating\n })\n \n # Use these lists in the Gradio Interface\n iface = gr.Interface(\n fn=predict, \n inputs=[\n 
gr.inputs.Dropdown(platform_options, label=\"Platform\"), \n gr.inputs.Dropdown(game_genre_options, label=\"Game Genre\"), \n gr.inputs.Slider(minimum=0, maximum=10000, default=5000, label=\"Number of Ratings\"), \n gr.inputs.Slider(minimum=0, maximum=1, step=0.01, default=0.5, label=\"Star Rating\"),\n ], \n outputs=\"text\"\n )\n iface.launch()\n","sub_path":"recommend/recommend.py","file_name":"recommend.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"480668504","text":"while True:\n html = urllib.request.urlopen(\"http://teamtrees.org\")\n soup = BeautifulSoup(html, 'html.parser')\n method1 = soup.find('div', id='totalTrees')\n Treecount = (method1[\"data-count\"])\n Treegap = (20000000-int(Treecount))\n today = datetime.date.today()\n future = datetime.date(2020,1,1)\n diff = future - today\n daystogo = diff.days\n Formatted_Treecount = (\"{:,d}\".format(int(Treecount)))\n Formatted_Treegap = (\"{:,d}\".format(int(Treegap)))\n percentage = ((int(Treecount) / int(20000000)) * 100)\n perc2 = round(percentage ,2)\n newtweet = (f\"🌳 Planted: {Formatted_Treecount} 🌳\\n🌳 Remaining: {Formatted_Treegap} 🌳 \\n🌳 {perc2}% of goal 🌳 \\n🌳 {daystogo} days remaining 🌳 \\n🌳 DONATE $1, PLANT A TREE 🌳 \\n🌳 http://teamtrees.org 🌳\")\n","sub_path":"treegap.py","file_name":"treegap.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"144046959","text":"# A container for an audio file to be exchanged from one filter to another in memory\r\nclass memory_wave_file():\r\n def __init__(self):\r\n self.filepath = ''\r\n self.channel = None\r\n self.srate = 0\r\n self.length = 0\r\n \r\n def read_wavfile(self, base_folder, file_path):\r\n #print('#')\r\n filepath = file_path.replace(os.path.abspath(base_folder),'')\r\n #print('#')\r\n self.filepath = filepath\r\n #print('#')\r\n af, sr = librosa.load(os.path.abspath(base_folder+filepath), sr=None, mono=False)\r\n \r\n #print(af.shape)\r\n self.srate = sr\r\n self.length = len(af[0])\r\n self.channel = af\r\n return self\r\n \r\n\r\n ","sub_path":"utility/feature_extractor/memory_files.py","file_name":"memory_files.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"155497551","text":"import sys\nimport os\nimport datetime\n\nimport numpy\nfrom numpy.random import seed\n\nfrom exp import feature_creator as fc\nfrom classifier import classifier_main as cm\nimport pandas as pd\n\nseed(1)\nos.environ['PYTHONHASHSEED'] = '0'\n\ndef create_features(csv_feature_folder):\n csv_features = csv_feature_folder + \"/features.csv\"\n\n label_col=15\n\n df = pd.read_csv(csv_features, header=0, delimiter=\",\", quoting=0, quotechar='\"').as_matrix()\n y = df[:, label_col]\n\n X = df[:, 1:15]\n # Convert feature vectors to float64 type\n X = X.astype(numpy.float32)\n\n return X, y\n\nif __name__ == \"__main__\":\n #this is the file pointing to the basic features, i.e., just the numeric values\n #msm4phi/paper2/data/training_data/basic_features.csv\n csv_feature_folder=sys.argv[1]\n\n #this is the folder to save output to\n outfolder=sys.argv[2]\n n_fold=10\n\n print(datetime.datetime.now())\n X, y=create_features(csv_feature_folder)\n\n #behaviour only\n print(\">>>>> _uddin2018_ >>>>>\")\n print(datetime.datetime.now())\n cls = cm.Classifer(\"stakeholdercls\", \"_uddin18_\", X, y, 
outfolder,\n                      categorical_targets=6, nfold=n_fold, algorithms=[\"svm_l\"])\n    cls.run()\n\n\n","sub_path":"code/python/src/exp/exp_uddin.py","file_name":"exp_uddin.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"424364815","text":"\"\"\" Stylesheet transformer for YAML input files and stylesheets in jinja2 format. \"\"\"\n\n\nimport argparse\nfrom pathlib import Path\nimport yaml\nfrom jinja2 import Environment, FileSystemLoader\n\ndef main():\n    \"\"\" Main entry point for YAML Stylesheet Transformer \"\"\"\n    parser = argparse.ArgumentParser(\n        description='Apply stylesheet to input file.')\n    parser.add_argument('-s', '--stylesheet', dest='stylesheet',\n                        help='stylesheet to apply', required=True)\n    parser.add_argument('input', help='input file')\n\n    args = parser.parse_args()\n    stylesheet_path = Path(args.stylesheet)\n    input_path = Path(args.input)\n    if not input_path.exists() or not input_path.is_file():\n        print('Input file ' + args.input + ' does not exist.')\n    elif not stylesheet_path.exists() or not stylesheet_path.is_file():\n        print('Stylesheet ' + args.stylesheet + ' does not exist.')\n    else:\n        env = Environment(loader=FileSystemLoader(str(stylesheet_path.resolve().parent)))\n        template = env.get_template(stylesheet_path.name)\n        context = yaml.safe_load(input_path.read_text())\n        print('## ' + str(context))\n        output = template.render(context=context, filename=input_path.name)\n        print(output)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"yslt.py","file_name":"yslt.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"558425214","text":"from valclient.client import Client\nimport json\n\nwith open(\"inventory_reference.json\") as f:\n    inv = json.load(f)\n    client = Client()\n    client.activate()\n    #client.put_player_loadout(inv)\n\nprint(json.dumps(client.fetch_player_loadout()))","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"428342616","text":"from multiprocessing import Pool\nimport time\nimport os\nimport math\n\ndef f(x):\n    print(\"값\", x, \"에 대한 작업 Pid = \",os.getpid())\n    time.sleep(1)\n    return x*x\n\nif __name__ == '__main__':\n    p = Pool(3)\n    startTime = int(time.time())\n    print(p.map(f, range(0,10)))\n    endTime = int(time.time())\n    print(\"총 작업 시간\", (endTime - startTime))","sub_path":"src/com/fwk/business/util/common/poolj.py","file_name":"poolj.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"52307768","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul  9 09:28:27 2018\n\n@author: luohaozheng\n\"\"\"\n\n#!/usr/bin/python\n # coding=utf-8\n#########################################\n# kNN: k Nearest Neighbors\n  \n  # input:      newInput:  (1xN) vector to classify\n  #             dataSet:   (NxM) training data set\n  #             labels:    class label vector of the training set\n  #             k:         number of nearest neighbors\n  \n  # output:    the most likely class label\n  #########################################\n  \nimport numpy as np\nimport Kmeans\n  # create a data set containing 4 samples in 2 classes\ndef createDataSet():\n    # build a matrix in which each row is one sample\n    y=list()\n    fp=open('datapoint.csv','r')\n    title=fp.readline()\n    for line in fp:\n        l1=line.split(',')\n        x=[0,0]\n        x[0]=float(l1[0])\n        x[1]=float(l1[1])\n        y.append(x)\n    group = np.array(y)\n    # the class each sample belongs to\n    labels = list(Kmeans.data.color)\n    return group, labels\n    \n    # definition of the kNN classification function\ndef kNNClassify(newInput, dataSet, labels, k):\n    numSamples = dataSet.shape[0]  # shape[0] is the number of rows\n    diff = np.tile(newInput, (numSamples, 1)) - dataSet  # element-wise difference\n    squaredDiff = diff ** 2  # square the differences\n    squaredDist = np.sum(squaredDiff, axis = 1)  # sum along each row\n    distance = squaredDist ** 0.5  # square root of the sum gives the distance\n    \n    # # step 2: sort the distances\n    # argsort() returns the indices that would sort the array\n    sortedDistIndices = np.argsort(distance)\n    classCount = {}  # define a dictionary (can be append element)\n    for i in range(k):\n        # # step 3: pick the i-th nearest neighbor\n        voteLabel = labels[sortedDistIndices[i]]\n        # # step 4: count how often each class appears among the k nearest neighbors\n        # when the key voteLabel is not in dictionary classCount, get()\n        # will return 0\n        classCount[voteLabel] = classCount.get(voteLabel, 0) + 1\n    \n    # # step 5: return the class label with the highest count\n    maxCount = 0\n    for key, value in classCount.items():\n        if value > maxCount:\n            maxCount = value\n            maxIndex = key\n    return maxIndex\n    \ndef main():  \n    # build the data set and class labels\n    dataSet, labels = createDataSet()\n    # define a sample with unknown class\n    testX = np.array([1.2, 1.0])\n    k = 3\n    # classify the unknown sample\n    outputLabel = kNNClassify(testX, dataSet, labels, k)\n    print( \"Your input is:\", testX, \"and classified to class: \", outputLabel)  \n    testX = np.array([-1.1, -1.5])\n    outputLabel = kNNClassify(testX, dataSet, labels, k)\n    print( \"Your input is:\", testX, \"and classified to class: \", outputLabel)\n    \nmain()","sub_path":"KNN/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"929129","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom entsoe import EntsoePandasClient\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n# functions\ndef multiply_by_sourceEm(arr,sourceEm,unitConv):\n    return np.multiply(arr,sourceEm*unitConv)\n\ndef query_entsoe(start, end, country_list):\n    df = []\n    for index, country in enumerate(country_list):\n        # check if file already exists\n        files = os.listdir('./data')\n        timespan = f\"{start.strftime('%Y%m%d')}-{end.strftime('%Y%m%d')}\"\n        check_file = f\"{timespan}_{country}_production.csv\"\n        if check_file in files:\n            # file already exists, just read it\n            dfc = pd.read_csv(f'./data/{check_file}', index_col=0)\n        else:\n            # request \"Actual Generation per Production Type - Aggregated Generation per Type [16.1.B&C]\" to API\n            dfc = client.query_generation(country, start=start, end=end)\n            if dfc.columns.nlevels == 2:\n                # drop the consumption values (empty anyway)\n                # drop the second level of the column names (the useless \"Actual Aggregated\" label)\n                dfc = dfc.filter(regex='Actual Aggregated', axis=1).droplevel(1,axis=1)\n            # write csv file to /data\n            dfc.rename(columns={dfc.columns[0]: 'date'}, inplace=True)\n            dfc.to_csv(f\"./data/{check_file}\")\n        # reconstitute df for country_list\n        df.append(dfc)\n    return df, timespan\n\n# CO2 emission of each technology in a dictionary\n# unit: gco2eq/kWh = Kg co2eq/MWh\n# source: https://www.ipcc.ch/site/assets/uploads/2018/02/ipcc_wg3_ar5_annex-iii.pdf\n\n# by default we refer to emission numbers in the table A.III.2 (median of life cycle analyses), otherwise we indicate the source with the numbers\n# used the biomass \"dedicated\", must check what is the dominant tech in each country...\n# used generic \"coal\" emission for both brown and hard coal,\n# all hydros use same number (24)\n\nco2PerSource = {\n    'Biomass': 230.,\n    'Fossil Brown coal/Lignite': 820.,\n    'Fossil Coal-derived gas': 490., # could not find numbers for coal derived gas, use fossil gas but probably very wrong!\n    'Fossil Gas': 490.,\n    'Fossil Hard coal': 820.,\n    'Fossil Oil': 733., # mean value of table 2 in https://www.world-nuclear.org/uploadedFiles/org/WNA/Publications/Working_Group_Reports/comparison_of_lifecycle.pdf\n    'Fossil Oil shale': 733.,\n    'Fossil Peat': 820., # could not find numbers for peat, use coal value\n    'Geothermal': 38.,\n    'Hydro Pumped Storage': 24.,\n    'Hydro Run-of-river and poundage': 24.,\n    'Hydro Water Reservoir': 24.,\n    'Marine': 17., # used 'ocean' in 'pre-commercial technologies' for marine, not sure!\n    'Nuclear': 12.,\n    'Other': 820., # used coal for other (conservative), to be refined!\n    'Other renewable': np.nan,\n    'Solar': 41., # used PV for solar\n    'Waste': 922.22, # for waste, not sure but used the first row of table 1 in https://www.mdpi.com/2071-1050/8/11/1181\n    'Wind Offshore': 12.,\n    'Wind Onshore': 11.\n    }\n\n# client parameters\nclient = EntsoePandasClient(api_key='${{ secrets.TOKEN_API }}')\n\nfor year in range(2015,2021):\n    #for month in range(1,13):\n    # set up the period of the requested history\n    # if yearly\n    start = pd.Timestamp(year=year, month=1, day=1, tz='Europe/Brussels')\n    end = pd.Timestamp(year=year, month=12, day=31, tz='Europe/Brussels')\n    # if monthly\n    #start = pd.Timestamp(year=year, month=month, day=1, tz='Europe/Brussels')\n    #end = pd.Timestamp(year=year, month=month, day=start.daysinmonth, tz='Europe/Brussels')\n    country_code = ['DE','FR']\n\n    # call query\n    df, timespan = query_entsoe(start, end, country_code)\n\n    emissionDf = [dfc.copy() for dfc in df]\n    # note from MichelB: original table reports power (in MW), we multiply by time step to get energy (in MWh), and then convert to emission in tCo2 = KgCo2/1000\n    # quarter hour data\n    # warning: time step not the same for all countries, use a list of time units in hours\n    timeStep = [1./4.,1.]\n    # we multiply by time unit but divide by 1000 to get tons of CO2\n    unitConversion = [t/1000. 
for t in timeStep]\n\n for kcount,countr in enumerate(country_code):\n for source in df[kcount].keys():\n emissionDf[kcount][source] = df[kcount][[source]].apply(multiply_by_sourceEm, args = (co2PerSource[source],unitConversion[kcount]))\n\n # write csv file to /data with emissions per source in tons for each time step\n timespan = f\"{start.strftime('%Y%m%d')}-{end.strftime('%Y%m%d')}\"\n [emissionDfc.to_csv(f\"./data/{timespan}_{country_code[index]}_emission.csv\") for index, emissionDfc in enumerate(emissionDf)]\n\n # compute total production and emissions\n totProd = [dfc.apply(np.nansum, axis=1) for dfc in df]\n totEmit = [emissionDfc.apply(np.nansum, axis=1) for emissionDfc in emissionDf]\n\n # forces axes limit to overlay graphs\n plt.axes(xlim=(0, 100000), ylim=(0, 650), autoscale_on=False)\n\n # plot\n [plt.plot(totProd[k], totEmit[k]/totProd[k]/timeStep[k]*1000, '.', markersize=2) for k in range(len(timeStep))]\n plt.gca().set_ylabel(\"Emissions (gCO2/kWh)\")\n plt.gca().set_xlabel(\"Production (MWh)\")\n plt.legend(country_code)\n\n # save plot to .png\n filename = f\"./plots/{timespan}_{''.join(country_code)}_emission_vs_production.png\"\n plt.savefig(filename, dpi=300, transparent = True)\n\n # clear plot\n plt.clf()\n","sub_path":"data_analyis_request_ENTSO_E.py","file_name":"data_analyis_request_ENTSO_E.py","file_ext":"py","file_size_in_byte":5660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"637221049","text":"from flask import Flask\r\nfrom flask import render_template,request\r\n#from keras.models import load_model\r\nfrom os.path import dirname,realpath\r\n\r\nfrom werkzeug import secure_filename\r\n#from keras.preprocessing.image import load_img\r\n#import numpy as np\r\nimport os\r\n#import tensorflow as tf\r\n#tf.__version__\r\nimport model\r\nimport car\r\n#App will crash bevause of gunicorn set up in procfile we should be \r\n#careful with that\r\n#requirements and conda requirements add all the packages\r\napp=Flask(__name__)\r\n\r\nPROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))\r\nmain_folder=os.path.join(PROJECT_ROOT,\"Images_folder\")\r\nprint(main_folder)\r\napp.config['UPLOAD_FOLDER'] = main_folder\r\n@app.route('/')\r\ndef input():\r\n return render_template(\"input.html\")\r\n\r\n@app.route('/output',methods=['GET','POST'])\r\ndef image():\r\n if request.method=='POST':\r\n f=request.files['file']\r\n fil=secure_filename(f.filename)\r\n f.save(os.path.join(app.config['UPLOAD_FOLDER'], fil))\r\n \r\n path=os.path.join(app.config['UPLOAD_FOLDER'], fil)\r\n output1=car.pred_car(path)\r\n if output1 :\r\n output=model.predic(path)\r\n else:\r\n output=\"Please submit Car Image\"\r\n \r\n \r\n #list_passed=[\"Please\",\"Submit\"]\r\n return render_template(\"output.html\",result=output)\r\n \r\nif __name__=='__main__':\r\n app.run(debug=False,threaded=False)","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"441718205","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport torch\n\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\n\nfrom utils.data_utils import *\nfrom sklearn import preprocessing\n\nclass SarcopeniaDataset(Dataset):\n def __init__(self, X, asm, asm_h2, sarcopenia, height_squared, patient_id, gender, transform=None):\n \"\"\"\n Args:\n transform (callable, 
optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.X = X\n self.asm = asm\n self.asm_h2 = asm_h2\n self.sarcopenia = sarcopenia\n self.height_squared = height_squared\n self.patient_id = patient_id\n self.gender = gender\n self.transform = transform\n\n def __len__(self):\n return self.X.shape[0]\n\n def __getitem__(self, idx):\n X_i = self.X[idx, :]\n asm_i = self.asm[idx]\n asm_h2_i = self.asm_h2[idx]\n sarcopenia_i = self.sarcopenia[idx]\n height_squared_i = self.height_squared[idx]\n patient_id_i = self.patient_id[idx]\n gender_i = self.gender[idx]\n\n sample = {'X': X_i, 'asm': asm_i, 'asm_h2': asm_h2_i, 'sarcopenia': sarcopenia_i, \n 'height_squared': height_squared_i, 'patient_id': patient_id_i, 'gender': gender_i\n }\n if self.transform:\n sample = self.transform(sample)\n return sample\n\n \n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n X, asm, asm_h2, sarcopenia = sample['X'], sample['asm'], sample['asm_h2'], sample['sarcopenia']\n height_squared, patient_id, gender = sample['height_squared'], sample['patient_id'], sample['gender']\n\n return {'X': torch.from_numpy(X),\n 'asm': torch.from_numpy(np.array(asm)),\n 'asm_h2': torch.from_numpy(np.array(asm_h2)),\n 'sarcopenia': torch.from_numpy(np.array(sarcopenia)),\n 'height_squared': torch.from_numpy(np.array(height_squared)),\n 'patient_id': torch.from_numpy(np.array(patient_id)),\n 'gender': torch.from_numpy(np.array(gender)),\n }\n\n\n\ndef normalize_data(X, feature_dict, using_features, dont_show=True):\n \"\"\"\n Inputs:\n - X: numpy array of shape(num of patients x num of features)\n eg: 132 x 16.\n - feature_index: list of ints, corresponding to the txt files.\n eg: [0, 3, 41].\n Outputs:\n - A normalized X, of same shape.\n Function:\n - Firstly, Normalize X by numpy.preprocessing.\n - Secondly, If -9999 is in a feature, we normalize this feature again, but without the -9999 ones.\n Thirdly, we paste the normalized feature back to X.\n e.g: - X = [1, 2, -9999]\n - Firstly, normalized to [0.01, 0.02, -1.5]. 
\n - Secondly, [1, 2] is normalized to [0.5, 0.6].\n - Thirdly, paste back, we get [0.5, 0.6, -1.5].\n This could keep the shape of X, makes indexing easier and doesn't harm the performance of kNN.\n \"\"\"\n X = X\n feature_dict = feature_dict\n features = using_features\n num_patients, num_features = X.shape\n full_data_patients = np.arange(num_patients)\n miss_data_patients = []\n miss_data_features = []\n \n for i in range(num_patients):\n for j in range(num_features):\n if X[i][j] < -9998:\n miss_data_patients.append(i)\n if j not in miss_data_features:\n miss_data_features.append(j)\n break\n full_data_patients = np.delete(full_data_patients, miss_data_patients)\n if not dont_show:\n print(\"\\nMissing data features:\")\n for j in miss_data_features:\n print(feature_dict[features[j]])\n print(\"\\nFull data patients:\")\n print(full_data_patients)\n #X_normalized = preprocessing.normalize(X, norm='l2', axis=0)\n scaler = preprocessing.QuantileTransformer(output_distribution='uniform')\n X_normalized = scaler.fit_transform(X)\n if miss_data_features:\n X_miss_data_feature = X[full_data_patients, miss_data_features].reshape(-1, 1) # Miss data feature of patients with full data.\n X_miss_data_feature_normalized = preprocessing.normalize(X_miss_data_feature, norm='l2', axis=0).reshape(-1) \n X_normalized[full_data_patients, miss_data_features] = X_miss_data_feature_normalized\n return X_normalized\n\ndef split_train_predict_set(X_normalized, y_to_makeup, dont_show=True):\n \"\"\"\n We remove the predicting patients out of training patients set.\n - Inputs:\n X_normalized: Numpy array of shape (Num_patients x Using_features) \n y_to_makeup: 1-dim numpy array. We want to makeup the missing data of this feature.\n - Outputs:\n X_train, X_predict: numpy array of (Num_train/predict_patients x Using_features)\n y_predict: 1-dim numpy array of length (Num_predict_patient).\n train_patients, predict_patients: A list of ints. 
We index train/predict patients by this list.\n \"\"\"\n X_normalized = X_normalized\n y_to_makeup = y_to_makeup\n dont_show = dont_show\n \n num_patients = X_normalized.shape[0]\n train_patients = np.arange(num_patients)\n predict_patients = []\n \n for i in range(num_patients):\n if y_to_makeup[i] < -9998:\n predict_patients.append(i)\n train_patients = np.delete(train_patients, predict_patients)\n\n X_train = X_normalized[train_patients]\n y_train = y_to_makeup[train_patients]\n X_predict = X_normalized[predict_patients]\n \n if not dont_show:\n print(\"X_train: {0}, X_predict: {1}, y_train: {2}\\n\".format(X_train.shape, X_predict.shape, y_train.shape))\n return X_train, y_train, X_predict, train_patients, predict_patients\n\n\n","sub_path":"utils/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":5853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"356672498","text":"#!/usr/bin/env python2\n# coding=utf-8\n\n\"\"\"\nThe default per-repository configuration\n\"\"\"\n\nimport json\nfrom os.path import exists, dirname\nfrom gitver.defines import CFGFILE\n\ndefault_config = {\n 'next_suffix': 'NEXT',\n 'next_custom_suffix': 'SNAPSHOT'\n}\n\n\ndef init_or_load_user_config():\n # try load user configuration\n try:\n with open(CFGFILE, 'r') as f:\n user = json.load(f)\n except (IOError, ValueError) as v:\n user = dict()\n\n # save to file as an example\n if not exists(CFGFILE):\n if exists(dirname(CFGFILE)):\n with open(CFGFILE, 'w') as f:\n json.dump(default_config, f)\n\n # merge user with defaults\n return dict(default_config, **user)\n\ncfg = init_or_load_user_config()\n","sub_path":"gitver/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"580714999","text":"row = ['Active', 'Name', 'Categories', 'Retail price', 'Tax rule', 'Short description', 'Description',\n 'Tags', 'Meta title', 'Meta description', 'URL rewritten', 'Available for order', 'Show price', 'Inbound', 'Outbound', 'Insurance', 'Subtotal']\n\n\nrow = ['Handle','Title','Body (HTML)','Vendor','Type','Tags','Published','Option1 Name','Option1 Value','Option2 Name','Option2 Value','Option3 Name','Option3 Value','Variant SKU','Variant Grams','Variant Inventory Tracker','Variant Inventory Policy','Variant Fulfillment Service','Variant Price\tVariant Compare At Price','Variant Requires Shipping','Variant Taxable','Variant Barcode','Image Src','Image Position','Image Alt Text','Gift Card','SEO Title','SEO Description','Google Shopping / Google Product Category','Google Shopping / Gender','Google Shopping / Age Group','Google Shopping / MPN','Google Shopping / AdWords Grouping','Google Shopping / AdWords Labels','Google Shopping / Condition','Google Shopping / Custom Product','Google Shopping / Custom Label 0','Google Shopping / Custom Label 1','Google Shopping / Custom Label 2','Google Shopping / Custom Label 3','Google Shopping / Custom Label 4','Variant Image','Variant Weight Unit','Variant Tax Code','Cost per item']\n\nrow = ['Handle','Title','Body (HTML)','Vendor','Type','Tags','Published','Option1 Name','Option1 Value','Option2 Name','Option2 Value','Option3 Name','Option3 Value','Variant SKU','Variant Grams','Variant Inventory Tracker','Variant Inventory Policy','Variant Fulfillment Service','Variant Price\tVariant Compare At Price','Variant Requires Shipping','Variant Taxable','Variant Barcode','Image 
Src','Image Position','Image Alt Text','Gift Card','SEO Title','SEO Description','Variant Image','Variant Weight Unit','Variant Tax Code','Cost per item']\n\npublished = 'TRUE'\nvendor = 'iFixYouri'\ntypeOf = ''\noption1 = 'Inbound Shipping'\noption2 = 'Outbound Shipping'\noption3 = 'Insurance'\nvariantSKU = ''\nvariantGrams = 0\nvariantInventoryTracker = ''\nvariantInventoryPolicy = 'continue'\nvariantFulFillmentService = 'manual'\nvariantCompareAtPrice = ''\nvariantRequiresShipping = 'TRUE'\nvariantBarcode = ''\nimageSrc = ''\ngiftCard = \"FALSE\"\nvariantImage = ''\nvariantWeightUnit = 'lb'\nvariantTaxCode = ''\ncostPerItem = ''\n\n\nrow = [urlRewritten, serviceName, longDescription, vendor, typeOf, tags, published, option1, inbound, option2, outbound, option3, ins, variantSKU, variantGrams, variantInventoryTracker, variantInventoryPolicy, variantFulFillmentService, cost + inboundShipping[inbound] + outboundShipping[outbound] + insurance[ins], variantCompareAtPrice, variantRequiresShipping, variantTaxable, variantBarcode, imageSrc, imagePosition, imageAltText, giftCard, metaTitle, metaDescription, variantImage, variantWeightUnit, variantTaxCode, costPerItem]\n\n\n# category, cost, taxRule, shortDescription, , tags, metaTitle, metaDescription, availableForOrder, showPrice, subtotal]","sub_path":"contentWriterNew/supplemental.py","file_name":"supplemental.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"471500780","text":"# -*- coding: UTF-8 -*-\nfrom django.shortcuts import render\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.core.paginator import Paginator\nfrom django.core.paginator import EmptyPage\nfrom django.core.paginator import PageNotAnInteger\n# Create your views here.\n# for bootstrap test\n\n# Wrap the csrf request so Django does not treat it as a cross-site attack script\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.template.context_processors import csrf\nfrom django.shortcuts import render_to_response\n\nfrom .models import Applications, Branch\n\ndef home(request):\n return render(request,'add_app_base.html')\n\ndef index(request):\n return render(request,'home.html')\n\n@csrf_exempt\n\n# Add application info\ndef addApplication(request):\n name=request.POST['name']\n status=request.POST['status']\n app=Applications()\n app.name=name\n app.status=status\n app.save()\n return HttpResponseRedirect(\"/app_q\")\n\n# Query all records and show them paginated\ndef app_query(request):\n limit = 5 # number of records per page\n #students = Student.objects.all()\n apps = Applications.objects.all()\n paginator = Paginator(apps, limit) # instantiate a paginator object\n page = request.GET.get('page') # get the page number\n try:\n apps = paginator.page(page) # get the records of that page\n except PageNotAnInteger: # if the page number is not an integer\n apps = paginator.page(1) # take the records of the first page\n except EmptyPage: # if the page number is too large and has no matching records\n apps = paginator.page(paginator.num_pages) # take the records of the last page\n return render_to_response('app_curd_new.html',{'data':apps})\n\n# Delete a record\ndef app_delByID(request):\n id=request.GET['id'];\n app=Applications.objects.get(id=id)\n app.delete()\n return HttpResponseRedirect(\"/app_q\")\n\n# Update a record\ndef app_showUid(request):\n id=request.GET['id'];\n app=Applications.objects.get(id=id) # get the concrete object; the return type differs from filter()'s output\n return render_to_response('app_update.html',{'data':app})\n\n# Show a single record\ndef app_queryById(request):\n id=request.GET['id'];\n if id == \"\": # if there is no input, fall back to the query-all view\n return HttpResponseRedirect(\"/app_q\")\n app=Applications.objects.filter(id=id) # filter the result by id; a queryset is returned\n return 
render_to_response('app_curd.html',{'data':app})","sub_path":"branch/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"11830399","text":"## Try This: read name, age and gender, then print \"Your name is {}, your age is {}, your gender is {}.\"\n\nuser_input = input(\"이름, 나이, 성별을 입력하여 주세요. >>> \")\nprint(\"당신이 입력하신 정보는 {} 입니다.\".format(user_input))\nmessage = \"당신의 이름은 {}, 나이는 {}, 성별은 {}입니다.\"\nuser_infor = user_input.split(',')\nprint(message.format(user_infor[0], user_infor[1], user_infor[2]))\n\n## Try This: read two numbers and build a calculator program that performs the four basic arithmetic operations.\n\nnumber_input = input(\"계산할 식을 입력하여 주세요. ex) 3 + 4 >>> \")\nprint(\"당신이 입력하신 계산식은 {} 입니다.\".format(number_input))\nequation = number_input.split(' ')\nif equation[1] == '+':\n result = float(equation[0]) + float(equation[2])\nelif equation[1] == '-':\n result = float(equation[0]) - float(equation[2])\nelif equation[1] == '*':\n result = float(equation[0]) * float(equation[2])\nelif equation[1] == '/':\n result = float(equation[0]) / float(equation[2])\nelse:\n print(\"잘못된 수식입니다. 다시 확인하시고 입력해주세요.\")\n\nprint(number_input, ' = ', result)","sub_path":"python/[SAH005] python_function.py","file_name":"[SAH005] python_function.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"76113357","text":"# coding: utf-8\n\nimport flask\nimport auth\nimport model\nfrom main import app\n\n@app.route('/all/', methods=['GET', 'POST'])\n@auth.login_required\ndef view_all():\n user_db = auth.current_user_db()\n return flask.render_template(\n 'view_all.html',\n html_class='view_all',\n title='View All Notes',\n notes=enumerate(user_db.notes_list),\n more_than_zero=len(user_db.notes_list) > 0\n )","sub_path":"main/view_all.py","file_name":"view_all.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"419896150","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n# boj 11000: lecture room assignment\n\nimport sys\nimport heapq\n\ninput = sys.stdin.readline\nn = int(input().rstrip())\nclass_times = [list(map(int, input().split())) for _ in range(n)]\nclass_times.sort(key = lambda x: x[0])\n\nqueue = []\nfor time in class_times:\n # queue[0]: the earliest end time among the lectures currently in progress\n if queue and queue[0] <= time[0]:\n heapq.heappop(queue)\n heapq.heappush(queue, time[1])\n\nprint(len(queue))\n","sub_path":"Greedy/boj_2437.py","file_name":"boj_2437.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"31450466","text":"#!/usr/bin/env python3.4\n# -*- coding: UTF-8 -*-\n#\n# punktyadresowe_import.py CC-BY-NC-SA 3.0 WiktorN\n#\n# Based on work by Grzegorz Sapijaszko (http://wiki.openstreetmap.org/wiki/User:Gsapijaszko/punktyadresowe_import)\n#\n# dependencies:\n# Beautiful-Soup (http://www.crummy.com/software/BeautifulSoup/)\n# pip install beautifulsoup4\n# easy_install beautifulsoup4\n# apt-get install python-beautifulsoup4\n# portmaster www/py-beautifulsoup\n#\n# TODO:\n# - add spellchecking for street and city based on TERYT dictionaries\n# - street spellchecking - split into words and look for similar words in dictionary\n# - ideas: http://en.wikipedia.org/wiki/Levenshtein_distance , ngrams (http://en.wikipedia.org/wiki/N-gram),\n# Norvig spell-checker - http://norvig.com/spell-correct.html\n\nimport argparse\nimport json\nimport logging\nfrom functools import partial\n\nfrom 
data.base import convert_to_osm, AddressEncoder\nfrom data.egeoportal import EGeoportal\nfrom data.gisnet import GISNET\nfrom data.gugik import GUGiK, GUGiK_GML\nfrom data.impa import iMPA\nfrom data.warszawaum import WarszawaUM\n\n\n# stałe\n# _EPSG2180 = Proj(init='epsg:2180')\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Downloads data from iMPA and saves in OSM or JSON format. CC-BY-SA 3.0 @ WiktorN. Filename is \"\n \".osm or .json\"\n )\n parser.add_argument(\n \"--output-format\",\n choices=[\"json\", \"osm\"],\n help='output file format - \"json\" or \"osm\", default: osm',\n default=\"osm\",\n dest=\"output_format\",\n )\n parser.add_argument(\n \"--source\",\n choices=[\"impa\", \"gugik\", \"gugik_gml\", \"gisnet\", \"warszawa\", \"e-geoportal\"],\n help='input source: \"gugik\", \"impa\", \"gisnet\" or \"warszawa\". Gugik, gisnet and warszawa requires'\n \" providing teryt:terc code. gugik_gml requires to provide a filename as gmina. Defaults to\"\n ' \"impa\"',\n default=\"impa\",\n dest=\"source\",\n )\n parser.add_argument(\n \"--log-level\",\n help=\"Set logging level (debug=10, info=20, warning=30, error=40, critical=50), default: 20\",\n dest=\"log_level\",\n default=20,\n type=int,\n )\n parser.add_argument(\n \"--no-mapping\",\n help=\"Disable mapping of streets and cities\",\n dest=\"no_mapping\",\n default=False,\n action=\"store_const\",\n const=True,\n )\n parser.add_argument(\n \"--wms\",\n help=\"Override WMS address with address points\",\n dest=\"wms\",\n default=None,\n )\n parser.add_argument(\n \"--terc\",\n help=\"teryt:terc code which defines area of operation\",\n dest=\"terc\",\n default=None,\n )\n parser.add_argument(\n \"gmina\",\n nargs=\"?\",\n help=\"list of iMPA services to download or e-geoportal layer name\",\n )\n args = parser.parse_args()\n\n logging.basicConfig(level=args.log_level)\n\n if args.no_mapping:\n global mapstreet, mapcity\n mapstreet = lambda x, y: x\n mapcity = lambda x, y: x\n if args.source == \"impa\":\n imp_gen = partial(iMPA, wms=args.wms, terc=args.terc)\n elif args.source == \"gugik\":\n imp_gen = partial(GUGiK, terc=args.terc)\n elif args.source == \"gisnet\":\n imp_gen = partial(GISNET, terc=args.terc)\n if not args.gmina:\n raise Exception(\"You need to provide service name\")\n elif args.source == \"warszawa\":\n imp_gen = partial(WarszawaUM, terc=args.terc)\n elif args.source == \"gugik_gml\":\n imp_gen = partial(GUGiK_GML)\n elif args.source == \"e-geoportal\":\n imp_gen = partial(EGeoportal, terc=args.terc)\n if not args.gmina:\n raise Exception(\"You need to provide layer name\")\n else:\n raise Exception(\"Source not supported\")\n if args.gmina:\n # rets = parallel_execution(*map(lambda x: lambda: imp_gen(x).getAddresses(), args.gmina))\n ret = imp_gen(args.gmina).get_addresses()\n # rets = list(map(lambda x: imp_gen(x).getAddresses(), args.gmina)) # usefull for debugging\n else:\n ret = imp_gen().get_addresses()\n if args.output_format == \"json\":\n write_conv_func = lambda x: json.dumps(list(x), cls=AddressEncoder)\n file_suffix = \".json\"\n else:\n write_conv_func = convert_to_osm\n file_suffix = \".osm\"\n\n if args.gmina:\n with open(args.gmina + file_suffix, \"w+\", encoding=\"utf-8\") as f:\n f.write(write_conv_func(ret))\n else:\n fname = \"result.osm\"\n if args.terc:\n fname = \"%s.osm\" % (args.terc,)\n with open(fname, \"w+\", encoding=\"utf-8\") as f:\n f.write(write_conv_func(ret))\n\n\nif __name__ == \"__main__\":\n 
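# entry point: parse the command-line options, fetch the addresses from the selected source and write them out as .osm or .json\n 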
main()\n","sub_path":"punktyadresowe_import.py","file_name":"punktyadresowe_import.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"518832466","text":"import smtplib\nfrom email.message import EmailMessage\n\nemail = EmailMessage()\n\n#username, password, receiver, subject, message \ndef send_email(usr, pwd, to, sub, msg):\n\temail[\"from\"] = usr\n\temail[\"to\"] = to\n\temail[\"subject\"] = sub\n\t\n\temail.set_content(msg)\n\n #host by mail.com, wokrs only with those emails\n\twith smtplib.SMTP(host=\"smtp.mail.com\", port=587) as smtp:\n\t\tsmtp.ehlo()\n\t\tsmtp.starttls()\n\t\tsmtp.login(usr, pwd)\n\t\tsmtp.send_message(email)\n","sub_path":"send_mail.py","file_name":"send_mail.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"476183905","text":"from . import constants\n\nclass Particle(object):\n \"\"\"\n Describes a particle; holds mass, momentum, energy, charge etc info\n \"\"\"\n def __init__(self):\n \"\"\"\n Initialise the particle to \"NULL\"\n \"\"\"\n self.p = 0. # total momentum\n self.mass = 0.\n self.charge = 0.\n self.energy = 0. # total energy\n self.gamma_rel = 1.\n self.beta_rel = 0.\n self.pid = 0 # PDG PID code\n\n @classmethod\n def new_from_ke(cls, kinetic_energy, pid):\n \"\"\"\n Generate a new particle based on kinetic energy\n - kinetic_energy: KE in MeV\n - pid: pdg pid number\n Returns an object of type particle\n \"\"\"\n particle = Particle()\n particle.set_pid(pid)\n particle.set_kinetic_energy(kinetic_energy)\n return particle\n\n @classmethod\n def new_from_momentum(cls, momentum, pid):\n \"\"\"\n Generate a new particle based on momentum\n - momentum: KE in MeV\n - pid: pdg pid number\n Returns an object of type particle\n \"\"\"\n particle = Particle()\n particle.set_pid(pid)\n particle.set_momentum(momentum)\n return particle\n\n def set_pid(self, new_pid):\n \"\"\"\n Change the pid; preserves energy and momentum, even if this leaves the\n total energy < mass which is non-physical\n \"\"\"\n self.mass = constants.get_mass(new_pid)\n self.charge = constants.get_charge(new_pid)\n self.pid = new_pid\n\n def get_momentum(self):\n return self.p\n\n def set_momentum(self, new_momentum):\n self.p = new_momentum\n self.energy = (self.p**2+self.mass**2)**0.5\n self.gamma_rel = self.energy/self.mass\n self.beta_rel = self.p/self.energy\n\n def get_kinetic_energy(self):\n return self.energy - self.mass\n\n def set_kinetic_energy(self, new_ke):\n self.energy = new_ke + self.mass\n self.gamma_rel = self.energy/self.mass\n self.p = (self.energy**2-self.mass**2)**0.5\n self.beta_rel = self.p/self.energy\n\n def set_energy(self, new_energy):\n self.energy = new_energy\n self.gamma_rel = self.energy/self.mass\n self.p = (self.energy**2-self.mass**2)**0.5\n self.beta_rel = self.p/self.energy\n","sub_path":"scripts/toy_model/foil_model/particle.py","file_name":"particle.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"232316706","text":"class Solution(object):\n def findDiagonalOrder(self, nums):\n \"\"\"\n :type nums: List[List[int]]\n :rtype: List[int]\n \"\"\"\n res = []\n for i in range(len(nums)):\n for j in range(len(nums[i])):\n if i+j >= len(res):\n res.append([])\n res[i+j].append(nums[i][j])\n \n ans = []\n for i in range(len(res)):\n ans.extend(res[i][::-1])\n 
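# res[d] holds anti-diagonal d (= i+j) collected top to bottom, so each one is reversed before flattening; e.g. nums=[[1,2,3],[4,5,6],[7,8,9]] gives [1,4,2,7,5,3,8,6,9]\n 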
return ans","sub_path":"1424. Diagonal Traverse II.py","file_name":"1424. Diagonal Traverse II.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"565346041","text":"import math\nfrom chardet.universaldetector import UniversalDetector\nimport copy\nimport random\n\n# Длина слова в коде Хемминга - 53\n\n# Вспомогательные методы\n#ord(символ) - символ в его код в ascii\n#chr(число) - код ascii в символ\n#print(int('1101', base=2)) - перевод из двоичной сс и десятичную\n\n# Считывание текста из файла с заданной кодировкой\ndef read_file(path, encoding):\n text = \"\"\n with open(path, \"r\", encoding = encoding) as f:\n for line in f.readlines():\n text += line\n return text\n\n# Определение кодировки заданного файла\ndef get_encoding(path):\n detector = UniversalDetector()\n with open(path, 'rb') as fh:\n for line in fh:\n detector.feed(line)\n if detector.done:\n break\n detector.close()\n if (detector.result == None):\n print(\"Неизвестная кодиковка. Невозможно открыть прочитать текст\")\n exit()\n return detector.result\n\n# Кодировка из значения ascii в массив с двоичным кодом\ndef get_bit_mas(code, word_length):\n bit_mas = []\n for i in range(len(code)):\n s = bin(code[i])\n s = s[2:] # Удаляем первые символы '0b' из двоичной кодировки\n while(len(s) != word_length): # Добавляем 0, если слово короче заданной длины\n #print(\"Тут проблема\")\n s = '0' + s\n for j in range(len(s)):\n bit_mas.append(int(s[j]))\n #print(\"И тут тоже\")\n return bit_mas\n\n# Кодировка из массива с двоичным кодом в значения ascii\ndef get_ascii_code(bit_mas, word_length):\n code = []\n while(len(bit_mas) != 0):\n value = bit_mas[0:word_length]\n bit_mas = bit_mas[word_length:]\n s = \"\"\n for i in range(len(value)):\n s = s + str(value[i])\n #print(\"value = \")\n #print(value)\n code.append(int(s, base = 2))\n return code\n\n# Перевод из значений ascii в текст\ndef get_text(code, word_length, encoding):\n text = \"\"\n for i in range(len(code)):\n #print(chr(code[i]))\n text = text + chr(code[i])\n #print(text)\n\n file_path = \"my_file.txt\"\n my_file = open(file_path, \"w\")\n my_file.write(text)\n my_file.close()\n\n new_encoding = get_encoding(file_path)\n \n print(\"new_encoding = \")\n print(new_encoding['encoding'])\n print()\n\n #text = text.decode(encoding['encoding'])\n #text = text.decode(encoding['encoding']) # Ошибка\n return text\n\n# Реализация кода Хемминга\ndef Hamming_code(bit_mas, length_hamming_word_input, count_of_errors):\n Hamming_mas = [] # Список с готовым кодом\n\n count_of_iterations = math.ceil(len(bit_mas) / length_hamming_word_input)\n\n # Считываем одно слово\n for k in range(count_of_iterations):\n #print(\"k = \", k)\n try:\n value_word = bit_mas[0:length_hamming_word_input]\n bit_mas = bit_mas[length_hamming_word_input:]\n except:\n value_word = bit_mas\n bit_mas = []\n\n # Вставка контрольных битов (по умолчанию принимают 0)\n counter = 0\n value = 0\n while(value < len(value_word)):\n #print(\"value = \", value)\n value_word.insert(value, 0)\n counter = counter + 1\n value = (2 ** counter) - 1\n\n # Установка значений контрольных битов\n counter = 0\n value = 2 ** counter\n while(value - 1 < len(value_word)):\n i = value - 1\n count_of_one = 0\n while(i < len(value_word)):\n for j in range(value):\n if(value_word[i] == 1):\n count_of_one = count_of_one + 1\n i = i + 1\n if(i >= len(value_word)):\n break\n i = i + value\n value_word[value - 1] = count_of_one % 2\n counter = 
counter + 1\n value = 2 ** counter\n\n ## Генерация и вставка случайного (в заданном диапазоне) количества ошибок\n ##random.seed(1)\n #if(count_of_errors != -1):\n # probability = count_of_errors * random.random()\n # #print(\"Вероятность равна\", probability)\n # value_count_of_errors = round(probability)\n # print(\"Количество вставляемых ошибок равно\", value_count_of_errors)\n\n # for i in range(value_count_of_errors):\n # number = math.floor(len(value_word) * random.random())\n # if(value_word[number] == 0):\n # value_word[number] = 1\n # elif(value_word[number] == 1):\n # value_word[number] = 0\n\n # Генерация и вставка заданного количества ошибок\n if(count_of_errors != -1):\n for i in range(count_of_errors):\n number = math.floor(len(value_word) * random.random())\n if(value_word[number] == 0):\n value_word[number] = 1\n elif(value_word[number] == 1):\n value_word[number] = 0\n\n # Добавление текущего слова в готовый список\n for i in range(len(value_word)):\n Hamming_mas.append(value_word[i])\n\n return Hamming_mas\n\n# Повторное вычисление кода Хемминга на принимающей стороне\ndef Repeat_Hamming_code(Hamming_mas, length_hamming_word_input):\n #bit_mas = []\n count_of_true_words = 0\n count_of_false_words = 0\n count_of_corrected_words = 0\n count_of_uncorrected_words = 0\n\n validation_hamming_mas = []\n Hamming_mas_copy = copy.deepcopy(Hamming_mas)\n\n length_hamming_word_output = length_hamming_word_input + math.floor(math.log(length_hamming_word_input, 2)) + 1\n \n count_of_iterations = math.ceil(len(Hamming_mas_copy) / length_hamming_word_output)\n\n for k in range(count_of_iterations):\n try:\n value_word = Hamming_mas_copy[0:length_hamming_word_output]\n Hamming_mas_copy = Hamming_mas_copy[length_hamming_word_output:]\n except:\n value_word = Hamming_mas_copy\n Hamming_mas_copy = []\n\n # Обнуление контрольных битов\n counter = 0\n value = 0\n while(value < len(value_word)):\n #print(\"value = \", value)\n value_word[value] = 0\n counter = counter + 1\n value = (2 ** counter) - 1\n\n # Установка значений контрольных битов\n counter = 0\n value = 2 ** counter\n while(value - 1 < len(value_word)):\n i = value - 1\n count_of_one = 0\n while(i < len(value_word)):\n for j in range(value):\n if(value_word[i] == 1):\n count_of_one = count_of_one + 1\n i = i + 1\n if(i >= len(value_word)):\n break\n i = i + value\n value_word[value - 1] = count_of_one % 2\n counter = counter + 1\n value = 2 ** counter\n\n try:\n first_word = Hamming_mas[k * length_hamming_word_output:(k + 1) * length_hamming_word_output]\n except:\n first_word = Hamming_mas[k * length_hamming_word_output:]\n\n # Проверяем наличиние ошибок\n if(value_word == first_word):\n count_of_true_words = count_of_true_words + 1\n print(\"Слово передалось верно\")\n else:\n count_of_false_words = count_of_false_words + 1\n print(\"Слово передалось неверно. 
Попытка исправить\")\n print(\"count_of_false_words =\", count_of_false_words)\n\n # Находим ошибочный бит\n counter = 0\n value = 0\n number_of_error_bit = 0\n while(value < len(value_word)):\n if(value_word[value] != first_word[value]):\n number_of_error_bit = number_of_error_bit + (value + 1)\n counter = counter + 1\n value = (2 ** counter) - 1\n number_of_error_bit = number_of_error_bit - 1\n\n #print(\"number_of_error_bit = \", number_of_error_bit)\n\n # Исправляем слово (в текущем и \"валидационном\" слове)\n if(number_of_error_bit < len(value_word)):\n if(value_word[number_of_error_bit] == 0):\n first_word[number_of_error_bit] = 1\n value_word[number_of_error_bit] = 1\n elif(value_word[number_of_error_bit] == 1):\n first_word[number_of_error_bit] = 0\n value_word[number_of_error_bit] = 0\n\n # Пересчитываем контрольные биты\n # Вставка контрольных битов (по умолчанию принимают 0)\n counter = 0\n value = 0\n while(value < len(value_word)):\n #print(\"value = \", value)\n value_word[value] = 0\n counter = counter + 1\n value = (2 ** counter) - 1\n\n # Установка значений контрольных битов\n counter = 0\n value = 2 ** counter\n while(value - 1 < len(value_word)):\n i = value - 1\n count_of_one = 0\n while(i < len(value_word)):\n for j in range(value):\n if(value_word[i] == 1):\n count_of_one = count_of_one + 1\n i = i + 1\n if(i >= len(value_word)):\n break\n i = i + value\n value_word[value - 1] = count_of_one % 2\n counter = counter + 1\n value = 2 ** counter\n\n if(value_word == first_word):\n count_of_corrected_words = count_of_corrected_words + 1\n print(\"Слово удалось исправить\")\n else:\n count_of_uncorrected_words = count_of_uncorrected_words + 1\n print(\"Слово не удалось исправить\")\n\n # Убираем контрольные биты\n counter = math.floor(math.log(len(value_word), 2))\n value = (2 ** counter) - 1\n while(counter >= 0):\n value_word.pop(value)\n counter = counter - 1\n value = (2 ** counter) - 1\n\n for i in range(len(value_word)):\n validation_hamming_mas.append(value_word[i])\n\n print(\"validation_hamming_mas = \")\n print(validation_hamming_mas)\n print()\n\n return validation_hamming_mas, count_of_true_words, count_of_false_words, count_of_corrected_words, count_of_uncorrected_words\n\n# Перевод из десятичной сс в двоичную (число -> список)\ndef dec_bin(number):\n number_copy = copy.deepcopy(number)\n l = []\n while(number_copy != 0):\n l.append(number_copy % 2)\n number_copy = math.floor(number_copy / 2)\n l.reverse()\n print(\"Десятичное число:\")\n print(l)\n return l\n\n# Перевод из двоичной сс в десятичную (список -> число)\ndef bin_dec(l):\n l_copy = copy.deepcopy(l)\n number = 0\n for i in range(len(l_copy)):\n number = number + (l_copy[i] * (2 ** (len(l_copy) - i - 1)))\n print(\"Двоичное число\")\n print(number)\n return number\n","sub_path":"python/Hamming_code/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":12275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"152404994","text":"from utils.functions import parse \nfrom utils.containers import ItemConverter\nfrom rnn import RNN\nfrom gru import GRU\nfrom lstm import LSTM\n\ndef main():\n args = parse()\n converter = ItemConverter()\n wordconverter, train_input = converter.prepare_data(args.train_text, item_size=args.input_size, sequence_mode=True)\n posconverter, train_gold = converter.prepare_data(args.train_pos, unk_flag=False, sequence_mode=True, seq_output_mode=True)\n if args.rnn_model == 'rnn':\n RNN_Model = RNN\n elif 
args.rnn_model == 'gru':\n RNN_Model = GRU\n elif args.rnn_model == 'lstm':\n RNN_Model = LSTM\n\n rnn = RNN_Model(input_size=wordconverter.item_num(), output_size=posconverter.item_num(), \\\n hidden_size=args.hidden_size, bias=args.bias, batch_size=args.batch_size)\n rnn.set_optimizer(args.optimizer, learning_rate=args.learning_rate)\n rnn.fit(train_input, train_gold, epoch=args.epoch)\n if args.test:\n with open(args.output, \"w\") as f:\n test_input = wordconverter.create_allvec(args.test, sequence_mode=True)\n outputs = list()\n for input_mats in test_input:\n outputs.append(rnn.predict(input_mats, mode=\"test\"))\n print(posconverter.set_of_vecs2sent(outputs), end=\"\", file=f)\n \nif __name__==\"__main__\":\n main()\n","sub_path":"src/run_rnn.py","file_name":"run_rnn.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"227133157","text":"\ndef escape_special_char(val:str):\n return val.replace('\\'', '\\'\\'').replace('\\\\', '\\\\\\\\')\n \ndef needs_dollar_quote(val:any, check_for_qoute=False) -> str:\n if not check_for_qoute:\n return '%s' % val\n if type(val) is str:\n return '\\'%s\\'' % escape_special_char(val)\n elif (type(val) is int) or (type(val) is float):\n return '%d' % val\n elif val is None:\n return '' \n else:\n raise Exception('Unhandled value type: %s' % type(val)) \n\ndef levenshtein_distance(source:str, target:str) -> int:\n v0 = [0] * (len(target) + 1) \n v1 = [0] * (len(target) + 1)\n\n for i in range(0, len(target) + 1):\n v0[i] = i\n\n for i in range(0, len(source)):\n # first element of the target vector is A[i+1][0]\n # edit distance is delete (i+1) chars from s to match empty t\n v1[0] = i + 1 \n\n for j in range(0, len(target)):\n # calculating costs for A[i+1][j+1]\n deletionCost = v0[j + 1] + 1\n insertionCost = v1[j] + 1\n if source[i] == target[j]:\n substitutionCost = v0[j]\n else:\n substitutionCost = v0[j] + 1\n\n v1[j + 1] = min(deletionCost, insertionCost, substitutionCost)\n\n # copy s_v (current row) to s_v (previous row) for next iteration\n a = v1\n v1 = v0\n v0 = a \n return v0[len(target)] \n\ndef levenshtein_distance_percentage(source:str, target:str) -> float:\n lev = levenshtein_distance(source=source, target=target)\n return lev / len(target)","sub_path":"app/db/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"341442716","text":"\nimport random\n\n\"\"\"\n--- Dialog Options ---\n\nNames follow patterns. For example, option_412 is called by option_41 which is called by option_4 which is one of the\nfirst set of options.\n\"\"\"\n\n# -- Option tree 1 --\n\n\n\ndef option_1():\n print(\"I will tell you how to find the grail- but only if you answer me this riddle. Will you hear my riddle?\\n\")\n answer = get_answer([\"Yes\", \"No\"])\n if answer == 1:\n option_11()\n else:\n option_12()\n\n\ndef option_11():\n print(\"Here is the riddle: White is my castle, but doors have I none. My treasure is hidden between not four walls... but one. What am I?\")\n answer = get_answer([\"A white rock!\", \"An egg!\", \"A castle without doors and only one wall.\", \"That riddle SUCKS!\"])\n if answer == 1:\n option_111()\n elif answer == 2:\n option_112()\n elif answer == 3:\n option_113()\n else:\n option_114()\n\n\ndef option_111():\n print(\"That is incorrect. 
I will not tell you the location of the grail!\\n\")\n\n\ndef option_112():\n print(\"That is correct!\\n *Bartamaeus pulls something out of his pocket and hands it to you.*\\nThe grail was in my back pocket the whole time!\\n Since you answered my riddle correctly, I will give it to you!\\n\")\n print(\"Congratulations on finishing your quest!\")\n\n\ndef option_113():\n print(\"I don't think you understand how riddles work... Let's try that again!\\n\")\n option_11()\n\n\ndef option_114():\n print(\"If you do not answer my riddle now you will never find the grail!\\n Will you answer it or not!?\\n\")\n answer = get_answer([\"Fine, I guess...\", \"No way, LOSER!\"])\n if answer == 1:\n option_11()\n else:\n print(\"*Bartamaeus becomes enraged, conjures up a spell of some sort and fires it at you.*\\n*You black out and when you wake up, you have turned into a frog.*\\nRibbit!\")\n\ndef option_12():\n print(\"Very well. I wish you good fortune on your quest.\")\n\n\n# -- Option Tree 2 --\n\ndef option_2():\n print(\"Wisdom comes with age, good traveler! *Bartamaeus strokes his beard.*\\n\")\n answer = get_answer([\"Is there any wisdom you can impart on me?\", \"Then I shall wait until I am old!\",\n \"You would know, GRAMPS!\"])\n if answer == 1:\n option_21()\n elif answer == 2:\n option_22()\n elif answer == 3:\n option_23()\n\n\ndef option_21():\n print(\"What do you wish to know?\")\n answer = get_answer([\"How can I become rich?\", \"I need dating advice!\", \"I want to learn to fly!\"])\n if answer == 1:\n option_211()\n elif answer == 2:\n option_212()\n else:\n option_213()\n\n\ndef option_211():\n print(\"A penny saved is a penny earned. I suggest saving money!\")\n\n\ndef option_212():\n print(\"Treat them right and they will treat you right.\")\n\n\ndef option_213():\n print(\"*Bartamaeus casts a spell on you that launches you up 600 feet into the air*\\n*You frantically flap your arms while plummeting to the earth*\\n*Feet before the ground to begin to ascend in the air*\\n\\nCongratulations! You have successfully learned to fly!\")\n\n\ndef option_22():\n print(\"*You wait, standing in the same spot, staring at Bartamaeus with a blank expression for the rest of your life.*\\n\\nCongratulations! You died!\")\n\n\ndef option_23():\n print(\"\\\"Gramps\\\" is what they call me at the country club! How did you know that?\")\n answer = get_answer([\"Because I'm a wizard!\", \"Lucky guess.\"])\n if answer == 1:\n option_231()\n elif answer == 2:\n print(\"*Bartamaeus laughs.* Anyway, I will tell you some wisdom.\")\n option_21()\n\n\ndef option_231():\n print(\"Well, I am sure that a wizard is wiser than I. You need no wisdom from me. I bid you farewell!\")\n\n\n# -- Option Tree 3 --\n\n\ndef option_3():\n print(\"*Bartamaeus retrieves two italian sub-sandwiches from his rucksack and offers one to you*\\n\\nYou spend the afternoon sharing stories and eating sandwiches. What a wonderful day!\")\n\n\n# -- Option Tree 4 --\n\n\ndef option_4():\n print(\"*Bartamaeus reaches into his pockets and retrieves two coins clasped in each hand*\\n\\nVery well, this is all I have. But I must warn you!\\nOne of these coins is blessed to bring its possessor great power! But the other is cursed, to bring its possessor a great cursing. 
Which will you choose?\")\n answer = get_answer([\"I choose the one in your right hand.\", \"I choose the one in your left hand.\", \"I will not choose.\"])\n if answer == 1 or answer == 2:\n option_4_random()\n else:\n option_43()\n\n\ndef option_4_random():\n chance = random.randint(0,1)\n if chance == 1:\n option_4_good()\n else:\n option_4_bad()\n\n\ndef option_4_good():\n print(\"As you grab the coin, you feel a sudden surge of energy course through your body. Suddenly you are filled with immeasurable power!\\n\\nCongratulations! You have become ruler of the universe!\")\n\n\ndef option_4_bad():\n print(\"As you grab the coin, you feel your pants drop to your ankles. Everyone you know suddenly appears and starts laughing at you.\\n\\nCongratulations! You are the laughing stock of the town!\")\n\n\ndef option_43():\n print(\"Very well. I bid you a safe journey hence.\")\n\n\n\"\"\"\n--- Other functions ---\n\"\"\"\n\n\ndef get_answer(option_list):\n \"\"\"Returns the user's answer choice to the NPC's question/statement as an integer\"\"\"\n # Print the options\n print(\"Options:\")\n for i in range(len(option_list)):\n print(f\"{i + 1}. {option_list[i]}\")\n\n # Return the selected option from the user\n while True:\n try:\n selection = int(input(\">>>\"))\n if 1 <= selection <= len(option_list):\n print()\n return selection\n else:\n raise ValueError\n except ValueError:\n print(f\"Invalid option: Must be a number between 1 and {len(option_list)}\")\n\n\n\"\"\"\n--- Start the Conversation ---\nStart out with some sort of introction on how our conversation with the NPC starts.\n\"\"\"\n\nprint(\"Greetings traveler. My name is Bartamaeus. \\n\\nWhat do you seek?\\n\")\n\nanswer = get_answer([\"I seek the holy grail!\", \"I seek the wisdom.\", \"I just want a sandwich...\",\n \"I want all of your money!\", ])\n\nif answer == 1:\n option_1()\nelif answer == 2:\n option_2()\nelif answer == 3:\n option_3()\nelse:\n option_4()\n","sub_path":"NPC/npc.py","file_name":"npc.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"237592660","text":"# insert copyleft licence here\n\nimport numpy as np\nfrom math import log10, floor\nimport math\nfrom bokeh.models import Label\n\n\nclass iterating_colors:\n \"\"\"\n This class is made to mimic matlab-like color plotting behavior. Calling get_next() method\n returns new color in list, without hazzle. 
By default colorset is same as in matlab\n \"\"\"\n def __init__(self, palette=\"matlab\"):\n if palette == \"matlab\":\n self.colors = [ # matlab_line_plot_colors\n [0.0000, 0.4470, 0.7410],\n [0.8500, 0.3250, 0.0980],\n [0.9290, 0.6940, 0.1250],\n [0.4940, 0.1840, 0.5560],\n [0.4660, 0.6740, 0.1880],\n [0.3010, 0.7450, 0.9330],\n [0.6350, 0.0780, 0.1840]]\n\n if palette == \"octave\":\n self.colors = [ # octave and old matlab (version <= R2014a) line plotting colors\n [0.00000, 0.00000, 1.00000],\n [0.00000, 0.50000, 0.00000],\n [1.00000, 0.00000, 0.00000],\n [0.00000, 0.75000, 0.75000],\n [0.75000, 0.00000, 0.75000],\n [0.75000, 0.75000, 0.00000],\n [0.25000, 0.25000, 0.25000]]\n\n if palette == \"long\":\n # from\n # http://blogs.mathworks.com/pick/2008/08/15/colors-for-your-multi-line-plots/\n self.colors = [\n [0.00, 0.00, 1.00],\n [0.00, 0.50, 0.00],\n [1.00, 0.00, 0.00],\n [0.00, 0.75, 0.75],\n [0.75, 0.00, 0.75],\n [0.75, 0.75, 0.00],\n [0.25, 0.25, 0.25],\n [0.75, 0.25, 0.25],\n [0.95, 0.95, 0.00],\n [0.25, 0.25, 0.75],\n [0.75, 0.75, 0.75],\n [0.00, 1.00, 0.00],\n [0.76, 0.57, 0.17],\n [0.54, 0.63, 0.22],\n [0.34, 0.57, 0.92],\n [1.00, 0.10, 0.60],\n [0.88, 0.75, 0.73],\n [0.10, 0.49, 0.47],\n [0.66, 0.34, 0.65],\n [0.99, 0.41, 0.23]]\n\n self.current_index = 0\n\n def get_next(self):\n \"\"\"\n Returns next color in colormap, color jumps to first one after the last color is used.\n :return: tuple length of 3, type: int varitying from 0 to 255.\n \"\"\"\n m = len(self.colors) # the number of rows\n color = self.colors[self.current_index % m]\n RGB_color = [round(x*255.45) for x in color]\n self.current_index += 1\n return tuple(RGB_color)\n\n def reset(self):\n self.current_index = 0\n\n\ndef linear_regression_origo(x_axis, data_points):\n \"\"\"\n This fuction calulates linear regresion (slope and error) for line which goes through origo.\n The approach is rather manual, but precise and clear.\n :param data_points: numpy array\n :return:\n micro slope of fitted line\n err mean error in that slope\n \"\"\"\n # these libraries are shit\n # return np.linalg.lstsq(x_axis,data_points)[0]\n\n # Numpy arrays can't be column vectors! Not lying! They are that feeble and ambiguous by default.\n x_mat = np.transpose(np.matrix(x_axis))\n y_mat = np.transpose(np.matrix(data_points))\n\n # If it is wanted that origin is not fixed, then replace x_mat by X_mat in micro calulations\n X_mat = np.concatenate((x_mat, np.ones((x_axis.shape[0], 1))), axis=1)\n\n # No really, these libraries _are_ pure shit! They break down if some matrix-dimension is one!\n # return np.linalg.lstsq(X_mat, y_mat)[0]\n\n # Then let's do it the hard way.\n\n # https://en.wikipedia.org/wiki/Linear_regression\n # https://en.wikipedia.org/wiki/Least_squares\n micro = (np.linalg.inv(x_mat.transpose() * x_mat) * np.transpose(x_mat) * y_mat)[0, 0]\n\n # https://en.wikipedia.org/wiki/Mean_squared_error\n # https://en.wikipedia.org/wiki/Standard_deviation\n # https://en.wikipedia.org/wiki/Simple_linear_regression#Normality_assumption\n x_mean = np.sum(x_mat)/x_mat.size\n dof = 1 # degrees of freedom\n MSE = (1/(y_mat.size-dof)) * \\\n np.sum(np.multiply((y_mat-micro*x_mat), (y_mat-micro*x_mat))) / \\\n np.sum(np.multiply((x_mat-x_mean), (x_mat-x_mean)))\n err = np.sqrt(MSE)\n\n return micro, err\n\n\ndef print_to_latex_tabular(matrix, column_precisions=None, significant_figures=False):\n \"\"\"\n Prints 2d-numpy arrays (or regular lists) to latex tabular format. 
Then just copy-paste it.\n\n :param matrix: matrix to print\n (list or numpy_array)\n :param column_precision: single value OR array of precisions for each column\n (int, list or numpy_array, len=columns)\n :param significant_figures: if false then the column_precisions corresponds normal decimal precision\n if true then column_precisions corresponds the number of numbers to be printed\n (bool)\n :return:\n\n Examples:\n column_precisions=[...,4,...], significant_figures=True:\n 0.0012345678 -> 0.001234\n column_precisions=[...,4,...], significant_figures=False:\n 0.0012345678 -> 0.0012\n \"\"\"\n\n # I f***ing hate numpy's s***ty and poor arrays. In this case the second dimension is totally\n # undefined when it is an 1D-array. So some extra unnecessary code is required for\n # ridiculously simple things. This is NOT what python ought to be.\n array = np.matrix(matrix) # here I have contradictory naming just for joy of python\n\n # python syntax for checking if np.shape empty tuple, i.e. col_pres is int. Clear? Not.\n if (column_precisions is not None) and not np.shape(column_precisions):\n col_pres = np.ones(np.shape(array)[1], dtype=np.int) * column_precisions\n else:\n col_pres = column_precisions\n\n if (col_pres is not None) and (np.shape(array)[1] != len(col_pres)):\n print(array)\n print(\" np.shape(array)[1]\", np.shape(array)[1], \" len(col_pres)\", len(col_pres))\n raise Exception(\"col_pres should be vector of length of columns\")\n\n array_to_print = [[\"\" for n in range(np.shape(array)[1])] for m in range(np.shape(array)[0])]\n\n # convert array to printable form\n for m in range(np.shape(array)[0]):\n for n in range(np.shape(array)[1]):\n\n if col_pres is None:\n array_to_print[m][n] = str(array[m, n])\n elif significant_figures:\n # logarithm and value of exact zero is not a good combination\n if not math.isclose(array[m, n], 0):\n pres = -int(floor(log10(abs(array[m, n]))))+col_pres[n]-1\n else:\n pres = col_pres[n]\n\n if pres > 0:\n array_to_print[m][n] = (\"{:.\" + str(pres) + \"f}\").format(round(array[m, n], pres))\n else:\n array_to_print[m][n] = str(int(round(array[m, n], pres)))\n\n elif (not significant_figures) and (col_pres[n] > 0):\n pres = col_pres[n]\n array_to_print[m][n] = (\"{:.\"+str(pres)+\"f}\").format(round(array[m, n], pres))\n # print no decimals at all (integers), (negative col_pres values are permitted)\n else:\n array_to_print[m][n] = str(int(round(array[m, n], col_pres[n])))\n\n # find the column lengths (cells with most characters)\n max_column_len = np.amax(np.vectorize(lambda cell: len(cell))(array_to_print), axis=0)\n\n print(\"\")\n print(\"\\\\begin{tabular}{\" + np.shape(array)[1]*\"|l\" + \"|}\\n\", end=\"\", sep=\"\")\n print(\"\\\\hline\")\n print((\" & \" * (np.shape(array)[1]-1) + \" \\\\\\\\\"))\n print(\"\\\\hline\")\n for m in range(np.shape(array)[0]):\n for n in range(np.shape(array)[1]):\n # print and trailing spaces to max width so tabulars are nicely readable\n print((\"{:\"+str(max_column_len[n])+\"}\").format(array_to_print[m][n]), end=\"\", sep=\"\")\n if n != np.shape(array)[1]-1:\n print(\" & \", end=\"\", sep=\"\")\n else:\n print(\" \\\\\\\\\\n\", end=\"\", sep=\"\")\n print(\"\\\\hline\")\n print(r\"\\end{tabular}\")\n print(\"\")\n","sub_path":"interferometer/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":8358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"556712916","text":"\nfrom CO2.bluepy_back import BluepyBack\nfrom datetime import 
datetime\nimport time\nfrom struct import unpack, pack\n\n\n_BATTERY = 0X180F\n_SENSOR_SERVICE = 0xAA01\n_SETTINGS_SERVICE = 0XAA02\n_DEVICE_INFO_SERVICE = 0X180A\n_GENERIC_ACCESS = 0x1800\n\n#Characteristics:\n#Battery\n_BATTERY_LEVEL = 0X2A19\n\n#Settings\n_SENSORS_SAMPLE_RATE = 0XBB05\n_SHUT_DOWN = 0XBB06\n_PASSKEY = 0XBB07\n\n#Settings data to write\n_SHUT_DOWN_DATA = 0x01AA01AA\n\n#Device Info\n_SERIAL_NUMBER = 0x2A25\n_FIRMWARE_REVISION = 0x2A26\n_HARDWARE_REVISION = 0x2A27\n_MANUFACTURER_NAME = 0x2A29\n\n#Sensor readings\n_TEMPERATURE = 0xBB01\n_PRESSURE = 0xBB02\n_HUMIDITY_LEVEL = 0xBB03\n_CO2_LEVEL = 0xBB04\n\n\nclass BleConnection(object):\n def __init__(self, mac, adapter = 'hci0'):\n self._mac = mac\n self._connect = BluepyBack(self._mac) #_connect is a BluepyBack object\n\n def __enter__(self):\n self._connect.connect() #_connect calls the connect function of the BluepyBack class\n return self._connect # and __enter__ returns the connection object\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n try:\n self._connect.disconnect()\n self._connect = None\n except:\n pass \n\n\"\"\"\nClass DeviceCache stores the values set by the last update that happened.\nLater in the application, the getParameter function of the DataHandler class returns values from DeviceCache.\nThe main idea of this principle is to save the battery life of the sensor, because the sensor is measuring slow-changing parameters.\n\"\"\"\nclass DeviceCache(object):\n temperature = 0\n pressure = 0\n humidity_level = 0\n co2_level = 0\n battery_level = 0\n firmware = None\n hardware = None\n serial = None\n manufacturer = None\n first_update = False\n is_cached = False\n\n def __init__(self, time_out):\n self._timeout = time_out\n self._update_time = datetime.now()\n\n\n\nclass DataHandler(object):\n def __init__(self, mac, adapter = 'hci0', cache_timeout = 300, update_cache = True, sensor_sample_rate = 10): \n self._mac = mac\n self._adapter = adapter\n self._cache = DeviceCache(cache_timeout)\n self._sample_rate = sensor_sample_rate\n self.update_timer = update_cache # knowing the mac address we can connect using (with ... as ...)\n # Function that gets data from sensor\n\n def updateAllData(self):\n\n # Updating sensor readings\n # unpacking them also to \"translate C values to python values\"\n with BleConnection(self._mac) as sensor:\n\n \"\"\"\n We need to read these pieces of information only once, when initiating the sensor.\n These data don't change their value very often, so there is no need to waste battery on those updates.\n \"\"\"\n if self._cache.first_update == False:\n\n ##Setting sample rate\n data = int(str(0x000000) + str(self._sample_rate))\n byte = pack('i', data)\n sensor.write(_SETTINGS_SERVICE, _SENSORS_SAMPLE_RATE, byte)\n\n self._cache.firmware = sensor.read(_DEVICE_INFO_SERVICE, _FIRMWARE_REVISION).decode(\"utf-8\")\n self._cache.hardware = sensor.read(_DEVICE_INFO_SERVICE, _HARDWARE_REVISION).decode(\"utf-8\")\n self._cache.serial = sensor.read(_DEVICE_INFO_SERVICE, _SERIAL_NUMBER).decode(\"utf-8\")\n self._cache.manufacturer = sensor.read(_DEVICE_INFO_SERVICE, _MANUFACTURER_NAME).decode(\"utf-8\")\n self._cache.first_update = True \n\n\n self._cache.battery_level, = unpack('B', sensor.read(_BATTERY, _BATTERY_LEVEL))\n self._cache.temperature, = unpack('f', sensor.read(_SENSOR_SERVICE, _TEMPERATURE))\n self._cache.pressure, = unpack('f', sensor.read(_SENSOR_SERVICE, _PRESSURE))\n self._cache.humidity_level, = unpack('f', sensor.read(_SENSOR_SERVICE, _HUMIDITY_LEVEL))\n self._cache.co2_level, = unpack('f', sensor.read(_SENSOR_SERVICE, _CO2_LEVEL))\n 
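# note: struct.unpack always returns a tuple, so the trailing commas on the targets above unpack the single value, e.g. unpack('f', pack('f', 21.5)) -> (21.5,)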
\n\n self._cache._update_time = datetime.now()\n self._cache.is_cached = True\n\n\n\n \"\"\" \n The function checks whether the cache is populated and updates it depending on the variable 'update_timer'.\n If the update timer is enabled, the cache is refreshed only after cache_timeout has elapsed; otherwise the\n cache is refreshed every time the function is called.\n This principle of updating and caching, together with the DeviceCache class, allows us to save battery lifetime.\n When the cache update is over, the function returns the wanted parameter.\n \"\"\"\n def getParameter(self, parameter): \n\n if self.update_timer == True: #checking if the update timer is enabled\n if (self._cache.is_cached == False or self._timedif(self._cache._update_time, datetime.now()) > self._cache._timeout):\n self._cache.is_cached = False\n self.updateAllData()\n else:\n self._cache.is_cached = False\n self.updateAllData()\n\n if self._cache.is_cached:\n\n if parameter == 'battery_level':\n return self._cache.battery_level\n elif parameter == 'temperature':\n return round(self._cache.temperature, 2)\n elif parameter == 'pressure':\n return round(self._cache.pressure, 2)\n elif parameter == 'humidity_level':\n return round(self._cache.humidity_level, 2)\n elif parameter == 'co2_level':\n return round(self._cache.co2_level, 2)\n elif parameter == 'firmware_revision':\n return self._cache.firmware\n elif parameter == 'hardware_revision':\n return self._cache.hardware\n elif parameter == 'serial_number':\n return self._cache.serial\n elif parameter == 'manufacturer_name':\n return self._cache.manufacturer\n else:\n raise ValueError('Unknown request. There is no such parameter')\n else:\n raise ValueError('Cache is empty!')\n\n\n\n def getData(self, service_uuid, char_uuid): # getData returns the requested data using BluepyBack.getCharacteristic\n with BleConnection(self._mac) as sensor:\n data = sensor.read(service_uuid, char_uuid) \n return data\n\n\n # Function that sets data to sensor\n def setHandle(self, service_uuid, char_uuid, data):\n with BleConnection(self._mac) as sensor:\n sensor.write(service_uuid, char_uuid, data)\n\n\n \"\"\"The function calculates the difference between the time when update_cache was last called and the current time.\"\"\"\n def _timedif(self, t1,t2=datetime.now()):\n return (t2 - t1).seconds\n","sub_path":"Python/CO2/sensor_connection.py","file_name":"sensor_connection.py","file_ext":"py","file_size_in_byte":7009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"378246129","text":"#%%\nimport pygame\nfrom setting import *\n#%%\nclass Number(pygame.sprite.Sprite):\n def __init__(self,x,y,index):\n super().__init__()\n self.index=index\n self.button_size=110\n self.image=pygame.Surface((self.button_size,self.button_size))\n self.image.fill('white')\n self.rect=self.image.get_rect(x=x,y=y)\n self.surface_rect=self.rect\n \n def text(self,display):\n font=pygame.font.SysFont(None,150)\n self.image=font.render(str(self.index),True,'black')\n self.rect=self.image.get_rect(center=self.surface_rect.center)\n \n def hidden_button(self):\n self.image=pygame.Surface((self.button_size,self.button_size))\n self.rect=self.surface_rect\n self.image.fill('gray')","sub_path":"number_button.py","file_name":"number_button.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"340979240","text":"from point2d import Point2d\nfrom line2d import Line2d\nfrom arc2d import Arc2d\nfrom 
box2d import Box2d\nfrom utils import Utils\nfrom mold_profile_element import MoldProfileElement\n\nTOLERANCE = 0.00001\n\nclass MoldProfile:\n def __init__(self):\n self.geometry = []\n self.elements = []\n self.start_point = None\n self.end_point = None\n\n def add_geometry(self, geometry):\n self.geometry.append(geometry)\n\n def bounding_box(self):\n box_list = []\n for geometry in self.geometry:\n box_list.append(geometry.bounding_box())\n box = Box2d()\n box.set_from_box_list(box_list)\n return box\n\n def find_start_point(self):\n possible_points = []\n for geom in self.geometry:\n possible_points.append(geom.start_point())\n possible_points.append(geom.end_point())\n\n if not possible_points:\n Utils.error_message(\"Start point could not be found.\")\n\n self.start_point = possible_points[0]\n for point in possible_points:\n if point.x > self.start_point.x:\n self.start_point = point\n\n def put_geometry_in_map(self):\n geometry_map = {}\n counter = 0\n for geom in self.geometry:\n geometry_map[counter] = geom\n counter = counter + 1\n return geometry_map\n\n def find_next_curve(self, geometry_map):\n next_key = None\n is_forward = True\n for key in geometry_map:\n geom = geometry_map[key]\n d0 = geom.start_point().distance_squared(self.end_point)\n if d0 < TOLERANCE * TOLERANCE:\n if next_key is not None:\n Utils.error_message(\"Next geometry already found.\")\n is_forward = True\n next_key = key\n\n d1 = geom.end_point().distance_squared(self.end_point)\n if d1 < TOLERANCE * TOLERANCE:\n if next_key is not None:\n Utils.error_message(\"Next geometry already found.\")\n is_forward = False\n next_key = key\n\n if next_key is None:\n Utils.error_message(\"Next geometry could not be found.\")\n\n new_topo = MoldProfileElement(geometry_map[next_key], is_forward)\n self.elements.append(new_topo)\n self.end_point = new_topo.end_point()\n\n return next_key\n\n def split_arcs(self):\n new_geometry = []\n for geom in self.geometry:\n if geom.type() == 'Arc2d':\n split_arcs = geom.split_at_inflection_points()\n new_geometry = new_geometry + split_arcs\n else:\n new_geometry.append(geom)\n self.geometry = new_geometry\n\n def finalize_profile(self):\n self.elements = []\n if not self.geometry:\n Utils.error_message(\"Profile is empty.\")\n\n self.split_arcs()\n\n self.find_start_point()\n self.end_point = self.start_point\n geometry_map = self.put_geometry_in_map()\n\n while geometry_map:\n index = self.find_next_curve(geometry_map)\n if index is None:\n Utils.error_message(\"Curves could not be connected.\")\n else:\n geometry_map.pop(index, None)\n\n if not geometry_map:\n break\n\n def clone(self):\n profile = MoldProfile()\n for element in self.elements:\n element_copy = element.clone()\n profile.elements.append(element_copy)\n profile.geometry.append(element_copy.geometry)\n profile.start_point = self.start_point.clone()\n profile.end_point = self.end_point.clone()\n return profile\n","sub_path":"mold_profile.py","file_name":"mold_profile.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"420224244","text":"# Game mode Scriptlet for Big Shot\n\nfrom mpf.system.scriptlet import Scriptlet\n\n\nclass Game(Scriptlet):\n\n def on_load(self):\n\n self.machine.events.add_handler('mode_game_started', self.start)\n self.machine.events.add_handler('mode_game_stop', self.stop)\n\n def start(self):\n\n self.machine.events.add_handler('collect_special',\n self.collect_special)\n 
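# every handler registered here in start() is removed again in stop()\n 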
self.machine.events.add_handler('player_add_success',\n self.player_added)\n self.machine.events.add_handler('ball_started',\n self.ball_started)\n\n # register for classic / modern notification\n self.machine.events.add_handler('enable_classic_mode',\n self.enable_classic_mode)\n self.machine.events.add_handler('enable_modern_mode',\n self.enable_modern_mode)\n\n # register shows for cool effects in modern mode\n self.machine.events.add_handler('drop_targets_Solids_lit_hit',\n self.solid_target_hit) # gabe\n self.machine.events.add_handler('drop_targets_Stripes_lit_hit',\n self.stripe_target_hit) # gabe\n\n # set initial classic / modern mode\n if self.machine.classic_mode:\n self.enable_classic_mode()\n else:\n self.enable_modern_mode()\n\n # turn on the GI\n # for light in self.machine.lights.items_tagged('GI'):\n # light.on()\n\n # Game Over stays on, so we have to turn it off (Gabe did this)\n self.machine.lights['gameOver'].off()\n\n def enable_classic_mode(self):\n pass\n\n def enable_modern_mode(self):\n pass\n\n def stop(self):\n self.machine.events.remove_handler(self.collect_special)\n self.machine.events.remove_handler(self.player_added)\n self.machine.events.remove_handler(self.ball_started)\n self.machine.events.remove_handler(self.enable_classic_mode)\n self.machine.events.remove_handler(self.enable_modern_mode)\n self.machine.events.remove_handler(self.ball_started)\n self.machine.events.remove_handler(self.solid_target_hit) # gabe\n self.machine.events.remove_handler(self.stripe_target_hit) # gabe\n\n def player_added(self, **kwargs):\n self.machine.coils['gameCounter'].pulse()\n\n def ball_started(self, **kwargs):\n self.log.debug(\"Game Scriptlet ball_started()\")\n\n self.set_bonus_lights()\n\n # Need this since Big Shot's plunger lane is not a ball device,\n # so we need to automatically launch a \"live\" ball when the ball\n # starts\n if not self.machine.ball_controller.num_balls_live:\n self.machine.ball_controller.add_live()\n\n # Gabe put this in because we need to make sure the 8 ball lights\n # are turned off when a ball starts. 
They seem to have a mind of\n # their own since there's no device attached to them\n\n self.machine.lights['ball8'].off()\n self.machine.lights['eightBall500'].off()\n\n def collect_special(self):\n self.machine.coils.knocker.pulse()\n self.machine.game.award_extra_ball()\n\n def set_bonus_lights(self):\n # Used to set the proper playfield light to show the bonus value for\n # that ball\n\n balls_remaining = (self.machine.config['Game']['balls_per_game'] -\n self.machine.game.player.vars['ball'])\n\n if balls_remaining > 1:\n self.machine.lights['bonus1k'].on()\n self.machine.lights['bonus2k'].off()\n self.machine.lights['bonus3k'].off()\n elif balls_remaining == 1:\n self.machine.lights['bonus1k'].off()\n self.machine.lights['bonus2k'].on()\n self.machine.lights['bonus3k'].off()\n else:\n self.machine.lights['bonus1k'].off()\n self.machine.lights['bonus2k'].off()\n self.machine.lights['bonus3k'].on()\n\n # methods below do cool things when modern mode is on.\n\n def solid_target_hit(self): # gabe\n\n if not self.machine.classic_mode:\n\n self.machine.shows['solid_target_hit'].play(repeat=False,\n tocks_per_sec=25,\n priority=1001)\n\n def stripe_target_hit(self): # gabe\n\n if not self.machine.classic_mode:\n\n self.machine.shows['stripe_target_hit'].play(repeat=False,\n tocks_per_sec=25,\n priority=1001)\n\n","sub_path":"big_shot/scriptlets/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"111546942","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 27 00:00:56 2017\nFrequent Set Mining\n@author: luminous\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\n\n# import data\ndef importData(inFile): \n data = pd.read_csv(inFile)\n out = {}\n out[\"ID\"] = []\n out[\"Grid\"] = []\n\n minLon = 121.47738 # min(data[\"Longitude\"])\n maxLon = 121.5025075 # max(data[\"Longitude\"])\\\n lonCount = math.ceil((maxLon - minLon) / 0.0001)\n\n minLat = 31.20891667 # min(data[\"Latitude\"])\n maxLat = 31.219175 # max(data[\"Latitude\"])\n latCount = math.ceil((maxLat - minLat) / 0.0001)\n\n for i in range(len(data)):\n # ID of Tel.\n out[\"ID\"].append([data[\"IMSI\"][i]])\n # GPS Grid ID\n x = int((data[\"Longitude\"][i] - minLon) / 0.0001)\n y = int((data[\"Latitude\"][i] - minLat) / 0.0001)\n out[\"Grid\"].append(int(x + y * lonCount))\n \n return out\n\n","sub_path":"hw3/q2_frequentSet.py","file_name":"q2_frequentSet.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"251806609","text":"import re\r\n\r\nimport requests as res\r\nclass Spider:\r\n def __init__(self):\r\n self.session=res.Session()\r\n def get(self,url):\r\n try:\r\n return self.session.get(url)\r\n except Exception as e:\r\n print(e)\r\n print(\"下载错误\")\r\n def run(self,first_url):\r\n for i in range(10):\r\n url=first_url+\"list-1-%s.html\"%(i)\r\n print(url)\r\n response=self.get(url)\r\n if response:\r\n html=response.text\r\n img_items=self.get_img_items_by_html(html)\r\n print(len(img_items),img_items[0])\r\n for img_item in img_items:\r\n self.save_img(img_item)\r\n print(img_item[\"url\"])\r\n def get_img_items_by_html(self,html):\r\n # e.g. alt text: \"Zhao Mengjie, campus belle of East China University\"\r\n imgs=re.findall(r'<img.*?>',html)\r\n\r\n img_items=[]\r\n if imgs:\r\n for img in imgs:\r\n img_item={\r\n \"file_name\":re.findall(r'alt=\"(.*?)\"',img)[0],\r\n 
\"url\":\"http://www.xiaohuar.com/\"+re.findall(r'src=\"(.*?)\"',img)[0]\r\n }\r\n img_item[\"file_name\"]=img_item[\"file_name\"]+\".\"+img_item[\"url\"].split(\".\")[-1]\r\n img_iterms.append(img_item)\r\n return img_iterms\r\n def save_img(self,img_item):\r\n try:\r\n img_data=self.get(img_item[\"url\"]).content\r\n with open(img_item[\"file_name\"],\"wb\")as f:\r\n f.write(img_data)\r\n except Exception as e:\r\n print(e)\r\n print(\"保存图片错误\")\r\nif __name__==\"__main__\":\r\n first_url=\"http://www.xiaohuar.com/hua\"\r\n spider=Spider()\r\n spider.run(first_url)","sub_path":"gz.py","file_name":"gz.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"271619700","text":"# Cargar una lista con 5 elementos enteros. Imprimir el mayor y un mensaje si \n# se repite dentro de la lista (es decir si dicho valor se encuentra en 2 o más\n# posiciones en la lista) \nlistaEnteros=[]\nfor x in range(5):\n valor=int(input('Valor: '))\n listaEnteros.append(valor)\nmayor=listaEnteros[0]\nrepite=0\nfor y in range(len(listaEnteros)):\n if y>mayor:\n mayor=listaEnteros[y]\n if(mayor==listaEnteros[y]):\n repite+=1\nprint(mayor, repite)","sub_path":"Ejercicio2.py","file_name":"Ejercicio2.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"280400909","text":"\"\"\"\nWe can use the idea of bisection search to determine if a character is in a \nstring, so long as the string is sorted in alphabetical order.\n\nFirst, test the middle character of a string against the character you're \nlooking for (the \"test character\"). If they are the same, we are done - we've \nfound the character we're looking for!\n\nIf they're not the same, check if the test character is \"smaller\" than the \nmiddle character. If so, we need only consider the lower half of the string; \notherwise, we only consider the upper half of the string. (Note that you can \ncompare characters using Python's < function.)\n\nImplement the function isIn(char, aStr) which implements the above idea \nrecursively to test if char is in aStr. char will be a single character and \naStr will be a string that is in alphabetical order. 
The function should return \na boolean value.\n\"\"\"\ndef isIn(char, aStr):\n    '''\n    char: a single character\n    aStr: an alphabetized string\n    \n    returns: True if char is in aStr; False otherwise\n    '''\n    # Your code here\n    length = len(aStr)\n    half = length // 2\n    if length == 0 or length == 1:\n        return aStr == char\n    elif aStr[half] == char:\n        return True\n    elif aStr[half] > char:\n        return isIn(char, aStr[:half])\n    else:\n        return isIn(char, aStr[half+1:])
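\n\n# Illustrative calls (exercising both recursion branches):\n# isIn('c', 'abcde')   # True\n# isIn('z', 'abcde')   # False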
\n","sub_path":"mitx_6.00.1x_ics_py/week3/L5.P8.py","file_name":"L5.P8.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"595452970","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 01 22:31:48 2016\n\n@author: xzk\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution(object):\n    def buildTree(self, preorder, inorder):\n        \"\"\"\n        :type preorder: List[int]\n        :type inorder: List[int]\n        :rtype: TreeNode\n        \"\"\"\n        if inorder:\n            idx = inorder.index(preorder.pop(0))\n            root = TreeNode(inorder[idx])\n            root.left = self.buildTree(preorder, inorder[0:idx])\n            root.right = self.buildTree(preorder, inorder[idx+1:])\n            return root\n    \n    def buildTree2(self, preorder, inorder):\n        \"\"\"\n        :type preorder: List[int]\n        :type inorder: List[int]\n        :rtype: TreeNode\n        The idea is as follows:\n\n        1) Keep pushing the nodes from the preorder into a stack\n        (and keep making the tree by adding nodes to the left of \n        the previous node) until the top of the stack matches the\n        inorder.\n\n        2) At this point, pop the top of the stack until the top \n        does not equal inorder (keep a flag to note that you have \n        made a pop).\n\n        3) Repeat 1 and 2 until preorder is empty. The key point \n        is that whenever the flag is set, insert a node to the \n        right and reset the flag.\n        \"\"\"\n        if not preorder:\n            return None\n        head = TreeNode(preorder[0])\n        stack = [head]\n        i, j = 1, 0\n        while i < len(preorder):\n            temp = None\n            node = TreeNode(preorder[i])\n            while stack and stack[-1].val == inorder[j]:\n                temp = stack.pop()\n                j += 1\n            if temp:\n                temp.right = node\n            else:\n                stack[-1].left = node\n            stack.append(node)\n            i += 1\n        return head
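\n\n# Illustrative check: preorder [3,9,20,15,7] with inorder [9,3,15,20,7]\n# rebuilds the tree 3 -> (left 9, right 20 -> (left 15, right 7)).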
","sub_path":"105Construct Binary Tree from Preorder and Inorder Traversal.py","file_name":"105Construct Binary Tree from Preorder and Inorder Traversal.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"461158419","text":"#Write an algorithm that reads 10 numbers between 100 and 500 and shows the largest and the smallest number on screen. Version 2:\r\n\r\ni = 1\r\n\r\nwhile i <= 10:\r\n    num = int(input('Enter a number between 100 and 500: '))\r\n\r\n    while num < 100 or num > 500:\r\n        num = int(input('The number is not between 100 and 500. Enter it again: '))\r\n\r\n    if i == 1:\r\n        menor = num\r\n        maior = num\r\n    else:\r\n        if num > maior:\r\n            maior = num\r\n        if num < menor:\r\n            menor = num\r\n    i += 1\r\n\r\nprint(f'The smallest number is {menor} and the largest number is {maior}')","sub_path":"sclp030.py","file_name":"sclp030.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"198492382","text":"#!/usr/bin/env python2\n# sticking with 2 b/c mne python on skynet?\n\nimport os\nimport glob\nimport re\nimport sys\n\n\n\"\"\"\n20170508 WF -- rewriting getData.bash b/c rest directories were named backward\n\nexpect to be given a raw scan directory (containing protocol dirs that have raw MR* dicoms)\nand an assigned scan number (1 for rac, 2 for dtbz)\n\nneed to parse out protocols for\n - fieldmaps\n - mprages\n - funcs\n - rest\n\noutput directory looks like\n../subjs/10843_20151109/\n  fm # handled by constructFM.bash\n   {1,2}/{phase,mag}\n  func\n   1,..,6\n  resting\n   1 <- 24digitdate_session_2digitproto \n   2 <- 24digitdate_session_2digitproto \n  struct\n   1\n    Sagittal_MPRAGE_ADNI_256x240.13\n    Sagittal_MPRAGE_ADNI_G2_256x240.10\n   2\n    Sagittal_MPRAGE_ADNI_256x240.20\n    Sagittal_MPRAGE_ADNI_G2_256x240.10\n\ncalled from ./01_construct.bash\n\"\"\"\n\nSUBJDIRROOT='/Volumes/Phillips/mMR_PETDA/subjs/'\n\nexpect = {\n    't1'    : {'namelike': 'MPRAGE_ADNI'   ,'ndcm': 160, 'nprot': 1},\n    't1g2'  : {'namelike': 'MPRAGE_ADNI_G2','ndcm': 176, 'nprot': 1},\n    'funcs' : {'namelike': 'ep2d_BOLD_x'   ,'ndcm': 200, 'nprot': 6},\n    'rests' : {'namelike': 'resting'       ,'ndcm': 320, 'nprot': 2}\n}\n\ndef dcmdirinfo(dcmfull):\n    d = os.path.basename(dcmfull)\n    s=d.split('.') # break ep2d_BOLD...28 into name and number\n    ndcm = len(glob.glob(os.path.join(dcmfull,'MR*'))) # number of dicoms\n    pname='.'.join(s[0:-1]) # e.g. ep2d_BOLD_rest...\n    pnum = s[-1] # e.g. 28 \n    return({'pnum': int(pnum), 'pname': pname, 'ndcm': int(ndcm), 'path': dcmfull})\n\ndef pnumsort(dlist):\n    dlist.sort(key=lambda x: x['pnum'])\n    return(dlist)\n\ndef filterp(d,p,n):\n    f = [ x for x in d if x['ndcm']==n and bool(p in x['pname']) ]\n    return( f )\n\ndef basedirfs(basedir):\n    if not os.path.isdir(basedir): return([])\n    return([ dcmdirinfo(os.path.join(basedir,d)) for d in os.listdir(basedir)])\n    \n\ndef embed(dictarray,d):\n    for da in dictarray:\n        da.update(d)\n    return(dictarray)\n    \n### actually do stuff\n\ndef getmprage(t1):\n    if len(t1) > 0: \n        t1 = t1[-1]\n    else:\n        print('missing mprage')\n        return([])\n\n    #do mprage stuff\n\n    # looks like\n    # /Volumes/Phillips/mMR_PETDA/subjs/11517_20160330/struct/1/Sagittal_MPRAGE_ADNI_G2_256x240.10/mprage.nii.gz\n    outdir= os.path.join(\n        t1['subjdir'],\n        'struct',\n        t1['sessionno'],\n        \"%s.%s\"%(t1['pname'],t1['pnum']))\n    fname= 'mprage.nii.gz'\n    fullfile=os.path.join(outdir,fname)\n    if os.path.isfile(fullfile): \n        print(\"have %s\"%fullfile)\n        return([fullfile])\n    #otherwise dcmstack\n    print(\"would run to create %s\"%fullfile)\n    return([])\n\ndef getfuncs(funcs):\n\n    # for funcs, we have x{1..6} so we could grab the most recent\n    # but if it was a full duration and we redid it there is probably a naming issue\n    # so just check we have one of each\n    fs=[x['pname'] for x in funcs] \n    fsrep= [ x for x in fs if fs.count(x) > 1 ]\n    if len(fsrep) >0 :\n        for r in fsrep: print(\"func %s is repeated %d times (should be only once)\"%(r,fs.count(r)))\n        return([])\n\n    # also check that they are in order\n\n    # mirror getmprage's path construction for each functional run\n    # (the original body referenced an undefined t1; the output name below is assumed)\n    out = []\n    for f in funcs:\n        outdir = os.path.join(\n            f['subjdir'],\n            'func',\n            # no session level here: only rest (not funcs) in second dtbz scan\n            \"%s.%s\" % (f['pname'], f['pnum']))\n        fname = 'func.nii.gz'  # assumed, by analogy with mprage.nii.gz\n        fullfile = os.path.join(outdir, fname)\n        if os.path.isfile(fullfile):\n            print(\"have %s\" % fullfile)\n            out.append(fullfile)\n        else:\n            print(\"would run to create %s\" % fullfile)\n    return(out)\n\n\nif __name__ == '__main__':\n\n    basedir=sys.argv[1]\n\n    ## match subject_date and session-time in input base directory\n    # and assign to variables\n    sinfo =re.search(r'((\\\\d{5})_(\\\\d{8}))/([0-9.-]{19})/',basedir)\n    if sinfo is None:\n        print(\"bad basedir '%s' cannot extract subject info\"%basedir)\n        sys.exit(1)\n\n    # could use ?P<name> syntax instead of explicit naming\n    luna_date=sinfo.group(1); subjid=sinfo.group(2); vdate=sinfo.group(3)\n    sessionid=sinfo.group(4)\n\n    subjdir=os.path.join(SUBJDIRROOT,luna_date)\n\n    geninfo={'luna_date': luna_date,'subjdir': subjdir,'sessionno': sys.argv[2]}\n\n    # get and sort files in this base dir\n    ds= pnumsort( embed( basedirfs( basedir ), geninfo ) )\n    \n    \n    ## make a list of protocols that match our name and count requirements\n    # long form looks like:\n    # prots['t1'] =filterp(ds,'MPRAGE_ADNI' ,160); ...\n    prots={} \n    for ek in expect.keys(): \n        prots[ek]=filterp(ds, expect[ek]['namelike'], expect[ek]['ndcm'] )\n\n\n    ## check we have exactly what we want\n    warnbadnum=lambda k,n: \"\" if len(prots[k]) == n else \"%s: not %d (but %d) of %s! 
(%s)\"%(luna_date,n,len(prots[k]),k,basedir)\n\n    # and say something about it\n    # long form looks like\n    # msg=[ warnbadnum('t1' ,1), warnbadnum('t1g2' ,1), warnbadnum('funcs',6), warnbadnum('rests',2)]\n    msg = [ warnbadnum( ek, expect[ek]['nprot']) for ek in expect.keys() ] \n    print(\"\\n\".join([x for x in msg if x != \"\"]))\n\n    ## deal with things when they are off\n\n    # if any are zero bail\n    #if any( [len(x)==0 for x in prots.values() ] ):\n    #    print(\"not gonna try to do anything when all of something is missing!\")\n    #    sys.exit(1)\n\n\n\n\n    getmprage(prots['t1g2'])\n    getmprage(prots['t1'])\n\n    getfuncs(prots['funcs'])\n\n\n","sub_path":"getdata.py","file_name":"getdata.py","file_ext":"py","file_size_in_byte":5304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"574032506","text":"import json\n\nimport mock\nimport pytest\n\nfrom filecabinet.fields import FileCabinetFormWidget\nfrom filecabinet import assistent\n\nfrom .. import stubs\nfrom ..helpers import (\n    assert_select, assert_script_included, assert_style_included,\n    DEFAULT_SITE\n)\n\ndef test_should_render_empty_widget():\n    widget = FileCabinetFormWidget()\n    html = widget.render('image', None, {'id': 'id_image'})\n\n    assert_widget_rendered(html, name='image')\n\ndef test_should_render_widget_with_filedict():\n    widget = FileCabinetFormWidget()\n    html = widget.render('document',\n                         stubs.filedict_document_inside_folder,\n                         {'id': 'id_document'})\n\n    value = assistent.dict_extract(stubs.filedict_document_inside_folder,\n                                   'container', 'path')\n    assert_widget_rendered(html, name='document', value=value)\n\ndef test_should_render_widget_with_already_serialized_filedict():\n    # if the widget value is already a serialized filedict, just return it\n\n    value = assistent.dict_extract(stubs.filedict_document_inside_folder,\n                                   'container', 'path')\n\n    widget = FileCabinetFormWidget()\n    html = widget.render('document',\n                         json.dumps(value),\n                         {'id': 'id_document'})\n\n    assert_widget_rendered(html, name='document', value=value)\n\ndef test_should_raise_ValueError_if_invalid_serialized_filedict():\n    # if the widget value is a serialized filedict, but doesn't have\n    # the required keys, raise ValueError\n\n    value_with_missing_key = {\n        'container': 'container'\n    }\n    value_with_extra_key = {\n        'container': 'something',\n        'path': 'me.doc',\n        'extra': 'foo'\n    }\n\n    widget = FileCabinetFormWidget()\n\n    with pytest.raises(ValueError):\n        widget.render('document', json.dumps(value_with_missing_key))\n    with pytest.raises(ValueError):\n        widget.render('document', json.dumps(value_with_extra_key))\n\n\ndef test_should_raise_ValueError_if_invalid_filedict_is_passed_to_render():\n    # if the widget value is a dictionary but not with the required\n    # keys, raise ValueError\n    # it's ok to have extra keys\n\n    dict_with_missing_key = {\n        'path': 'foo'\n    }\n    dict_with_extra_key = {\n        'container': 'container',\n        'path': 'doc.doc',\n        'extra': 'ok',\n    }\n\n    widget = FileCabinetFormWidget()\n\n    with pytest.raises(ValueError):\n        widget.render('document', dict_with_missing_key)\n\n    # it's ok to have extra keys\n    widget.render('document', dict_with_extra_key)\n\ndef test_should_resolve_urls_using_custom_site():\n    site = mock.Mock()\n    site.reverse.return_value = '/my/fake/url'\n\n    widget = FileCabinetFormWidget(filecabinet_site=site)\n    html = widget.render('image', None, {'id': 'id_image'})\n\n    assert_widget_rendered(html,\n                           name='image',\n                           popup_url='/my/fake/url?popup=field',\n                           
url_resolver_service='/my/fake/url')\n\n@mock.patch('django.template.loader.render_to_string')\ndef test_should_include_site_into_template_context(render_to_string):\n    # the site isn't even used in the template, but could be helpful\n    # for somebody extending the template\n\n    widget = FileCabinetFormWidget()\n    html = widget.render('image', None, {'id': 'id_image'})\n\n    args, kwargs = render_to_string.call_args\n    context = args[1]\n\n    assert context.get('filecabinet_site') == DEFAULT_SITE, 'Site not in context'\n\ndef test_should_include_scripts_and_css_on_media():\n    widget = FileCabinetFormWidget()\n\n    assert_script_included(widget.media, '/static/filecabinet/js/field-picker.js')\n    assert_style_included(widget.media, '/static/filecabinet/css/field.css')\n\n\n# custom asserts\n\ndef assert_widget_rendered(html, name, value='', id='', preview=None,\n                           popup_url='/admin/filecabinet/list/?popup=field',\n                           url_resolver_service='/admin/filecabinet/api/urls/'):\n    id = id or 'id_' + name\n    has_value = bool(value)\n\n    # actual hidden input field\n    input_selector = ('.filecabinet-field-container input'\n                      '[id=\"{id}\"]'\n                      '[name=\"{name}\"]'\n                      '[type=\"hidden\"]'\n                      '[class=\"filecabinet-field\"]').format(**locals())\n    found = assert_select(html, input_selector)\n    found = found[0]\n\n    if has_value:\n        # make sure expected value was found.\n        # must compare here and not on the xpath above because of the lack\n        # of ordering on dicts since the value is a JSON\n        found_value = json.loads(found.attrib['value'])\n        assert value == found_value\n\n    if has_value:\n        # display value\n        value = json.dumps(value)\n        display_value_selector = 'span#{id}-value'.format(id=id)\n        display_value_text = value\n    else:\n        # display message to choose file\n        display_value_selector = 'span#{id}-value em'.format(id=id)\n        display_value_text = 'Click to choose a file'\n\n    assert_select(html, display_value_selector, text=display_value_text)\n\n    # link to open popup\n    popup_link_selector = ('.filecabinet-field-container a'\n                           '[class=\"filecabinet-popup-field\"]'\n                           '[href=\"{popup_url}\"]'\n                           '[data-field-id=\"{id}\"]'\n                           ' img[src=\"/static/filecabinet/img/popup_open.png\"]'.\n                           format(**locals()))\n    assert_select(html, popup_link_selector)\n\n\n    # preview\n    preview_selector = ('.filecabinet-field-container div#{id}-preview'\n                        '[data-url-resolver-service=\"{url_resolver_service}\"]'.\n                        format(**locals()))\n    assert_select(html, preview_selector)\n\n    # link to clear field\n    assert_remove_link(html, id, visible=has_value)\n\ndef assert_remove_link(html, field_id, visible):\n    '''\n    The remove link in the widget should be hidden when there is\n    no initial value\n    '''\n    remove_link = assert_select(html, '.filecabinet-field-container a'\n                                      '[href=\"#\"]'\n                                      '[class=\"filecabinet-popup-field-clear\"]'\n                                      '[data-field-id=\"{0}\"]'.format(field_id))\n    remove_link = remove_link[0]\n\n    if not visible:\n        assert 'display:none' == remove_link.attrib.get('style'), \\\n            'Remove link should be hidden'\n\n    assert_select(remove_link, 'img[src=\"/static/filecabinet/img/field_clear.gif\"]')\n","sub_path":"filecabinet/tests/fields/test_form_widget.py","file_name":"test_form_widget.py","file_ext":"py","file_size_in_byte":6648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"154283676","text":"# pip install python-binance\n\nimport asyncio\nimport json\n\nfrom binance import AsyncClient\n\nasync def main():\n    \n    client = await AsyncClient.create()\n    symbol_info = await client.get_symbol_info('BTCUSDT')\n    \n    
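# symbol_info is a dict of the exchange's trading rules for the pair\n    # (price/size filters, precision, status); pretty-print it below\n    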
print(json.dumps(symbol_info, indent=2))\n\n    await client.close_connection()\n    \nif __name__ == \"__main__\":\n    \n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(main())","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"367672548","text":"import openpyxl\r\n\r\n# load the Excel workbook\r\nwb = openpyxl.load_workbook('./excels/sample.xlsx')\r\n# get the list of sheet names\r\nsheetNameList = wb.get_sheet_names()\r\n# get individual sheets\r\nsheet1 = wb.get_sheet_by_name('Sheet1')\r\nsheet6 = wb.get_sheet_by_name('Sheet6')\r\n\r\n# write into a cell\r\nsheet6['A1'].value = 'write_test'\r\n# save the changes (only works while the Excel file is closed)\r\nwb.save('./excels/sample.xlsx')\r\n","sub_path":"chapter_12_excel_spreadsheets/s04_write_sheet.py","file_name":"s04_write_sheet.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"496921761","text":"#!/usr/bin/python3\n\nimport gi\ngi.require_version(\"Gtk\", \"3.0\")\ngi.require_version('XApp', '1.0')\n\nimport json  # needed for json.loads/json.dumps in the __main__ block below\nimport os\nfrom gi.repository import Gio, Gtk, GLib, XApp\n\nUUID = 'app-launcher@mchilli'\nAPP_NAME = \"App Launcher\"\nAPPLET_DIR = os.path.join(os.path.dirname(__file__))\n\nclass EditDialog():\n    def __init__(self, variant, groups, item=None):\n        self.groups = groups\n        self.variant = variant\n        if self.variant == 'edit' and item is not None:\n            self.item = item\n            self.type = 'group' if self.item[0]['is-group'] else 'app'\n        else:\n            self.type = self.variant\n        \n        self.name_origin = ''\n        self.new_group = None\n\n        self.builder = Gtk.Builder()\n        self.builder.add_from_file(os.path.join(APPLET_DIR, \"dialogs.glade\"))\n        self.builder.connect_signals(self)\n\n        self.button_ok = self.builder.get_object(\"%s-ok\" % self.type)\n        self.button_cancel = self.builder.get_object(\"%s-cancel\" % self.type)\n\n        self.builder.get_object(\"%s-label-name\" % self.type).set_text(_(\"Name\"))\n\n        self.name_entry = self.builder.get_object(\"%s-name\" % self.type)\n        self.name_entry.connect(\"changed\", self.validate_inputs)\n        self.icon_entry = self.builder.get_object(\"%s-icon\" % self.type)\n        if self.type == 'app':\n            self.builder.get_object(\"app-label-group\").set_text(_(\"Group\"))\n            self.builder.get_object(\"app-label-command\").set_text(_(\"Command\"))\n\n            self.group_entry = self.builder.get_object(\"app-group\")\n            self.command_entry = self.builder.get_object(\"app-command\")\n            self.command_entry.connect(\"changed\", self.validate_inputs)\n\n            self.group_entry.append('', '') #to display without a group\n            self.group_entry.set_active_id('')\n            for group in self.groups:\n                self.group_entry.append(group, group)\n\n        if self.variant == 'edit':\n            self.name_entry.set_text(self.item[2])\n            self.name_origin = self.item[2]\n            try:\n                self.icon_entry.set_icon(self.item[1].to_string()) #Used from Settings widget\n            except:\n                self.icon_entry.set_icon(self.item[1]) #Used from JS Applet\n            if self.type == 'app':\n                self.group_entry.set_active_id(self.item[0][\"group\"])\n                self.command_entry.set_text(self.item[3])\n        \n        self.validate_inputs()\n\n        self.dialog = self.builder.get_object(\"%s-dialog\" % self.type)\n        self.dialog.set_title(APP_NAME)\n        self.dialog.set_keep_above(True)\n        self.dialog.set_position(Gtk.WindowPosition.MOUSE)\n        self.dialog.show_all()\n\n    def run(self):\n        response = self.dialog.run()\n        \n        if response == Gtk.ResponseType.OK:\n            name = self.name_entry.get_text()\n            icon = self.icon_entry.get_icon()\n            values = {\n                
\"type\": self.type,\n \"name\": name,\n \"icon\": icon\n }\n if self.type == 'app':\n group = self.group_entry.get_active_id()\n command = self.command_entry.get_text()\n values[\"group\"] = group\n values[\"command\"] = command\n\n self.dialog.destroy()\n return values, self.new_group\n\n self.dialog.destroy()\n return None\n\n def add_group(self, *args):\n data, new_group = EditDialog('group', self.groups).run()\n if data is not None:\n self.group_entry.append(data[\"name\"], data[\"name\"])\n self.group_entry.set_active_id(data[\"name\"])\n self.new_group = {\n \"type\": data[\"type\"],\n \"name\": data[\"name\"],\n \"icon\": data[\"icon\"]\n }\n\n def valid_exec(self, exec):\n try:\n success, parsed = GLib.shell_parse_argv(exec)\n if GLib.find_program_in_path(parsed[0]) or ((not os.path.isdir(parsed[0])) and os.access(parsed[0], os.X_OK)):\n return True\n except:\n pass\n return False\n \n def validate_inputs(self, *args):\n name = self.name_entry.get_text()\n\n valid_name = False\n\n if name.strip() == '':\n self.name_entry.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, 'gtk-stop')\n self.name_entry.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _(\"The name cannot be empty\"))\n else:\n if self.type == 'group':\n if name != self.name_origin and name in self.groups:\n self.name_entry.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, 'gtk-stop')\n self.name_entry.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _(\"The name is already used\"))\n else:\n valid_name = True\n else:\n valid_name = True\n if valid_name:\n self.name_entry.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, 'gtk-ok')\n self.name_entry.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _(\"Valid name\"))\n\n if self.type == 'app':\n valid_exec = self.valid_exec(self.command_entry.get_text())\n if valid_exec:\n self.command_entry.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, 'gtk-ok')\n self.command_entry.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _(\"Valid command\"))\n else:\n self.command_entry.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, 'gtk-stop')\n self.command_entry.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _(\"The command is not valid\"))\n\n self.button_ok.set_sensitive(valid_name and (valid_exec if self.type == 'app' else True))\n\nclass ConfirmDialog():\n def __init__(self, icon, name):\n self.builder = Gtk.Builder()\n self.builder.add_from_file(os.path.join(APPLET_DIR, \"dialogs.glade\"))\n self.builder.connect_signals(self)\n\n\n self.icon = self.builder.get_object(\"question-icon\")\n self.icon.set_from_gicon(Gio.Icon.new_for_string(icon), Gtk.IconSize.DIALOG)\n\n self.name = self.builder.get_object(\"question-name\")\n self.name.set_markup(\"%s\" % name)\n\n self.builder.get_object(\"question-label-really\").set_text(_(\"really delete?\"))\n\n self.dialog = self.builder.get_object(\"question-dialog\")\n self.dialog.set_title(APP_NAME)\n self.dialog.set_keep_above(True)\n self.dialog.set_position(Gtk.WindowPosition.MOUSE)\n self.dialog.show_all()\n\n def run(self):\n response = self.dialog.run()\n self.dialog.destroy()\n return response\n\nif __name__ == \"__main__\":\n import sys\n import gettext\n \n sys.path.append('/usr/share/cinnamon/cinnamon-settings/bin')\n from JsonSettingsWidgets import *\n\n # i18n\n gettext.install(UUID, GLib.get_home_dir() + '/.local/share/locale')\n \n try:\n output = None\n if sys.argv[1] == 'edit':\n groups = []\n for group in json.loads(sys.argv[2]):\n 
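# keep only each group's display name for the edit dialog's dropdown\n                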
groups.append(group[\"name\"])\n            item = json.loads(sys.argv[3])\n\n            dialog = EditDialog('edit', groups, item)\n            output = dialog.run()\n\n        elif sys.argv[1] == 'confirm':\n            dialog = ConfirmDialog(sys.argv[2], sys.argv[3])\n            output = dialog.run()\n\n        else:\n            raise Exception(\"unknown dialog\")\n        print(json.dumps(output))\n    except Exception as e:\n        print(e)
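\n\n# Usage (illustrative):\n#   python3 dialogs.py edit GROUPS_JSON ITEM_JSON\n#   python3 dialogs.py confirm ICON_NAME DISPLAY_NAME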
","sub_path":"app-launcher@mchilli/files/app-launcher@mchilli/dialogs.py","file_name":"dialogs.py","file_ext":"py","file_size_in_byte":7603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"87372444","text":"import pytest\nfrom greent.ontologies.go import GO\nfrom greent.servicecontext import ServiceContext\nfrom greent import node_types\nfrom greent.util import Text\n\n@pytest.fixture(scope='module')\ndef go():\n    go = GO(ServiceContext.create_context())\n    return go\n\ndef test_biological_process(go):\n    #Mast Cell Chemotaxis\n    go_id = 'GO:0002551'\n    assert go.is_biological_process(go_id)\n    assert not go.is_cellular_component(go_id)\n    assert not go.is_molecular_function(go_id)\n\ndef test_cellular_component(go):\n    #Myelin Sheath\n    go_id = 'GO:0043209'\n    assert not go.is_biological_process(go_id)\n    assert go.is_cellular_component(go_id)\n    assert not go.is_molecular_function(go_id)\n\ndef test_molecular_function(go):\n    #FBXO Family Binding Protein\n    go_id = 'GO:0098770'\n    assert not go.is_biological_process(go_id)\n    assert not go.is_cellular_component(go_id)\n    assert go.is_molecular_function(go_id)\n\n","sub_path":"greent/test/test_go.py","file_name":"test_go.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"518919362","text":"# coding: utf-8\n# author: pengshuqin\n# date: 2019-05-08\n# desc: SCADA integration\n\nimport os\nimport sys\nimport bottle\nimport datetime\nimport setup\nif __name__ == '__main__':\n\tsetup.setCurPath(__file__)\n\t\nimport webutility\nimport scadaUtility\nimport locMgr\nimport taskMgr\nimport taskFilter\nimport meta as m \nimport dbQuery.bigTable as bigTable\n\n\ndef getFeedStatus(s):\n\ts = int(s)\n\tif s == -2:\n\t\treturn \"fail\"\n\telif s == -1:\n\t\treturn \"error\"\n\telif s == 1:\n\t\treturn \"working\"\n\telif s == 0:\n\t\treturn \"finished\"\n\telif s == 2:\n\t\treturn \"waiting\"\n\telse:\n\t\treturn \"\"\n\nbigTable.install(\"agvCtrl\")\n@bigTable.table(\"u_agv_task\",\"PDA task list\",domain=False,dateRangeField=\"starttime\",defaultSortId=\"starttime\",defaultSortDir=\"desc\")\n@bigTable.field(id=\"_id\",name=\"Task ID\",sortable=False,searchable=True)\n@bigTable.field(\"taskName\",\"Task name\",sortable=False,)\n@bigTable.field(\"agvId\",\"AGV\",sortable=False,searchable=True)\n@bigTable.field(\"status\",\"Task status\",sortable=False,formatFunc=getFeedStatus,searchable=True)\n@bigTable.field(\"starttime\",\"Start time\",sortable=True,searchable=False)\n@bigTable.field(\"endtime\",\"End time\",searchable=False)\n@bigTable.field(\"msg\",\"Message\",sortable=False,searchable=False)\nclass pdaManager(bigTable.manager):\n\tdef __init__(self):\n\t\tpass\n\n@bigTable.table(\"u_scada_task\",\"SCADA task list\",domain=False,dateRangeField=\"createTime\",defaultSortId=\"createTime\",defaultSortDir=\"desc\")\n@bigTable.field(id=\"_id\",name=\"Task ID\",sortable=False,searchable=True)\n@bigTable.field(\"taskName\",\"Task name\",sortable=False,)\n@bigTable.field(\"source\",\"Source location\",sortable=False,searchable=True)\n@bigTable.field(\"target\",\"Target location\",sortable=False,searchable=True)\n@bigTable.field(\"payloadId\",\"Shelf\",sortable=False,searchable=True)\n@bigTable.field(\"agvId\",\"AGV\",sortable=False,searchable=True)\n@bigTable.field(\"status\",\"Task status\",sortable=False,searchable=True)\n@bigTable.field(\"step\",\"Task step\",sortable=False,searchable=True)\n@bigTable.field(\"createTime\",\"Creation time\",sortable=True,searchable=False)\n@bigTable.field(\"startTime\",\"Start time\",searchable=False)\n@bigTable.field(\"endTime\",\"End time\",searchable=False)\n@bigTable.field(\"failMsg\",\"Message\",sortable=False,searchable=False)\nclass scadaManager(bigTable.manager):\n\tdef __init__(self):\n\t\tpass\n\n\n\n@scadaUtility.post('/api/scada/addTask')\ndef urlAddTask():\n\tsource = webutility.get_param(\"source\")\n\ttarget = webutility.get_param(\"target\")\n\tpayloadId = webutility.get_param(\"payloadId\")\n\tpriority = webutility.get_param_int(\"priority\")\n\treturn taskFilter.addTask(source,target,payloadId,priority)\n \n@scadaUtility.post('/api/scada/getTask')\ndef urlGetTask():\n\ttaskList = webutility.get_param(\"taskList\")\n\tif taskList is not None and type(taskList) is str:\n\t\ttaskList = eval(taskList)\n\n\treturn {\"taskList\": taskMgr.getTaskList(taskList)}\n\t\n@scadaUtility.post('/api/scada/getTask2')\ndef urlGetTask2():\n\treturn {\"list\": taskMgr.getTaskList2()}\n\t\n@scadaUtility.post('/api/scada/allowTask')\ndef urlAllowTask():\n\ttaskId = webutility.get_param(\"taskId\")\n\tlocId = webutility.get_param(\"locId\")\n\treturn taskFilter.allowTask(taskId,locId)\n\t\n@scadaUtility.get('/api/scada/activeAlarm')\ndef urlActiveAlarm():\n\treturn {\"alarmList\": locMgr.getAlarm()}\n\t\n@scadaUtility.post('/api/scada/setLoc')\ndef urlSetLoc():\n\tlocId = webutility.get_param(\"locId\")\n\tpayloadId = webutility.get_param(\"payloadId\")\n\treturn locMgr.setLoc(locId,payloadId)\n\t\n@scadaUtility.get('/api/scada/getLoc')\ndef urlGetLoc():\n\treturn {\"locList\" : locMgr.getLoc()}\n\t\n@scadaUtility.get('/api/scada/fail')# task failed\ndef urlCancelTask():\t\n\ttaskId = webutility.get_param(\"taskId\")\n\treturn taskFilter.failTask(taskId)\n\n@scadaUtility.get('/api/scada/finish')# task succeeded\ndef urlFinishTask():\n\ttaskId = webutility.get_param(\"taskId\")\n\treturn taskFilter.finishTask(taskId)\n\t\n@scadaUtility.get('/api/scada/cancelTask')# switch to manual handling\ndef urlSwitchMode():\n\ttaskId = webutility.get_param(\"taskId\")\n\treturn taskFilter.switchMode(taskId)\n\t\n@scadaUtility.get('/api/scada/getTaskStatus')# \ndef urlGetTaskStatus():\n\treturn {\"status\": taskFilter.getTaskStatus()}\n\n@scadaUtility.post('/api/manual/feed')\ndef urlfeed():\n\tparam = {}\n\tparam[\"seat1\"] = webutility.get_param(\"seat1\")\n\tparam[\"location1\"] = webutility.get_param(\"location1\")\n\tparam[\"floorId1\"] = webutility.get_param(\"floorId1\")\n\tparam[\"direction1\"] = webutility.get_param(\"direction1\")\n\tparam[\"seat2\"] = webutility.get_param(\"seat2\")\n\tparam[\"location2\"] = webutility.get_param(\"location2\")\n\tparam[\"floorId2\"] = webutility.get_param(\"floorId2\")\n\tparam[\"direction2\"] = webutility.get_param(\"direction2\")\n\tparam[\"floorId\"] = 
param[\"floorId1\"]\n\treturn taskFilter.feed(param)\n\n\n@scadaUtility.post('/api/manual/tasklist')\ndef scadaTaskList():\n\treturn taskMgr.getTaskList3()\n\t\n@scadaUtility.get('/api/manual/seat')\ndef getSeat():\n\treturn taskMgr.getSeat()\n\n\t\n#for uwsgi \napp = application = bottle.default_app()\nwebutility.add_ignore('/scada/getTask')\nwebutility.add_ignore('/scada/getTaskStatus')\n\nif __name__ == '__main__':\n\twebutility.run()\nelse:\n\tpass","sub_path":"scadaLink/scadaConnect/main_scadaCtrl.py","file_name":"main_scadaCtrl.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"4219618","text":"# scoreboard for 2048 game\n# Hamandishe Mathivha\n# 1 June 2016\n# 15:11\n\nfrom graphics import *\n\nclass scoreboard:\n    \"\"\"display score on screen\"\"\"\n    def __init__(self, win, center, width, height):\n        \n        self.center = center\n        \n        cx, cy = self.center.getX(), self.center.getY()\n        \n        \n        # drawing rectangle\n        p1 = Point(cx - width / 2, cy - height / 2)\n        p2 = Point(cx + width / 2, cy + height / 2)\n        \n        rect = Rectangle(p1, p2)\n        rect.setFill(\"white\")\n        rect.draw(win)\n        \n        # placing the value\n        self.value = 0\n        self.text = Text(center, str(self.value))\n        self.text.setFill(\"black\")  # black so the initial value is visible on the white box\n        self.text.draw(win)\n        \n        # Title\n        self.title = Text(Point(cx, cy + height / 2 + 2), \"SCORE\")\n        self.title.setFill(\"red\")\n        self.title.setSize(32)\n        self.title.draw(win)\n        \n        \n    def update(self, win, value):\n        \"\"\"updates the scoreboard\"\"\"\n        text = str(value)\n\n        if self.text.getText() != str(value):\n            self.text.undraw()\n            \n            self.text = Text(self.center, text)\n            self.text.setFill(\"black\")\n            self.text.setSize(18)\n            self.text.draw(win)","sub_path":"scoreBoard.py","file_name":"scoreBoard.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"500444950","text":"from system.core.controller import *\nfrom rauth.service import OAuth1Service\nfrom flask import redirect\n\nfacebook = OAuth1Service(\n    name='facebook',\n    base_url='https://graph.facebook.com/',\n    # request_token_url=None,\n    access_token_url='/oauth/access_token',\n    authorize_url='https://www.facebook.com/dialog/oauth',\n    consumer_key='259154491127882',\n    consumer_secret='c5b9a2e1e25bfa25abc75a9cd2af450a',\n    # request_token_params={'scope': 'email'}\n)\n\nclass Users(Controller):\n    def __init__(self, action):\n        super(Users, self).__init__(action)\n        self.load_model('User')\n        self.db = self._app.db\n\n    # routes['/'] = \"Users#index\"\n    def index(self):\n        # check if user is logged in\n        return self.load_view('index.html')\n\n    # routes['/login'] = \"Users#login\"\n    def login(self):\n        if 'user' in session:\n            return redirect('/')\n        return self.load_view('login.html')\n\n    # routes['/logout'] = \"Users#logout\"\n    def logout(self):\n        if 'user' in session:\n            session.clear()\n            session['user'] = False\n            flash('You have successfully logged out','success')\n        return redirect('/')\n\n    # routes['/user/<user_id>'] = \"Users#show_user\"\n    def show_user(self, user_id):\n        if 'user' in session:\n            user = self.models['User'].get_user(user_id)\n            if user:\n                return self.load_view('user.html', user=user)\n            return redirect('/')\n        return redirect('/')\n\n    # routes['/user/inbox'] = \"Users#show_inbox\"\n    def show_inbox(self):\n        if 'user' in session:\n            return self.load_view('inbox.html')\n        return redirect('/')\n\n    # routes['POST']['/login/process'] = \"Users#login_process\"\n    def 
login_process(self):\n        if 'user' in session:\n            return redirect('/')\n        return facebook.authorize(\n            callback=self._app.url_for('oauth_authorized', next=request.args.get('next') or request.referrer or None))\n\n    def oauth_authorized(self, resp):\n        next_url = request.args.get('next') or self._app.url_for('index')\n        if resp is None:\n            flash('You denied the request to sign in.', 'error')\n            return redirect(next_url)\n\n        session['facebook_token'] = (\n            resp['oauth_token'],\n            resp['oauth_token_secret']\n        )\n        session['facebook_user'] = resp['screen_name']\n\n        flash(\"You signed in as %s\" % resp['screen_name'])\n        return redirect(next_url)\n","sub_path":"app/controllers/Users.py","file_name":"Users.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"451137183","text":"import pytest\nfrom qx_test.user.models import User, UserInfo\nfrom qx_base.qx_core.storage import RedisClient\n\n\n@pytest.fixture(scope='session', autouse=True)\n@pytest.mark.django_db()\ndef redis_flushall():\n    client = RedisClient().get_conn()\n    client.flushall()\n\n\n@pytest.fixture()\ndef user_data_init(db):\n    for i in range(10):\n        user = User.objects.create_user(\n            account=\"1886666888%s\" % i,\n            mobile=\"1886666888%s\" % i,\n            email=None,\n            password=\"12345678\",\n        )\n        UserInfo.objects.create(\n            name=\"test%s\" % i,\n            age=i,\n            user=user,\n        )\n\n\n@pytest.fixture()\ndef signin_request(rf, user_data_init):\n    \"\"\"\n    Build a request that carries authentication info\n    \"\"\"\n    def _func(url, method='get', data=None, user_id=None):\n        if user_id:\n            user = User.objects.get(id=user_id)\n        else:\n            user = User.objects.get(mobile=\"18866668888\")\n        token = user.get_new_token()\n        request = getattr(rf, method)(\n            url, data=data,\n            # HTTP_MYAUTHORIZATION=\"token %s\" % token,\n            **{\"HTTP_MYAUTHORIZATION\": \"token %s\" % token},\n            content_type='application/json')\n        return request\n    return _func\n","sub_path":"qx_test/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"554013865","text":"import random\nprint('''\nWelcome to the high-low guessing game.\nI am thinking of an integer between 1 and 100.\n''')\n\nguess = 0\nnumGuesses = 0\nnumber = random.randint(1, 100)\n\nwhile guess != number:\n    numGuesses += 1\n    guess = int(input('What is your guess? 
'))\n\n if guess == number:\n print('Congratulations!')\n elif guess > number:\n print('Too high')\n else:\n print('Too low')\n\nprint('You found the number in', numGuesses, 'guesses')\n","sub_path":"simple/src/hi-low-game.py","file_name":"hi-low-game.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"647314341","text":"# https://leetcode-cn.com/problems/permutations/\n# https://medium.com/algorithms-and-leetcode/backtracking-e001561b9f28\n\n\nclass Solution:\n def permute(self, nums):\n ans = []\n temp = []\n self.backtracking(nums, temp, ans)\n return ans\n\n def backtracking(self, nums, temp, ans):\n print(temp)\n if len(nums) == len(temp):\n ans.append(list(temp))\n for i in range(len(nums)):\n if nums[i] in temp:\n continue\n temp.append(nums[i])\n self.backtracking(nums, temp, ans)\n # print('depth', len(temp))\n temp.pop()\n\n\nnums = [1, 2, 3]\nprint(Solution().permute(nums))\n","sub_path":"046-1-Permutations-backtracking.py","file_name":"046-1-Permutations-backtracking.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"425828679","text":"#!/bin/env python\n\"\"\" docstring \"\"\"\nimport sys\nimport os\nimport argparse\nimport logging\nimport copy\nimport subprocess\nimport cPickle as pickle\nimport numpy as np\nimport pdb\nfrom munkres import Munkres\nfrom copy import deepcopy\n_folder_path = os.path.split(os.path.abspath(__file__))[0]\nsys.path.append(_folder_path)\n\n\n###################################################################################\n###################################################################################\n# begin code from http://rosettacode.org/wiki/Stable_marriage_problem#Python\n# under MIT license\n#\n# Implements\n# matchmaker(guys, gals, guyprefers, galprefers)\n# preferences are represented as a dictionary\n# {person -> preference order}\n# where preference order is a list from highest to lowest preference\n# return value is a map\n# {gal -> guy}\n###################################################################################\nguyprefers = {\n 'abe': ['abi', 'eve', 'cath', 'ivy', 'jan', 'dee', 'fay', 'bea', 'hope', 'gay'],\n 'bob': ['cath', 'hope', 'abi', 'dee', 'eve', 'fay', 'bea', 'jan', 'ivy', 'gay'],\n 'col': ['hope', 'eve', 'abi', 'dee', 'bea', 'fay', 'ivy', 'gay', 'cath', 'jan'],\n 'dan': ['ivy', 'fay', 'dee', 'gay', 'hope', 'eve', 'jan', 'bea', 'cath', 'abi'],\n 'ed': ['jan', 'dee', 'bea', 'cath', 'fay', 'eve', 'abi', 'ivy', 'hope', 'gay'],\n 'fred': ['bea', 'abi', 'dee', 'gay', 'eve', 'ivy', 'cath', 'jan', 'hope', 'fay'],\n 'gav': ['gay', 'eve', 'ivy', 'bea', 'cath', 'abi', 'dee', 'hope', 'jan', 'fay'],\n 'hal': ['abi', 'eve', 'hope', 'fay', 'ivy', 'cath', 'jan', 'bea', 'gay', 'dee'],\n 'ian': ['hope', 'cath', 'dee', 'gay', 'bea', 'abi', 'fay', 'ivy', 'jan', 'eve'],\n 'jon': ['abi', 'fay', 'jan', 'gay', 'eve', 'bea', 'dee', 'cath', 'ivy', 'hope']}\ngalprefers = {\n 'abi': ['bob', 'fred', 'jon', 'gav', 'ian', 'abe', 'dan', 'ed', 'col', 'hal'],\n 'bea': ['bob', 'abe', 'col', 'fred', 'gav', 'dan', 'ian', 'ed', 'jon', 'hal'],\n 'cath': ['fred', 'bob', 'ed', 'gav', 'hal', 'col', 'ian', 'abe', 'dan', 'jon'],\n 'dee': ['fred', 'jon', 'col', 'abe', 'ian', 'hal', 'gav', 'dan', 'bob', 'ed'],\n 'eve': ['jon', 'hal', 'fred', 'dan', 'abe', 'gav', 'col', 'ed', 'ian', 'bob'],\n 'fay': ['bob', 'abe', 'ed', 'ian', 'jon', 'dan', 'fred', 'gav', 'col', 
'hal'],\n 'gay': ['jon', 'gav', 'hal', 'fred', 'bob', 'abe', 'col', 'ed', 'dan', 'ian'],\n 'hope': ['gav', 'jon', 'bob', 'abe', 'ian', 'dan', 'hal', 'ed', 'col', 'fred'],\n 'ivy': ['ian', 'col', 'hal', 'gav', 'fred', 'bob', 'abe', 'ed', 'jon', 'dan'],\n 'jan': ['ed', 'hal', 'gav', 'abe', 'bob', 'jon', 'col', 'ian', 'fred', 'dan']}\n\nguys = sorted(guyprefers.keys())\ngals = sorted(galprefers.keys())\n\n\ndef check(engaged):\n inverseengaged = dict((v,k) for k,v in engaged.items())\n for she, he in engaged.items():\n shelikes = galprefers[she]\n shelikesbetter = shelikes[:shelikes.index(he)]\n helikes = guyprefers[he]\n helikesbetter = helikes[:helikes.index(she)]\n for guy in shelikesbetter:\n guysgirl = inverseengaged[guy]\n guylikes = guyprefers[guy]\n if guylikes.index(guysgirl) > guylikes.index(she):\n print(\"%s and %s like each other better than \"\n \"their present partners: %s and %s, respectively\"\n % (she, guy, he, guysgirl))\n return False\n for gal in helikesbetter:\n girlsguy = engaged[gal]\n gallikes = galprefers[gal]\n if gallikes.index(girlsguy) > gallikes.index(he):\n print(\"%s and %s like each other better than \"\n \"their present partners: %s and %s, respectively\"\n % (he, gal, she, girlsguy))\n return False\n return True\n\n# returns engagements = {gal: guy}\ndef matchmaker(guys, gals, guyprefers, galprefers):\n guysfree = guys[:]\n engaged = {}\n guyprefers2 = copy.deepcopy(guyprefers)\n galprefers2 = copy.deepcopy(galprefers)\n while guysfree:\n guy = guysfree.pop(0)\n guyslist = guyprefers2[guy]\n gal = guyslist.pop(0)\n fiance = engaged.get(gal)\n if not fiance:\n # She's free\n engaged[gal] = guy\n print(\" %s and %s\" % (guy, gal))\n else:\n # The bounder proposes to an engaged lass!\n galslist = galprefers2[gal]\n if galslist.index(fiance) > galslist.index(guy):\n # She prefers new guy\n engaged[gal] = guy\n print(\" %s dumped %s for %s\" % (gal, fiance, guy))\n if guyprefers2[fiance]:\n # Ex has more girls to try\n guysfree.append(fiance)\n else:\n # She is faithful to old fiance\n if guyslist:\n # Look again\n guysfree.append(guy)\n return engaged\n\ntest_rosetta_stable_marriage = False\nif (test_rosetta_stable_marriage):\n print('\\nEngagements:')\n engaged = matchmaker(guys, gals, guyprefers, galprefers)\n\n print('\\nCouples:')\n print(' ' + ',\\n '.join('%s is engaged to %s' % couple\n for couple in sorted(engaged.items())))\n print()\n print('Engagement stability check PASSED'\n if check(engaged) else 'Engagement stability check FAILED')\n\n print('\\n\\nSwapping two fiances to introduce an error')\n engaged[gals[0]], engaged[gals[1]] = engaged[gals[1]], engaged[gals[0]]\n for gal in gals[:2]:\n print(' %s is now engaged to %s' % (gal, engaged[gal]))\n print()\n print('Engagement stability check PASSED'\n if check(engaged) else 'Engagement stability check FAILED')\n\n###################################################################################\n# end code from rosetta\n###################################################################################\n###################################################################################\n\ndef weights_to_prefs(people, weights):\n assert(len(people) == len(weights))\n weight_pairs = [[person, weights[person]] for person in people]\n weight_pairs = sorted(weight_pairs, cmp = lambda x,y: -cmp(x[1], y[1]))\n prefs = map(lambda weight_pair: weight_pair[0], weight_pairs)\n return prefs\n\n##############################################################\n# Reads an overlap.tab file, as written by 
segtools-overlap.\n# Returns:\n#  guys - names corresponding with columns in the tab file\n#  gals - names corresponding with rows in the tab file\n#  guy_prefs: mapping from guy to list of gals, in order of preference\n#  gal_prefs: mapping from gal to list of guys, in order of preference\n#  weight_mat: 2D dictionary of overlaps; indexed weight_mat[gal][guy]\n##############################################################\ndef tab_file_to_preferences(filename, guys, gals):\n    data = open(filename).readlines()\n    raw_mat = map(lambda row: row.split(), data)\n\n    # the first line is a comment\n    raw_mat = raw_mat[1:]\n\n    # the second line is a list of guys\n    guys_from_file = raw_mat[0][:-2]\n    raw_mat = raw_mat[1:]\n    if not guys:\n        guys = guys_from_file\n\n    # the first column is a list of gals\n    gals_from_file = [raw_mat[i][0] for i in range(len(raw_mat))]\n    raw_mat = [raw_mat[i][1:] for i in range(len(raw_mat))]\n    if not gals:\n        gals = gals_from_file\n\n    # the last two columns are a \"none\" and \"total\"\n    raw_mat = [raw_mat[i][:-2] for i in range(len(raw_mat))]\n\n    weight_mat = {gal:{guy:0 for guy in guys} for gal in gals}\n    for gal_index in range(len(raw_mat)):\n        gal = gals_from_file[gal_index]\n        for guy_index in range(len(raw_mat[gal_index])):\n            guy = guys_from_file[guy_index]\n            val = int(raw_mat[gal_index][guy_index])\n            weight_mat[gal][guy] = val\n\n    galprefs = {}\n    for gal in gals:\n        galprefs[gal] = weights_to_prefs(guys, weight_mat[gal])\n    guyprefs = {}\n    for guy in guys:\n        guyprefs[guy] = weights_to_prefs(gals, {gal:weight_mat[gal][guy]\n                                                for gal in gals})\n\n    return guys, gals, guyprefs, galprefs, weight_mat\n\n##############################################################\n# This function is used when one of the two mapping algorithms (hungarian\n# or stable marriage) has already generated a mapping (engagements).\n# This function returns the optimal ordering of the guy-gal pairs\n# along the diagonal of the confusion matrix such that the weight\n# far from the diagonal of the confusion matrix is minimized. This\n# problem can be solved by finding the order of the eigenvalues of\n# the Laplacian matrix. 
For more info, see:\n# http://cstheory.stackexchange.com/questions/8878/algorithm-for-ordering-a-list-under-a-similarity-function\n##############################################################\ndef eigen_order(guys, gals, weight_mat, engagements):\n overlap_mat = [[-weight_mat[gal][guy]\n for guy in guys]\n for gal in gals]\n overlap_mat = np.matrix(overlap_mat)\n # order rows such that the guy-gal engagements appear on the diagonal\n rev_engagements = {engagements[gal]:gal for gal in gals}\n row_order = [gals.index(rev_engagements[guy]) for guy in guys]\n overlap_mat_ord = overlap_mat[row_order]\n\n # make the matrix symmetrical by adding it to its transpose\n overlap_mat_symm = (overlap_mat_ord + overlap_mat_ord.T)/2\n\n # diagonal := sum(overlaps)\n for i in range(overlap_mat_symm.shape[0]):\n overlap_mat_symm[i,i] = sum(overlap_mat_symm[i].flat) - overlap_mat_symm[i,i]\n\n # order the diagonal in the order of the eigenvalues of the matrix\n eigval, eigvec = np.linalg.eig(overlap_mat_symm)\n diag_order = map(lambda x: x[1],\n sorted([(eigval[i], i) for i in range(len(eigval))],\n key = lambda x: x[0]))\n #ret = overlap_mat_symm[diag_order].T[diag_order].T\n\n #print >>sys.stderr, \"rev_engagements:\", rev_engagements\n\n guy_order = [guys[i] for i in diag_order]\n gal_order = [rev_engagements[guy] for guy in guy_order]\n\n return guy_order, gal_order\n\ndef hungarian(guys, gals, weight_mat):\n neg_weight_mat = [[-weight_mat[gal][guy]\n for guy in guys]\n for gal in gals]\n m = Munkres()\n indexes = m.compute(neg_weight_mat)\n #print >>sys.stderr, \"indexes:\", indexes\n #engagements = {guys[i] : gals[j] for i,j in indexes}\n engagements = {gals[j] : guys[i] for i,j in indexes}\n #print >>sys.stderr, \"engagements: \", engagements\n\n return engagements\n\n\ndef main():\n parser = argparse.ArgumentParser(description='docstring')\n parser.add_argument('input_overlap', type=str, action='store', help='argument help')\n parser.add_argument('mapping', type=str, action='store', help='argument help')\n parser.add_argument('guy_order')\n parser.add_argument('gal_order')\n parser.add_argument('--guys', default=None, help=\"comma-delimited list of guys\")\n parser.add_argument('--gals', default=None, help=\"comma-delimited list of gals\")\n parser.add_argument('--algorithm', choices=[\"stable_marriage\", \"hungarian\"], default=\"stable_marriage\")\n parser.add_argument('--verbose', action='store_true')\n args = parser.parse_args()\n\n if args.guys: args.guys = args.guys.split(\",\")\n if args.gals: args.gals = args.gals.split(\",\")\n\n guys, gals, guyprefs, galprefs, weight_mat = tab_file_to_preferences(args.input_overlap, args.guys, args.gals)\n\n if args.algorithm == \"stable_marriage\":\n engagements = matchmaker(guys, gals, guyprefs, galprefs)\n elif args.algorithm == \"hungarian\":\n engagements = hungarian(guys, gals, weight_mat)\n\n total_bases = sum([sum(weight_mat[gal].values()) for gal in weight_mat])\n #diagonal_bases = sum([weight_mat[gal][engagements[gal]] for gal in weight_mat])\n diagonal_bases = sum([weight_mat[engagements[gal]][gal] for gal in weight_mat]) # I'm 99% sure this is the right one (8/24/12)\n if args.verbose:\n print >>sys.stderr, \"diagonal_bases: \", diagonal_bases\n print >>sys.stderr, \"total_bases: \", total_bases\n assert (total_bases != 0)\n diagonal_fraction = float(diagonal_bases) / total_bases\n sys.stdout.write(str(diagonal_fraction))\n\n\n guy_order, gal_order = eigen_order(guys, gals, weight_mat, engagements)\n if args.verbose:\n print >>sys.stderr, 
\"guy_order:\", guy_order\n        print >>sys.stderr, \"gal_order:\", gal_order\n        print >>sys.stderr, \"set(zip(guy_order, gal_order)):\", set(zip(gal_order, guy_order))\n        print >>sys.stderr, \"set(engagements.items())\", set(engagements.items())\n    assert set(zip(gal_order, guy_order)) == set(engagements.items())\n\n    with open(args.guy_order, \"w\") as f: pickle.dump(guy_order, f)\n    with open(args.gal_order, \"w\") as f: pickle.dump(gal_order, f)\n\n    with open(args.mapping, \"w\") as f:\n        f.write(\"# first column is objects read from left side of tab file\\n\")\n        f.write(\"# second column is objects read from top of tab file\\n\")\n        for x in engagements:\n            if args.verbose:\n                print >>sys.stderr, \"%s is paired with %s\" % (x, engagements[x])\n            f.write(\"%s\\t%s\\n\" % (x, engagements[x]))\n\n    #preferences_to_tab_file(guys, gals, engagements, args.input_overlap, args.sorted_overlap)\n\n\n\n\nif __name__ == '__main__':\n    main()\n\n\n\n\n\n\n\n","sub_path":"stablemarriage/stable_marriage.py","file_name":"stable_marriage.py","file_ext":"py","file_size_in_byte":13373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"649899264","text":"from dllist import DoubleLinkedList\n\ndef bubble_sort(numbers):\n    \"\"\"Sorts a list of numbers using bubble sort\"\"\"\n    while True:\n        swapped = False\n        number = numbers.begin\n        # walk the list once, swapping adjacent out-of-order values\n        while number and number.next:\n            if number.value > number.next.value:\n                number.value, number.next.value = number.next.value, number.value\n                swapped = True\n            number = number.next\n        # a full pass without swaps means the list is sorted\n        if not swapped:\n            return numbers\n\n\ndef count(node):\n    count = 0\n\n    while node:\n        node = node.next\n        count += 1\n\n    return count\n\ndef merge_sort(numbers):\n# function merge_sort(list m)\n    count = numbers.count()\n    \n    if count <= 1:\n        # if length of m ≤ 1 then\n        return numbers\n        # return m\n    # var left := empty list\n    left_numbers = DoubleLinkedList()\n    # var right := empty list\n    right_numbers = DoubleLinkedList()\n    # for each x with index i in m do\n    i = 0\n    node = numbers.begin\n    # if i < (length of m)/2 then\n    #     add x to left\n    for i in range(0, count):\n        # print(\">>> i=\", i)\n        rc = node.value\n        if i < (count // 2):\n            left_numbers.push(rc)\n    # else\n    #     add x to right\n        else:\n            right_numbers.push(rc)\n        node = node.next\n    \n    # left := merge_sort(left)\n    left_numbers = merge_sort(left_numbers)\n    # right := merge_sort(right)\n    right_numbers = merge_sort(right_numbers)\n    # return merge(left, right)\n    return merge(left_numbers, right_numbers)\n\n\n# function merge(left, right)\ndef merge(left, right):\n    # var result := empty list\n    rc = DoubleLinkedList()\n    # while left is not empty and right is not empty do\n    while left.begin and right.begin:\n        # if first(left) ≤ first(right) then\n        if left.begin.value <= right.begin.value:\n            # append first(left) to result\n            # left := rest(left)\n            rc.push(left.unshift())\n        # else\n        else:\n            # append first(right) to result\n            # right := rest(right)\n            rc.push(right.unshift())\n    # while left is not empty do\n    while left.begin:\n        # append first(left) to result\n        # left := rest(left)\n        rc.push(left.unshift())\n    # while right is not empty do\n    while right.begin:\n        # append first(right) to result\n        # right := rest(right)\n        rc.push(right.unshift())\n    # return result\n    return rc
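\n\n# Illustrative trace: merge_sort on values 3,1,2 splits them into [3] and\n# [1,2], sorts each half, then merge() interleaves them back as 1,2,3.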
\n\n\n# quickSort(array, leftmostIndex, rightmostIndex)\ndef quick_sort(array, lo, hi):\n# if (leftmostIndex < rightmostIndex)\n    if (lo < hi):\n        # pivotIndex <- partition(array,leftmostIndex, rightmostIndex)\n        p = partition(array, lo, hi)\n        # quickSort(array, leftmostIndex, pivotIndex)\n        quick_sort(array, lo, p-1)\n        # quickSort(array, pivotIndex + 1, rightmostIndex)\n        quick_sort(array, p + 1, hi)\n\n\n# partition(array, leftmostIndex, rightmostIndex)\ndef partition(array, lo, hi):\n    # print(\">>>> Enter into partition\")\n    # print(\"> array section is \", array[lo:hi+1])\n    # print(\"> low is \", lo)\n    # print(\"> high is \", hi)\n# set rightmostIndex as pivotIndex\n    pivot = array[hi]\n    # print(\"> p value is \", pivot)\n# storeIndex <- leftmostIndex - 1\n    i = lo - 1\n# for j <- leftmostIndex to rightmostIndex - 1\n    for j in range(lo, hi):\n# if element[j] < pivotElement\n        if array[j] < pivot:\n            i += 1\n# swap element[j] and element[storeIndex]\n            array[j], array[i] = array[i], array[j]\n\n# swap pivotElement and element[storeIndex+1]\n    array[i+1], array[hi] = array[hi], array[i+1]\n    # print(\"<<<< Leave the partition.\")\n    # print(\"== array is now == \", array)\n# return storeIndex + 1\n    return i + 1\n","sub_path":"ex18/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"365771660","text":"import unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\nclass TestKeys(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.driver = webdriver.Chrome()\n        cls.driver.get('https://www.baidu.com/')\n        cls.driver.maximize_window()\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.driver.quit()\n\n    def test_keys(self):\n        self.driver.find_element_by_id('kw').send_keys('seleniumtest')\n        time.sleep(3)\n        self.driver.find_element_by_id('kw').send_keys(Keys.BACK_SPACE)\n        time.sleep(3)","sub_path":"python基础代码/SeleniumDemo/testcases/test_keys.py","file_name":"test_keys.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"105368109","text":"# lambda arguments: expression(arguments)\n\nadd = lambda x, y: x + y\nprint(add(3, 5))\n\n# sort a list of tuples by their second element\na = [(1, 2), (4, 1), (9, 10), (13, -3)]\na.sort(key=lambda x: x[1])\nprint(a)\n\n# sort two lists in parallel\nlist1 = [1, 5, 3]\nlist2 = [2, 4, 6]\ndata = sorted(zip(list1, list2))  # materialize and sort the pairs (zip alone is a lazy iterator)\nlist1, list2 = map(lambda t: list(t), zip(*data))\nprint(list1)\nprint(list2)\n","sub_path":"intermediate-python/lambada.py","file_name":"lambada.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"377833370","text":"import re, sys, os, logging, numpy as np, pandas as pd\n\n\ndef getParams(paramFile):\n    parameters = dict()\n    with open(paramFile, 'r') as file:\n        for line in file:\n            if re.search(r'^#', line) or re.search(r'^\\s', line):\n                continue\n            line = re.sub(r'#.*', '', line)  # Remove comments (start from '#')\n            line = re.sub(r'\\s*', '', line)  # Remove all whitespaces\n\n            # Exception for \"feature_files\" parameter\n            if \"feature_files\" in parameters and line.endswith(\".feature\") and \"=\" not in line:\n                parameters[\"feature_files\"].append(line)\n            elif \"library\" in parameters and line.endswith(\".db\") and \"=\" not in line:\n                parameters[\"library\"].append(line)\n            else:\n                key = line.split('=')[0]\n                val = line.split('=')[1]\n                if key == \"feature_files\" or key == \"library\":\n                    parameters[key] = [val]\n                else:\n                    parameters[key] = val\n    return parameters\n\n\ndef readFeatures(featureFile):\n    with open(featureFile, 'r') as file:\n        features = file.readlines()\n    return features\n\n\nclass progressBar:\n    def __init__(self, 
total):\n        self.total = total\n        self.barLength = 20\n        self.count = 0\n        self.progress = 0\n        self.block = 0\n        self.status = \"\"\n\n    def increment(self, nIncrement=None):\n        if nIncrement is None:\n            self.count += 1\n        else:\n            self.count = nIncrement\n        self.progress = self.count / self.total\n        self.block = int(round(self.barLength * self.progress))\n        if self.progress == 1:\n            self.status = \"Done...\\r\\n\"\n        else:\n            self.status = \"\"\n            # self.status = str(self.count) + \"/\" + str(self.total)\n        text = \"\\r  Progress: [{0}] {1}% {2}\".format(\"#\" * self.block + \"-\" * (self.barLength - self.block),\n                                                    int(self.progress * 100), self.status)\n        sys.stdout.write(text)\n        sys.stdout.flush()\n\n\ndef summarizeFeatures(full, params):\n    # Input arguments\n    # 1. full: numpy recarray of fully-aligned features\n    # 2. params: dictionary of parameters\n\n    ####################################\n    # Summarize fully-aligned features #\n    ####################################\n    mzCols = [col for col in full.dtype.names if col.lower().endswith(\"_mz\")]\n    rtCols = [col for col in full.dtype.names if col.lower().endswith(\"_rt\")]\n    intensityCols = [col for col in full.dtype.names if col.lower().endswith(\"_intensity\")]\n    chargeCols = [col for col in full.dtype.names if col.lower().endswith(\"_z\")]\n    minRtCols = [col for col in full.dtype.names if col.lower().endswith(\"_minrt\")]\n    maxRtCols = [col for col in full.dtype.names if col.lower().endswith(\"_maxrt\")]\n    snCols = [col for col in full.dtype.names if col.lower().endswith(\"_snratio\")]\n\n    df = pd.DataFrame.from_records(full)\n    res = pd.DataFrame()\n    res[\"feature_m/z\"] = df[mzCols].mean(axis=1)\n    res[\"feature_RT\"] = df[rtCols].mean(axis=1) / 60\n    res[\"feature_intensity\"] = df[intensityCols].mean(axis=1)\n    res[\"feature_SNratio\"] = df[snCols].mean(axis=1)\n    res[\"feature_width\"] = pd.DataFrame((df[maxRtCols].values - df[minRtCols].values) / 60).mean(axis=1)\n    # Handling charges\n    res[\"feature_z\"] = df[chargeCols].mode(axis=1)[0]\n    res.loc[res[\"feature_z\"] == 0, \"feature_z\"] = 1  # .loc avoids pandas chained-assignment pitfalls\n    res[\"feature_z\"] = res[\"feature_z\"].astype(int)\n    if params[\"mode\"] == \"-1\":\n        res[\"feature_ion\"] = \"[M-\" + res[\"feature_z\"].astype(str) + \"H]\" + res[\"feature_z\"].astype(str) + \"-\"\n        res[\"feature_ion\"] = res[\"feature_ion\"].replace(\"[M-1H]1-\", \"[M-H]-\")\n    elif params[\"mode\"] == \"1\":\n        res[\"feature_ion\"] = \"[M+\" + res[\"feature_z\"].astype(str) + \"H]\" + res[\"feature_z\"].astype(str) + \"+\"\n        res[\"feature_ion\"] = res[\"feature_ion\"].replace(\"[M+1H]1+\", \"[M+H]+\")\n    for intensityCol in intensityCols:\n        res[intensityCol] = df[intensityCol]\n    colNames = [\"feature_ion\", \"feature_z\", \"feature_m/z\", \"feature_RT\", \"feature_width\",\n                \"feature_SNratio\"] + intensityCols\n    res = res[colNames]\n    return res\n\n\n'''\ndef generateSummarizedFeatureFile(nFeatures, full, ms2, params):\n    filePath = os.path.join(os.getcwd(), \"align_\" + params[\"output_name\"])\n\n    #############################################################\n    # Summarize fully-aligned features and write them to a file #\n    #############################################################\n    # This file contains \"summarized\" information of fully-aligned features\n    # e.g. 
mean m/z, mean intensity, mean RT of fully-aligned features and so on\n # width and SNratio are from the reference run\n mzCols = [col for col in full.dtype.names if col.lower().endswith(\"_mz\")]\n rtCols = [col for col in full.dtype.names if col.lower().endswith(\"_rt\")]\n intensityCols = [col for col in full.dtype.names if col.lower().endswith(\"_intensity\")]\n chargeCols = [col for col in full.dtype.names if col.lower().endswith(\"_z\")]\n minRtCols = [col for col in full.dtype.names if col.lower().endswith(\"_minrt\")]\n maxRtCols = [col for col in full.dtype.names if col.lower().endswith(\"_maxrt\")]\n snCols = [col for col in full.dtype.names if col.lower().endswith(\"_snratio\")]\n\n df = pd.DataFrame.from_records(full)\n res = pd.DataFrame()\n res[\"feature_m/z\"] = df[mzCols].mean(axis=1)\n res[\"feature_RT\"] = df[rtCols].mean(axis=1)\n res[\"feature_intensity\"] = df[intensityCols].mean(axis=1)\n res[\"feature_SNratio\"] = df[snCols].mean(axis=1)\n res[\"feature_width\"] = pd.DataFrame((df[maxRtCols].values - df[minRtCols].values) / 60).mean(axis=1)\n # Handling charges\n res[\"feature_z\"] = df[chargeCols].mode(axis=1)[0]\n res[\"feature_z\"][res[\"feature_z\"] == 0] = 1\n res[\"feature_z\"] = res[\"feature_z\"].astype(int)\n if params[\"mode\"] == \"-1\":\n res[\"feature_ion\"] = \"[M-\" + res[\"feature_z\"].astype(str) + \"H]\" + res[\"feature_z\"].astype(str) + \"-\"\n res[\"feature_ion\"] = res[\"feature_ion\"].replace(\"[M-1H]1-\", \"[M-H]-\")\n elif params[\"mode\"] == \"1\":\n res[\"feature_ion\"] = \"[M+\" + res[\"feature_z\"].astype(str) + \"H]\" + res[\"feature_z\"].astype(str) + \"+\"\n res[\"feature_ion\"] = res[\"feature_ion\"].replace(\"[M+1H]1+\", \"[M+H]+\")\n for intensityCol in intensityCols:\n res[intensityCol] = df[intensityCol]\n\n # Add the mean m/z of feature and its charge state to the beginning of MS2 spectrum (similar to .dta file)\n for i in range(nFeatures):\n if ms2[i] is not None:\n ms2[i][\"mz\"] = np.insert(ms2[i][\"mz\"], 0, res[\"feature_m/z\"].iloc[i])\n ms2[i][\"intensity\"] = np.insert(ms2[i][\"intensity\"], 0, res[\"feature_z\"].iloc[i])\n res[\"MS2\"] = ms2\n\n # Write the summarized fully-aligned features to a file\n res = res.sort_values(by=\"feature_m/z\", ignore_index=True)\n res[\n \"feature_num\"] = res.index + 1 # Update \"feature_num\" according to the ascending order of \"feature_m/z\" (as sorted)\n outColumns = [\"feature_num\", \"feature_ion\", \"feature_z\", \"feature_m/z\", \"feature_RT\",\n \"feature_width\", \"feature_SNratio\"] + intensityCols\n resOut = res[outColumns].copy()\n resOut[\"feature_RT\"] = resOut[\"feature_RT\"] / 60 # Change the unit to minute\n fullName = os.path.join(filePath, params[\"output_name\"] + \"_summarized_fully_aligned.feature\")\n resOut.to_csv(fullName, index=False, sep=\"\\t\")\n\n # Write MS2 spectra to files\n ms2Path = os.path.join(filePath, \"MS2\")\n if not os.path.exists(ms2Path):\n os.mkdir(ms2Path)\n for i in range(res.shape[0]):\n if res[\"MS2\"].iloc[i] is not None:\n fileName = os.path.join(ms2Path, \"f\" + str(i + 1) + \".MS2\")\n dfMS2 = pd.DataFrame.from_dict(res[\"MS2\"].iloc[i])\n dfMS2.to_csv(fileName, index=False, header=False, sep=\"\\t\")\n\n # Save fully-aligned features with their MS2 spectra (i.e. 
res) for debugging purposes\n    # When the pipeline gets mature, this part needs to be removed\n    pickle.dump(res, open(os.path.join(filePath, \".fully_aligned_feature.pickle\"), \"wb\")) # Make the file be hidden\n\n    return res\n'''\n\n\ndef processQuantityData(df, params):\n    print(\"    Loading-bias summary\")\n    print(\"    ====================\")\n    logging.info(\"    Loading-bias summary\")\n    logging.info(\"    ====================\")\n    intensityCols = [col for col in df.columns if col.lower().endswith(\"_intensity\")]\n    expr = df[intensityCols]\n\n    # Calculate and print loading-bias information\n    sampleNames = expr.columns\n    rowMeans = expr.mean(axis=1)\n    lexpr = np.log2(expr.div(rowMeans, axis=0))\n    nFeatures, nSamples = lexpr.shape\n    idx = pd.Series([True] * nFeatures)\n    for i in intensityCols:\n        idx = idx & (lexpr[i] > lexpr[i].quantile(q=0.1)) & (lexpr[i] < lexpr[i].quantile(q=0.9))\n    meanIntensity = 2 ** (lexpr.loc[idx, :].mean(axis=0)) * 100\n    sdVal = lexpr.loc[idx, :].std(axis=0)\n    sdIntensity = ((2 ** sdVal - 1) + (1 - 2 ** (-sdVal))) / 2 * 100\n    semIntensity = sdIntensity / np.sqrt(len(idx))\n    print(\"    Sample_name\\tMean[%]\\tSD[%]\\tSEM[%]\\t#features\")\n    logging.info(\"    Sample_name\\tMean[%]\\tSD[%]\\tSEM[%]\\t#features\")\n    for i in range(expr.shape[1]):\n        print(\"    {}\\t{:.2f}\\t{:.2f}\\t{:.2f}\\t{}\".format(intensityCols[i].replace(\"_intensity\", \"\"), meanIntensity[i], sdIntensity[i], semIntensity[i], len(idx)))\n        logging.info(\"    {}\\t{:.2f}\\t{:.2f}\\t{:.2f}\\t{}\".format(intensityCols[i].replace(\"_intensity\", \"\"), meanIntensity[i], sdIntensity[i], semIntensity[i], len(idx)))\n\n    # Normalization using trimmed-mean values (loading-bias correction)\n    lexpr = np.log2(expr)\n    if params[\"skip_loading_bias_correction\"] == 0:\n        # Parameters for normalization\n        cutoff = np.nanquantile(lexpr.to_numpy(), q=0.1) # 10% quantile of overall intensities\n        # This is the original implementation (the same as the Perl pipeline), but it may be too stringent\n        idx = pd.Series([True] * nFeatures)\n        for i in intensityCols:\n            idx = idx & (lexpr[lexpr > cutoff][i] > lexpr[lexpr > cutoff][i].quantile(q=0.1)) & (\n                lexpr[lexpr > cutoff][i] < lexpr[lexpr > cutoff][i].quantile(q=0.9))\n        meanIntensity = lexpr.loc[idx, :].mean(axis=0)\n        meanIntensity = lexpr[lexpr > cutoff].mean(axis=0)\n        normFactor = meanIntensity - np.mean(meanIntensity)\n        lexpr = lexpr - normFactor\n
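\n    # A toy check of the trimmed-mean correction above (numbers assumed for illustration, not\n    # pipeline output): if the per-sample log2 means were [10.0, 11.0], normFactor would be\n    # [-0.5, +0.5], shifting the first sample up and the second down until both average 10.5.\n\n    # Replace missing values (i.e. 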
nan) with half of the global minimum intensity\n    minIntensity = np.nanmin(lexpr)\n    naMask = lexpr.isna() # Remember which cells were missing BEFORE imputation (isna() after fillna would be all False)\n    lexpr = lexpr.fillna(minIntensity - 1) # Half of the global minimum intensity (at log2-scale)\n    lexpr += naMask * pd.DataFrame(1e-2 * np.random.uniform(0, 1, size=(lexpr.shape)),\n                                   columns=lexpr.columns) # Add small random number for numerical stability\n    df[intensityCols] = 2 ** lexpr\n    return df\n\ndef generateFeatureFile(full, partial, unaligned, params):\n    # Input arguments\n    # full: fully-aligned features (numpy recarray)\n    # partial: partially-aligned features (numpy recarray, or None)\n    # unaligned: un-aligned features (list of numpy recarray, or None)\n    # params: dictionary of parameters\n    filePath = os.path.join(os.getcwd(), \"align_\" + params[\"output_name\"])\n    if not os.path.exists(filePath):\n        os.mkdir(filePath)\n\n    # Organize fully-aligned features\n    fullName = os.path.join(filePath, params[\"output_name\"] + \"_fully_aligned.feature\")\n    dfFull = pd.DataFrame(full)\n    dfFull[\"meanMz\"] = dfFull.filter(regex=(\".*mz$\")).mean(axis=1)\n    dfFull = dfFull.sort_values(by=\"meanMz\", ignore_index=True) # Features are sorted by mean m/z\n    colNames = dfFull.columns.tolist()\n    colNames = colNames[-1:] + colNames[:-1]\n    dfFull = dfFull[colNames]\n\n    # Organize \"summarized\" fully-aligned features\n    fullName2 = os.path.join(filePath, params[\"output_name\"] + \"_summarized_fully_aligned.feature\")\n    dfFull2 = summarizeFeatures(full, params)\n    dfFull2 = dfFull2.sort_values(by=\"feature_m/z\", ignore_index=True) # Features are sorted by \"feature_m/z\"\n    dfFull2.insert(loc=0, column=\"feature_num\", value=dfFull2.index + 1)\n\n    # Processing of quantity data (missing value imputation, normalization, etc.)\n    dfFull2 = processQuantityData(dfFull2, params)\n    intensityCols = [col for col in dfFull2.columns if col.lower().endswith(\"_intensity\")]\n    for col in intensityCols:\n        dfFull[col] = dfFull2[col] # Replace intensity values of dfFull with those of dfFull2 (preprocessed)\n\n    # This file contains fully-aligned features with run-specific information\n    # Since the run-specific information is required for MS2 processing, it should be kept\n    dfFull.to_csv(fullName, index=False, sep=\"\\t\")\n    # This file contains \"summarized\" fully-aligned features\n    # Except intensity, all feature information is summarized over runs\n    dfFull2.to_csv(fullName2, index=False, sep=\"\\t\")\n\n    ##############################################\n    # Write partially-aligned features, if exist #\n    ##############################################\n    if partial is not None:\n        dfPartial = pd.DataFrame(partial)\n        if dfPartial.shape[0] > 0:\n            partialName = os.path.join(filePath, params[\"output_name\"] + \"_partially_aligned.feature\")\n            dfPartial.to_csv(partialName, index=False, sep=\"\\t\")\n    else:\n        dfPartial = None\n\n    #######################################\n    # Write un-aligned features, if exist #\n    #######################################\n    dfArrayUnaligned = None\n    if unaligned is not None:\n        dfArrayUnaligned = []\n        for un in unaligned:\n            unName = [col for col in un.dtype.names if col.endswith('_mz')][0]\n            unName = os.path.join(filePath, re.sub(\"_mz\", \"\", unName) + \"_unaligned.feature\")\n            dfUn = pd.DataFrame(un)\n            dfUn.to_csv(unName, index=False, sep=\"\\t\")\n            dfArrayUnaligned.append(dfUn)\n    else:\n        dfArrayUnaligned = None\n
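\n    # dfFull keeps the run-specific columns required later for MS2 processing, while dfPartial\n    # and dfArrayUnaligned come back as None when no partially-aligned or un-aligned features\n    # were passed in\n    return dfFull, dfPartial, 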
dfArrayUnaligned\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":14132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"547381644","text":"from sklearn.ensemble import GradientBoostingRegressor\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nimport math\n\nfeature_folder = './feature_folder/'\nproperty_set = {}\nwith open('property_set.txt', 'r') as f:\n i = 0\n for eachline in f:\n property_set[i] = eachline.strip()\n i += 1\n\ndef test(fold_num):\n data = []\n label = []\n with open(feature_folder+'train_'+str(fold_num)+'.txt', 'r') as f:\n for eachline in f:\n parts = eachline.strip().split('\\t')\n label.append(int(parts[0]))\n onedata = []\n feats = parts[1].split(' ')\n for eachfeat in feats:\n onedata.append(float(eachfeat))\n del onedata[0:3]\n data.append(onedata)\n\n est = GradientBoostingRegressor(n_estimators=100, learning_rate=0.1,max_depth=1, random_state=0, loss='ls')\n est.fit(np.array(data), np.array(label))\n\n test_data = []\n test_label = []\n with open(feature_folder+'test_'+str(fold_num)+'.txt','r') as f:\n for eachline in f:\n parts = eachline.strip().split('\\t')\n test_label.append(int(parts[0]))\n onedata = []\n feats = parts[1].split(' ')\n for eachfeat in feats:\n onedata.append(float(eachfeat))\n del onedata[0:3]\n test_data.append(onedata)\n\n predictions = est.predict(np.array(test_data))\n print('rmse: ', math.sqrt(mean_squared_error(test_label, predictions)))\n feat_imp = est.feature_importances_\n ind = np.argsort(-feat_imp)\n for i in range(10):\n print(property_set[ind[i]], feat_imp[ind[i]])\n\nif __name__ == '__main__':\n for i in range(7):\n test(i)\n\n\n","sub_path":"modelperfomance.py","file_name":"modelperfomance.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"434049766","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.response.AlipayResponse import AlipayResponse\nfrom alipay.aop.api.domain.PromotionRelationDTO import PromotionRelationDTO\n\n\nclass AlipayOpenSpIsvRelationQueryResponse(AlipayResponse):\n\n def __init__(self):\n super(AlipayOpenSpIsvRelationQueryResponse, self).__init__()\n self._current_page = None\n self._page_size = None\n self._promotion_relations = None\n self._total_size = None\n\n @property\n def current_page(self):\n return self._current_page\n\n @current_page.setter\n def current_page(self, value):\n self._current_page = value\n @property\n def page_size(self):\n return self._page_size\n\n @page_size.setter\n def page_size(self, value):\n self._page_size = value\n @property\n def promotion_relations(self):\n return self._promotion_relations\n\n @promotion_relations.setter\n def promotion_relations(self, value):\n if isinstance(value, list):\n self._promotion_relations = list()\n for i in value:\n if isinstance(i, PromotionRelationDTO):\n self._promotion_relations.append(i)\n else:\n self._promotion_relations.append(PromotionRelationDTO.from_alipay_dict(i))\n @property\n def total_size(self):\n return self._total_size\n\n @total_size.setter\n def total_size(self, value):\n self._total_size = value\n\n def parse_response_content(self, response_content):\n response = super(AlipayOpenSpIsvRelationQueryResponse, self).parse_response_content(response_content)\n if 'current_page' in response:\n self.current_page = response['current_page']\n if 'page_size' in response:\n 
self.page_size = response['page_size']\n if 'promotion_relations' in response:\n self.promotion_relations = response['promotion_relations']\n if 'total_size' in response:\n self.total_size = response['total_size']\n","sub_path":"alipay/aop/api/response/AlipayOpenSpIsvRelationQueryResponse.py","file_name":"AlipayOpenSpIsvRelationQueryResponse.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"449360538","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass AlipayMarketingCashlessticketTemplateModifyModel(object):\n\n def __init__(self):\n self._out_biz_no = None\n self._publish_end_time = None\n self._rule_conf = None\n self._template_id = None\n\n @property\n def out_biz_no(self):\n return self._out_biz_no\n\n @out_biz_no.setter\n def out_biz_no(self, value):\n self._out_biz_no = value\n @property\n def publish_end_time(self):\n return self._publish_end_time\n\n @publish_end_time.setter\n def publish_end_time(self, value):\n self._publish_end_time = value\n @property\n def rule_conf(self):\n return self._rule_conf\n\n @rule_conf.setter\n def rule_conf(self, value):\n self._rule_conf = value\n @property\n def template_id(self):\n return self._template_id\n\n @template_id.setter\n def template_id(self, value):\n self._template_id = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.out_biz_no:\n if hasattr(self.out_biz_no, 'to_alipay_dict'):\n params['out_biz_no'] = self.out_biz_no.to_alipay_dict()\n else:\n params['out_biz_no'] = self.out_biz_no\n if self.publish_end_time:\n if hasattr(self.publish_end_time, 'to_alipay_dict'):\n params['publish_end_time'] = self.publish_end_time.to_alipay_dict()\n else:\n params['publish_end_time'] = self.publish_end_time\n if self.rule_conf:\n if hasattr(self.rule_conf, 'to_alipay_dict'):\n params['rule_conf'] = self.rule_conf.to_alipay_dict()\n else:\n params['rule_conf'] = self.rule_conf\n if self.template_id:\n if hasattr(self.template_id, 'to_alipay_dict'):\n params['template_id'] = self.template_id.to_alipay_dict()\n else:\n params['template_id'] = self.template_id\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = AlipayMarketingCashlessticketTemplateModifyModel()\n if 'out_biz_no' in d:\n o.out_biz_no = d['out_biz_no']\n if 'publish_end_time' in d:\n o.publish_end_time = d['publish_end_time']\n if 'rule_conf' in d:\n o.rule_conf = d['rule_conf']\n if 'template_id' in d:\n o.template_id = d['template_id']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/AlipayMarketingCashlessticketTemplateModifyModel.py","file_name":"AlipayMarketingCashlessticketTemplateModifyModel.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"493844872","text":"# (C) Copyright 1996- ECMWF.\n# \n# This software is licensed under the terms of the Apache Licence Version 2.0\n# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.\n# In applying this licence, ECMWF does not waive the privileges and immunities\n# granted to it by virtue of its status as an intergovernmental organisation\n# nor does it submit to any jurisdiction.\n\nimport collections.abc\nimport sys\nfrom typing import Dict\n\nfrom . 
import logger\n\n\nclass Config:\n \"\"\"\n This class is in charge of holding the configuration for the monitoring system, including UDP server and reporters,\n which can be defined by arguments, environment variables or defaults.\n \"\"\"\n\n def __init__(self,\n udp_server=None,\n monitor_server=None,\n aviso_rest_reporter=None,\n aviso_auth_reporter=None,\n etcd_reporter=None):\n\n try:\n # we build the configuration in priority order from the lower to the higher\n # start from the defaults\n self._config = self._create_default_config()\n # add environment variables\n Config.deep_update(self._config, self._read_env_variables())\n # add constructor parameters\n self.udp_server = udp_server\n self.monitor_server = monitor_server\n self.aviso_rest_reporter = aviso_rest_reporter\n self.aviso_auth_reporter = aviso_auth_reporter\n self.etcd_reporter = etcd_reporter\n\n logger.debug(f\"Loading configuration completed\")\n\n except Exception as e:\n logger.error(f\"Error occurred while setting the configuration, exception: {type(e)} {e}\")\n logger.debug(\"\", exc_info=True)\n sys.exit(-1)\n\n @staticmethod\n def _create_default_config() -> Dict:\n\n udp_server = {\n \"host\": \"127.0.0.1\",\n \"port\": 1111,\n \"buffer_size\": 64 * 1024\n }\n # this are the setting for sending the telemetry to a monitoring server like Opsview\n monitor_server = {\n \"url\": \"https://localhost\",\n \"username\": \"TBD\",\n \"password\": \"TBD\",\n \"service_host\": \"aviso\",\n \"req_timeout\": 60, # seconds\n\n }\n aviso_rest_reporter = {\n \"tlm_type\": \"rest_resp_time\",\n \"enabled\": False,\n \"frequency\": 1, # in minutes\n \"warning_t\": 10, # s\n \"critical_t\": 20, # s\n }\n\n aviso_auth_reporter = {\n \"tlm_type\": \"auth_resp_time\",\n \"enabled\": False,\n \"frequency\": 1, # in minutes\n \"warning_t\": 10, # s\n \"critical_t\": 20, # s\n \"sub_tlms\": []\n }\n\n etcd_reporter = {\n \"enabled\": False,\n \"frequency\": 5, # in minutes\n \"member_urls\": [\"http://localhost:2379\"],\n \"tlm_type\": [\"etcd_store_size\", \"etcd_cluster_status\", \"etcd_total_keys\"],\n \"req_timeout\": 60, # seconds\n }\n\n # main config\n config = {}\n config[\"udp_server\"] = udp_server\n config[\"monitor_server\"] = monitor_server\n config[\"aviso_rest_reporter\"] = aviso_rest_reporter\n config[\"aviso_auth_reporter\"] = aviso_auth_reporter\n config[\"etcd_reporter\"] = etcd_reporter\n return config\n\n def _read_env_variables(self) -> Dict:\n config = {}\n # TBD\n return config\n\n @property\n def udp_server(self):\n return self._udp_server\n\n @udp_server.setter\n def udp_server(self, udp_server):\n u = self._config.get(\"udp_server\")\n if udp_server is not None and u is not None:\n Config.deep_update(u, udp_server)\n elif udp_server is not None:\n u = udp_server\n # verify is valid\n assert u is not None, \"udp_server has not been configured\"\n assert u.get(\"host\") is not None, \"udp_server host has not been configured\"\n assert u.get(\"port\") is not None, \"udp_server port has not been configured\"\n assert u.get(\"buffer_size\") is not None, \"udp_server buffer_size has not been configured\"\n self._udp_server = u\n\n @property\n def monitor_server(self):\n return self._monitor_server\n\n @monitor_server.setter\n def monitor_server(self, monitor_server):\n m = self._config.get(\"monitor_server\")\n if monitor_server is not None and m is not None:\n Config.deep_update(m, monitor_server)\n elif monitor_server is not None:\n m = monitor_server\n # verify is valid\n assert m is not None, \"monitor_server 
has not been configured\"\n assert m.get(\"url\") is not None, \"monitor_server url has not been configured\"\n assert m.get(\"username\") is not None, \"monitor_server username has not been configured\"\n assert m.get(\"password\") is not None, \"monitor_server password has not been configured\"\n assert m.get(\"service_host\") is not None, \"monitor_server service_host has not been configured\"\n assert m.get(\"req_timeout\") is not None, \"monitor_server req_timeout has not been configured\"\n self._monitor_server = m\n\n @property\n def aviso_rest_reporter(self):\n return self._aviso_rest_reporter\n\n @aviso_rest_reporter.setter\n def aviso_rest_reporter(self, aviso_rest_reporter):\n ar = self._config.get(\"aviso_rest_reporter\")\n if aviso_rest_reporter is not None and ar is not None:\n Config.deep_update(ar, aviso_rest_reporter)\n elif aviso_rest_reporter is not None:\n ar = aviso_rest_reporter\n # verify is valid\n assert ar is not None, \"aviso_rest_reporter has not been configured\"\n assert ar.get(\"tlm_type\") is not None, \"aviso_rest_reporter tlm_type has not been configured\"\n assert ar.get(\"enabled\") is not None, \"aviso_rest_reporter enabled has not been configured\"\n assert ar.get(\"frequency\") is not None, \"aviso_rest_reporter frequency has not been configured\"\n assert ar.get(\"warning_t\") is not None, \"aviso_rest_reporter warning_t has not been configured\"\n assert ar.get(\"critical_t\") is not None, \"aviso_rest_reporter critical_t has not been configured\"\n self._aviso_rest_reporter = ar\n\n @property\n def aviso_auth_reporter(self):\n return self._aviso_auth_reporter\n\n @aviso_auth_reporter.setter\n def aviso_auth_reporter(self, aviso_auth_reporter):\n aa = self._config.get(\"aviso_auth_reporter\")\n if aviso_auth_reporter is not None and aa is not None:\n Config.deep_update(aa, aviso_auth_reporter)\n elif aviso_auth_reporter is not None:\n aa = aviso_auth_reporter\n # verify is valid\n assert aa is not None, \"aviso_auth_reporter has not been configured\"\n assert aa.get(\"tlm_type\") is not None, \"aviso_auth_reporter tlm_type has not been configured\"\n assert aa.get(\"enabled\") is not None, \"aviso_auth_reporter enabled has not been configured\"\n assert aa.get(\"frequency\") is not None, \"aviso_auth_reporter frequency has not been configured\"\n assert aa.get(\"warning_t\") is not None, \"aviso_auth_reporter warning_t has not been configured\"\n assert aa.get(\"critical_t\") is not None, \"aviso_auth_reporter critical_t has not been configured\"\n self._aviso_auth_reporter = aa\n\n @property\n def etcd_reporter(self):\n return self._etcd_reporter\n\n @etcd_reporter.setter\n def etcd_reporter(self, etcd_reporter):\n e = self._config.get(\"etcd_reporter\")\n if etcd_reporter is not None and e is not None:\n Config.deep_update(e, etcd_reporter)\n elif etcd_reporter is not None:\n e = etcd_reporter\n # verify is valid\n assert e is not None, \"etcd_reporter has not been configured\"\n assert e.get(\"tlm_type\") is not None, \"etcd_reporter tlm_type has not been configured\"\n assert e.get(\"enabled\") is not None, \"etcd_reporter enabled has not been configured\"\n assert e.get(\"frequency\") is not None, \"etcd_reporter frequency has not been configured\"\n assert e.get(\"member_urls\") is not None, \"etcd_reporter member_urls has not been configured\"\n assert e.get(\"req_timeout\") is not None, \"etcd_reporter req_timeout has not been configured\"\n self._etcd_reporter = e\n\n def __str__(self):\n config_string = (\n f\"udp_server: 
{self.udp_server}\" +\n f\", monitor_server: {self.monitor_server}\" +\n f\", aviso_rest_reporter: {self.aviso_rest_reporter}\" +\n f\", aviso_auth_reporter: {self.aviso_auth_reporter}\" +\n f\", etcd_reporter: {self.etcd_reporter}\"\n )\n return config_string\n\n def _configure_property(self, param, name):\n value = None\n if param is not None:\n value = param\n elif self._config.get(name) is not None:\n # Setting var from user configuration file\n value = self._config.get(name)\n else:\n logger.error(f\"{name} has not been configured\")\n sys.exit(-1)\n return value\n\n @staticmethod\n def deep_update(d, u):\n for k, v in u.items():\n if isinstance(v, collections.abc.Mapping):\n d[k] = Config.deep_update(d.get(k, type(v)()), v)\n else:\n d[k] = v\n return d\n","sub_path":"aviso-server/monitoring/aviso_monitoring/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":9422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"150304946","text":"# api/serializers.py\nfrom rest_framework import serializers\nfrom WebAdminRadio import models\nfrom accounts.models import Usuario\n\nclass TimeSerializer(serializers.Serializer):\n fecha= serializers.DateField()\n hora= serializers.TimeField(format='%H:%M:%S')\n\nclass SegmentoSerializer(serializers.ModelSerializer):\n horarios = serializers.ReadOnlyField(source=\"get_horarios\")\n class Meta:\n fields = (\n 'id',\n 'nombre',\n 'slogan',\n 'descripcion',\n 'idEmisora',\n 'imagen',\n 'horarios'\n )\n model = models.Segmento\n\nclass EmisoraSerializer(serializers.ModelSerializer):\n class Meta:\n fields = '__all__'\n model = models.Emisora\n\n#SEGMENTOS CON HORARIOS\nclass SegmentoSerializerFull(serializers.ModelSerializer):\n horarios = serializers.ReadOnlyField(source=\"get_horarios\")\n\n class Meta:\n model = models.Segmento\n fields = ('id', 'nombre', 'imagen','idEmisora', 'slogan','descripcion', 'horarios')\n\n#SEGMENTOS DEL DIA ACTUAL\nclass SegmentoSerializerToday(serializers.ModelSerializer):\n horarios = serializers.ReadOnlyField(source=\"get_horario_dia_actual\")\n emisora= serializers.SerializerMethodField()\n\n def get_emisora(self,ob):\n return EmisoraSerializer(ob.get_emisora()).data\n\n\n class Meta:\n model = models.Segmento\n fields = ('id', 'nombre', 'imagen','idEmisora', 'horarios','descripcion','emisora')\n\n\nclass LocutoresSerializer(serializers.ModelSerializer):\n emisora = serializers.SerializerMethodField()\n\n class Meta:\n model = Usuario\n fields = (\n 'id',\n 'imagen',\n 'first_name',\n 'last_name',\n 'emisora',\n )\n\n def get_emisora(self, obj):\n segmento_id = self.context.get('segmento')\n segmento_obj = models.segmento_usuario.objects.get(idSegmento=segmento_id, idUsuario=obj.id)\n return segmento_obj.idSegmento.idEmisora.nombre\n\nclass UsuarioSerializer(serializers.ModelSerializer):\n password = serializers.CharField(write_only=True)\n\n def create(self, validated_data):\n\n usuario = Usuario.objects.create(\n username=validated_data['username'],\n email = validated_data['email'],\n first_name = validated_data['first_name'],\n last_name = validated_data['last_name'],\n fecha_nac = validated_data['fecha_nac'],\n rol = validated_data['rol'],\n )\n usuario.set_password(validated_data['password'])\n usuario.save()\n\n return usuario\n\n class Meta:\n fields = (\n 'id',\n 'username',\n 'imagen',\n 'email',\n 'first_name',\n 'last_name',\n 'password',\n 'fecha_nac',\n 'is_active',\n 'rol',\n )\n model = Usuario\n\nclass 
PublicidadSerializer(serializers.ModelSerializer):\n    emisora = serializers.SerializerMethodField()\n\n    class Meta:\n        model = models.Publicidad\n        fields = (\n            'id',\n            'imagen',\n            'titulo',\n            'cliente',\n            'emisora',\n        )\n\n    def get_emisora(self, obj):\n        segmento_id = self.context.get('segmento')\n        segmento_obj = models.segmento_publicidad.objects.get(idSegmento=segmento_id, idPublicidad=obj.id)\n        return segmento_obj.idSegmento.idEmisora.nombre\n\nclass FrecuenciaSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = models.Frecuencia\n        fields = (\n            'tipo',\n            'dia_semana',\n            'hora_inicio',\n            'hora_fin',\n        )\n\nclass TelefonoEmisoraSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = models.Telefono_emisora\n        fields= '__all__'\n\nclass RedSocialEmisoraSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = models.RedSocial_emisora\n        fields= '__all__'\n\nclass TelefonoUsuarioSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = models.Telefono_Usuario\n        fields= '__all__'\n\nclass RedSocialUsuarioSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = models.RedSocial_usuario\n        fields= '__all__'\n\nclass LocutoresSegmentoSerializer(serializers.ModelSerializer):\n    redes_sociales= serializers.ReadOnlyField(source=\"get_redes_sociales\")\n\n    #def get_redes_sociales(self,ob):\n    #    return RedSocialUsuarioSerializer(ob.get_redes_sociales()).data\n\n\n    class Meta:\n        model = Usuario\n        fields=(\n            'id',\n            'imagen',\n            'first_name',\n            'last_name',\n            'biografia',\n            'fecha_nac',\n            'hobbies',\n            'apodo',\n            'redes_sociales',\n        )\n\n\nclass ImagenesSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = models.Imagenes\n        fields= '__all__'\n\nclass VideosSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = models.Videos\n        fields= '__all__'\n\nclass FavoritoSerializer(serializers.ModelSerializer):\n    emisora= serializers.SerializerMethodField()\n\n    def get_emisora(self,ob):\n        return EmisoraSerializer(ob.get_emisora()).data\n\n    class Meta:\n        model = models.Segmento\n        fields = ('id', 'nombre', 'imagen','idEmisora','descripcion','emisora')\n\nclass EncuestaSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = models.Encuesta\n        fields = (\n            'id',\n            'titulo',\n            'fecha_inicio',\n            'hora_fin',\n            'dia_fin',\n            'activo'\n        )\n\nclass FavoritoCreateSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = models.Favorito\n        fields = \"__all__\"\n\n\n\n\n\n\n\n\n","sub_path":"AppRadio/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"454596993","text":"from tkinter import *\nfrom contenido.DataBase import CrearConexion, CrearCursor, CrearTablaContactos, CrearTablaUsuarios\nfrom tkinter import ttk\nimport pymysql\nfrom tkinter import messagebox\nimport contenido.contactos\nimport datetime\nimport hashlib\nconexion = CrearConexion(\"b6bauoez7gkwtblar05j\")\ncursor = CrearCursor(conexion)\n\nclass Ventana_login():\n\n    def __init__(self):\n        self.menu_inicio = Tk()\n        self.menu_inicio.geometry(\"320x320\")\n        self.menu_inicio.title(\"Acceso Usuarios\")\n\n        # Access and registration buttons for users\n        self.message = Label(self.menu_inicio, text = \"Seleccione opción\")\n        self.message.place(relx = 0.15 , rely = 0.10, relwidth = 0.7, relheight = 0.10)\n        self.botonAcceso = Button(self.menu_inicio, text = \"Acceso\", command = self.AccesoUsuario)\n        self.botonAcceso.pack()\n
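        # place() positions a widget by fractions of its parent window: relx/rely set the\n        # top-left corner and relwidth/relheight the size, so relx=0.25 with relwidth=0.50\n        # spans the middle half of the window horizontally\n        self.botonAcceso.place(relx = 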
0.25, rely = 0.25, relwidth = 0.50, relheight = 0.15)\n\n        self.botonRegistro = Button(self.menu_inicio, text = \"Registro\", command = self.RegistroUsuario)\n        self.botonRegistro.pack()\n        self.botonRegistro.place(relx = 0.25, rely = 0.55, relwidth = 0.50, relheight = 0.15)\n\n        self.message1 = Label(self.menu_inicio, text = \"\", fg = \"green\")\n        self.message1.place(relx = 0.10, rely = 0.90, relwidth = 0.8, relheight = 0.10)\n\n    # Function for access by registered users\n    def AccesoUsuario(self):\n        self.ventana_acceso = Toplevel()\n        self.ventana_acceso.title(\"Usuario registrado\")\n        self.ventana_acceso.geometry(\"300x300\")\n\n        Label(self.ventana_acceso, text = \"Introduce tu usuario y contraseña\").place(relx = 0.13, rely =0.02, relwidth = 0.7, relheight = 0.10)\n        Label(self.ventana_acceso, text = \"Nombre de usuario: \").place(relx = 0.15 , rely = 0.12, relwidth = 0.7, relheight = 0.10)\n        usuario = Entry(self.ventana_acceso)\n        usuario.place(relx = 0.25, rely = 0.22, relwidth = 0.50, relheight = 0.15)\n\n        Label(self.ventana_acceso, text = \"Contraseña: \").place(relx = 0.15 , rely = 0.40, relwidth = 0.7, relheight = 0.10)\n        contraseña = Entry(self.ventana_acceso,show = \"*\")\n        contraseña.place(relx = 0.25, rely = 0.50, relwidth = 0.50, relheight = 0.15)\n\n        Button(self.ventana_acceso, text = \"Acceder\", command = lambda:self.Acceder(usuario.get(), contraseña.get())).place(relx = 0.25, rely = 0.75, relwidth = 0.50, relheight = 0.15)\n\n    # Window for registering new users\n    def RegistroUsuario(self):\n        self.ventana_registro = Toplevel()\n        self.ventana_registro.title(\"Nuevos usuarios\")\n        self.ventana_registro.geometry(\"300x300\")\n\n        Label(self.ventana_registro, text = \"Escoge tu usuario y contraseña\").place(relx = 0.13, rely =0.02, relwidth = 0.7, relheight = 0.10)\n        Label(self.ventana_registro, text = \"Nombre de usuario\").place(relx = 0.12 , rely = 0.12, relwidth = 0.7, relheight = 0.10)\n        usuario = Entry(self.ventana_registro)\n        usuario.place(relx = 0.25, rely = 0.22, relwidth = 0.40, relheight = 0.10)\n\n        self.boton_comprobar = Button(self.ventana_registro, text = \"Comprobar\", command = lambda:self.Usuario_Registrado( usuario.get()))\n        self.boton_comprobar.place(relx = 0.67, rely = 0.22, relwidth = 0.30, relheight = 0.10) \n        \n        Label(self.ventana_registro, text = \"Contraseña: \").place(relx = 0.12 , rely = 0.43, relwidth = 0.7, relheight = 0.10)\n        contraseña = Entry(self.ventana_registro, show = \"*\")\n        contraseña.place(relx = 0.25, rely = 0.53, relwidth = 0.40, relheight = 0.10)\n\n        self.boton_registro = Button(self.ventana_registro, text = \"Registrarse\", command=lambda:self.Registrarse(usuario.get(), contraseña.get()))\n        \n        self.boton_registro.place_forget()\n\n    # Check whether the user is already registered:\n    def Usuario_Registrado(self, usuario):\n        query = \"SELECT * FROM Usuarios WHERE usuario = %s \"\n        parameters = (usuario,)\n        cursor.execute(query, parameters)\n        datos_registro = cursor.fetchall()\n        if datos_registro:\n            self.message = Label(self.ventana_registro, text = \"\", fg = \"red\")\n            self.message.place(relx = 0.20, rely = 0.33, relwidth = 0.8, relheight = 0.10)\n            self.message['text'] = \"El usuario ya existe\"\n        else:\n            self.message = Label(self.ventana_registro, text = \"\", fg = \"green\")\n            self.message.place(relx = 0.20, rely = 0.33, relwidth = 0.8, relheight = 0.10)\n            self.message['text'] = \"Usuario disponible\"\n            self.boton_registro.place(relx = 0.25, rely = 0.73, relwidth = 0.40, relheight = 0.10)\n\n    # Function to save the user's registration data in the DB\n    def Registrarse(self, usuario,contraseña):\n        query = 'INSERT INTO Usuarios(usuario, contraseña) VALUES (%s,SHA2(%s, 256))'\n        encriptada = contraseña\n        encriptada = encriptada.encode()\n        h = hashlib.sha256(encriptada)\n        pass_encript = h.digest()\n        print(h.digest())\n        parameters = (usuario, pass_encript)\n        cursor.execute(query, parameters)\n        conexion.commit()\n        self.ventana_registro.destroy()\n        self.message1['text'] = \"Usuario creado satisfactoriamente\"\n\n    # Function to give the registered user access to the contacts book\n    def Acceder(self, usuario, contraseña):\n        query = \"SELECT * FROM Usuarios WHERE usuario = %s AND contraseña = SHA2(%s,256)\"\n        encriptada = contraseña\n        encriptada = encriptada.encode()\n        h = hashlib.sha256(encriptada)\n        pass_encript = h.digest()\n        parameters = (usuario, pass_encript)\n        cursor.execute(query, parameters)\n        datos_usuario = cursor.fetchall() \n        \n        if datos_usuario: # If registered, open the contacts book and insert the user into the Logueado table\n            messagebox.showinfo(message = \"Usuario y contraseña correctos\", title = \"Login Correcto\")\n            usuario_logueado = (datos_usuario[0][1])\n            currenttime = str(datetime.datetime.now())\n            cursor.execute(\"INSERT INTO Logueado (Nombre, Fecha) VALUES ('\"+usuario_logueado+\"', '\"+currenttime+\"') \")\n            conexion.commit()\n            self.ventana_acceso.destroy()\n            self.menu_inicio.destroy()\n            contenido.contactos.Mostrar_Menu() \n        else :\n            messagebox.showinfo(message = \"Usuario o contraseña incorrectos\", title = \"Login Incorrecto\")\n        \n\ndef Mostrar():\n    aplicacion1 = Ventana_login()\n\n\n\n","sub_path":"contenido/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":6566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"341524483","text":"# -*- coding: utf-8 -*-\n'''\nBuild a training dataset for CRFSuite from a POS-tagger corpus.\n'''\n\nfrom __future__ import print_function\nimport codecs\nimport collections\n\nimport sys\n\nmax_sent = 1000000\n\nsrc_dataset = sys.argv[1]\n\ncrftrain_dataset = r'crfsuite_train.dat'\ncrftest_dataset = r'crfsuite_test.dat'\n\nUSE_SUFFIX_FEATURE = True\nMIN_SUFFIX_LEN = 3\nMAX_SUFFIX_LEN = 3\nWINDOW=3 # full size of the context window\n\nMAX_PATTERNS = 1000000\n\nHOLDOUT_Nth = 10 # every N-th sample (sentence/pattern) goes to the test set\n\nUSE_LEXICON = False # add morphological tags of words from word2tag.dat\n\nADD_WORDCLUSTER_FEATURES = True # add word cluster ids from word2cluster (obtained by clustering w2v word vectors)\n\nADD_WORD_FEATURES = True # add the most frequent words as standalone features\nMIN_FEATUREWORD_FREQ = 100\n\n# ------------------------------------------------------------ \n\nwin = (WINDOW-1)/2\n\n# ------------------------------------------------------------ \n\nword2tags = dict()\nword2cluster = dict()\nfeatured_words = set()\n\n\ndef extract_features( word, feature_name_prefix ):\n    features = []\n    \n    if word==u'':\n        features.append( feature_name_prefix+u'' )\n    else: \n        lword = word.lower() \n        if word[0].isupper() and word[1:].islower():\n            features.append( feature_name_prefix+u'Aa' )\n        elif word.isupper():\n            features.append( feature_name_prefix+u'AA' )\n\n        wlen = len(word)\n        if USE_SUFFIX_FEATURE:\n            if wlen>2:\n                for suffix_len in range(MIN_SUFFIX_LEN,min(MAX_SUFFIX_LEN,wlen-1)+1):\n                    suffix = lword[ wlen-suffix_len : wlen ]\n                    features.append( feature_name_prefix + u'sfx='+suffix )\n                    #print( u'DEBUG word={0} suffix_len={1} suffix={2}'.format(word,suffix_len,suffix) )
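\n                    # e.g. (toy illustration) with MIN_SUFFIX_LEN=MAX_SUFFIX_LEN=3 the token\n                    # u'word' contributes a single feature ending in u'sfx=ord'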
\n                    #raw_input( 'press a key' )\n            else:\n                features.append( feature_name_prefix+u'word='+lword )\n    \n        if lword in word2tags:\n            tags = word2tags[lword]\n            for tag in tags:\n                features.append( feature_name_prefix+u'tag='+tag )\n        \n        if lword in word2cluster:\n            features.append( feature_name_prefix+u'cluster='+word2cluster[lword] )\n\n        if lword in featured_words:\n            features.append( feature_name_prefix+u'featured_word='+lword )\n    \n    return features\n\n# ------------------------------------------------------------ \n\nif USE_LEXICON==True:\n    with codecs.open( 'word2tags.dat', 'r', 'utf-8-sig' ) as rdr:\n        print( 'Loading lexicon...' )\n        for line in rdr:\n            parts = line.strip().split(u'\\t')\n            word = parts[0].lower()\n            part_of_speech = parts[1]\n            tags = set( parts[2].split(u' ') ) if len(parts)==3 else set()\n            tags.add( part_of_speech )\n            \n            if word not in word2tags:\n                word2tags[word] = tags\n            else:\n                word2tags[word].update(tags)\n\n    print( '{0} words in lexicon'.format(len(word2tags)) )\n\n# ------------------------------------------------------------ \n\nif ADD_WORDCLUSTER_FEATURES==True:\n    with codecs.open( 'word2cluster.dat','r','utf-8') as rdr:\n        print( 'Loading word2cluster...' )\n        for line in rdr:\n            parts = line.strip().split(u'\\t')\n            word = parts[0].lower()\n            word2cluster[word] = parts[1]\n\n# ------------------------------------------------------------ \n    \nif ADD_WORD_FEATURES==True:\n    word2freq = collections.Counter()\n    with codecs.open( src_dataset, 'r', 'utf-8' ) as rdr:\n        for line in rdr:\n            parts = line.strip().split(u'\\t')\n            if len(parts)>=4:\n                word = parts[1].lower()\n                word2freq[word] += 1\n    \n    featured_words = set( [ word for word,cnt in filter( lambda z:z[1]>=MIN_FEATUREWORD_FREQ, word2freq.iteritems() ) ] )\n    \n    print( 'There are {0} featured words'.format( len(featured_words) ) )\n\n# ------------------------------------------------------------ \n\nprint( 'Building tagsets...' )\ntagset2id = dict()\ntotal_nb_sent = 0\nwith codecs.open( src_dataset, 'r', 'utf-8' ) as rdr:\n    for line in rdr:\n        parts = line.strip().split(u'\\t')\n        if len(parts)>=4:\n            tagset = parts[3] # part of speech\n            if len(parts)==5:\n                tagset = tagset + u' ' + parts[4] # append tags\n            if tagset not in tagset2id:\n                tagset2id[tagset] = len(tagset2id)\n        else:\n            # an empty line is a sentence separator\n            total_nb_sent += 1\n\nprint( 'total number of sentences={0}'.format(total_nb_sent) ) \nprint( 'number of tagsets={0}'.format(len(tagset2id)) )\n    \nwith codecs.open( 'id2tagset.dat', 'w', 'utf-8' ) as wrt:\n    for tagset,id in tagset2id.iteritems():\n        wrt.write( u'{0}\\t{1}\\n'.format(id,tagset) )\n\n# ------------------------------------------------------------ \n\nprint( 'Converting...' )\n\nsent_count=0\npattern_count=0\nrdr = codecs.open( src_dataset, 'r', 'utf-8' )\nwrt1_train = codecs.open( crftrain_dataset, 'w', 'utf-8' )\nwrt1_test = codecs.open( crftest_dataset, 'w', 'utf-8' )\n\nsent = []\nfor line in rdr:\n\n    if pattern_count>=MAX_PATTERNS:\n        break\n\n    parts = line.strip().split(u'\\t')\n    if len(parts)==0 or len(parts[0])==0:\n        # end of sentence\n        sent_count += 1\n        if (sent_count%1000)==0:\n            print( '{0}/{1}'.format(sent_count,total_nb_sent), end='\\r' )\n\n        nword = len(sent)\n        for i in range(nword):\n            token_features = []\n            for j in range(WINDOW):\n                word_index = i-win+j\n                word = u''\n                if word_index>=0 and word_index<nword:\n                    word = sent[word_index][0]\n                token_features.extend( extract_features( word, u'w[{0}]'.format(j-win) ) )\n\n            tags = u'\\t'.join(token_features)\n            target_tagset = tagset2id[ sent[i][1] ]\n            #print( u'{0} => {1}'.format( sent[i][0], tags ) )\n            #raw_input('press a key...' 
)\n \n if (sent_count%HOLDOUT_Nth)==0:\n wrt1_test.write( u'{0}\\t{1}\\n'.format(target_tagset, tags ) )\n else:\n wrt1_train.write( u'{0}\\t{1}\\n'.format(target_tagset, tags ) )\n\n pattern_count += 1\n if pattern_count>=MAX_PATTERNS:\n break\n\n if (sent_count%HOLDOUT_Nth)==0:\n wrt1_test.write('\\n')\n else:\n wrt1_train.write( '\\n' )\n\n sent = []\n else:\n word = parts[1]\n tagset = parts[3] # part of speech\n if len(parts)==5:\n tagset = tagset + u' ' + parts[4] # tags\n sent.append( (word,tagset) )\n\nwrt1_train.close()\nwrt1_test.close()\n\nprint( 'Generation complete.' )\n","sub_path":"ENSEMBLE/train_CRF.py","file_name":"train_CRF.py","file_ext":"py","file_size_in_byte":7204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"390945962","text":"# %%\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nimport math\nimport matplotlib.animation as animation\nfrom scipy.integrate import odeint\nfrom numpy import arange\nfrom scipy.integrate import odeint\nimport scipy.optimize \nfrom scipy.optimize import leastsq\nfrom math import exp\nfrom collections import OrderedDict\nfrom sklearn.linear_model import LinearRegression\npd.options.mode.chained_assignment = None\nimport git\n\n# Find home directory for repo\nrepo = git.Repo(\"./\", search_parent_directories=True)\nhomedir = repo.working_dir\n\n# %% \n# Load data\ndf_data = pd.read_csv (f'{homedir}/data/raw/EA/20191219_EA.csv')\n\n# Keep only rows with Peak Nr=2, since Peak Nr=1 \n# and Peak Nr=3 correspond to the reference gas\n#Select rows to drop\ni1 = df_data[df_data['Peak_Nr'] == 1].index\ni3 = df_data[df_data['Peak_Nr'] == 3].index\n#Drop rows in place\ndf_data.drop(i1, inplace=True)\ndf_data.drop(i3, inplace=True)\ndf_data.head()\n\n# %% \n# Import table with the raw data for blanks\ndf_blanks = pd.read_csv(f'{homedir}/data/raw/EA/20191219_blanks.csv')\n\n# Remove rows with missing data\ndf_blanks.dropna(how='any', inplace=True)\n\n# Keep only rows with Peak Nr=2\n#Select rows to drop\ni1 = df_blanks[df_blanks['Peak_Nr'] == 1].index\ni3 = df_blanks[df_blanks['Peak_Nr'] == 3].index\n#Drop rows in place\ndf_blanks.drop(i1, inplace=True)\ndf_blanks.drop(i3, inplace=True)\ndf_blanks.head()\n\n# %% \n# Import linearity data\ndf_lin = pd.read_csv(f'{homedir}/data/raw/EA/20191219_linearity.csv')\ndf_lin.head()\n\n# %% \n# Blank correction\n# Get the average area all for the blanks\nblank_area_all_average = df_blanks[\"Area_All\"].mean()\n\n# Get the average d34S for the blanks\nblank_d34s_average = df_blanks[\"d34S\"].mean()\n\n# Append a column to the data dataframe with the \n# correction of area by blank: substraction of the\n# area of the measurement by the area of the blank\n\ndf_data['Correction_of_area_blank'] = \\\n np.subtract(df_data['Area_All'], blank_area_all_average)\n\n# Correction of d34S by blank: \n \n# Get the product of the d34S by the area of each measurement\nnum1 = np.multiply (df_data['d34S'], df_data['Area_All'])\n# Get the product of the d34S by the area of the average of the blanks\nnum2 = blank_d34s_average * blank_area_all_average \n#Subtract the product of d34S by the product of the blanks\nnum = np.subtract (num1, num2)\n#Divide by the area corrected by blank and\n#append column to the data dataframe\ndf_data['Correction_of_d34S_blank'] = \\\n np.divide (num, df_data['Correction_of_area_blank'])\n# %% \n# Linearity correction\n# Create variables for linear regression\n# x will be the amplitude of the peak of mass 64 = ^32S + ^16O + ^16O\n# We 
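divided by 1000 to convert volts to millivolts\n# As a toy illustration of the amplitude correction applied below (slope and intercept are\n# assumed numbers, not the fitted ones): with slope=-0.5 and intercept=0.1, a peak at 1.7 V\n# centered on the arbitrary 1.2 V gives -0.5*(1.7-1.2)+0.1 = -0.15, so the blank-corrected\n# d34S is raised by 0.15 per mil.\n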
x = (df_lin['Ampl_64'].values/1000).reshape((-1,1))\n# y will be the values of d34S\ny = (df_lin['d34S'])\n\n# Create model of linear regression\nmodel = LinearRegression().fit(x,y)\n\n#Determine R square, intercept and slope\nr_sq = model.score(x,y)\nintercept = model.intercept_\ns = model.coef_\nslope=s[0]\n\n# Calculate an amplitude difference by centering around an arbitrary \n# value in the Ampl_64 column of the data dataframe \nnum= np.subtract(df_data['Ampl_64'], 1200)\nampl_difference = np.divide (num, 1000)\n\n# Calculate the amplitude correction factor\nampl_correction_factor = (slope * ampl_difference)+intercept\n\n# Correct d34S for amplitude by subtracting the amplitude correction factor\n# from the blank-corrected data, and append the column to the data dataframe\ndf_data['Correction_of_d34S_by_amplitude'] = \\\n    np.subtract(df_data['Correction_of_d34S_blank'], ampl_correction_factor)\n\n# %% \n#Standard correction\n\n#Create a category column according to the peak area of each sample and standard\n# Correct each sample by standards with similar areas\n\n#Create standard correction group empty list\nStd_group = []\n# Classify the area of the samples by category \n# Loop through dataframe rows\nfor row in df_data['Area_All']:\n    if row <= 65:\n        Std_group.append ('low')\n    else:\n        Std_group.append ('high') \n# Append to data dataframe \ndf_data['Std_group'] = Std_group\ndf_data.head()\n\n# %%\n#Create a dataframe for standards and drop the outliers\n# Create df with only standard data\ndf_standards = df_data[(df_data.Type == 'Standard')] \n#Sort values by ID and area\ndf_standards = df_standards.sort_values(['Identifier', 'Area_All'])\n\n#Drop outliers in place\ndf_standards.drop(df_standards.index[10], inplace=True)\ndf_standards.drop(df_standards.index[0], inplace=True)\n\n# Calculate the slope and intercept for calculated vs. true value of the stds
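\n# (Toy sketch with assumed readings): if sulfanilamide measured 2.0 against its true 2.42\n# and seawater measured 20.0 against its true 21, the fitted measured->true line is what\n# gets applied per area group below.\n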
\n# Append true value column to the standard table\n# For sulfanilamide\ndf_standards.loc[df_standards['Identifier'] \\\n                 == 'Sulfanilamide', 'True_d34S'] = 2.42\n# For seawater\ndf_standards.loc[df_standards['Identifier'] \\\n                 == 'SW', 'True_d34S'] = 21\n    \n# Group by Std_group\ndf_stdgroup = df_standards.groupby (['Std_group'])\n\ndf_standards.head()\n\n# %%\n#Linear regression for the standards of each area group\n# Get the slope and intercept for the standards of each group\n# Define column names\nnames = ['Std_group', 'R squared', 'Intercept', 'Slope']\n\n# Initialize empty dataframe to save fit results\ndf_linreg_stds = pd.DataFrame(columns=names)\n    \n# Create variables for linear regression\n#Loop through standard groups \nfor group, data in enumerate (df_stdgroup):\n    #x will be the values of d34S of the stds corrected by blanks and linearity\n    x_std = data[1].Correction_of_d34S_by_amplitude.values.reshape((-1,1))\n    #y will be the true values of d34S of each standard\n    y_std = data[1].True_d34S\n    # Create model\n    model = LinearRegression().fit(x_std,y_std)\n    #Determine R square, intercept and slope\n    r_sq_stds = model.score(x_std,y_std)\n    intercept_stds = model.intercept_\n    s1 = model.coef_\n    slope_stds = s1[0]\n    # Store parameters and group as list\n    params = (data[1].Std_group.unique(), r_sq_stds, intercept_stds, slope_stds)\n    \n    # Convert list to pandas Series\n    series = pd.Series(params, index=names) \n    # Append parameters to dataframe\n    df_linreg_stds = df_linreg_stds.append(series, ignore_index=True)\n#Round the values of the dataframe to two decimal places\ndf_linreg_stds = df_linreg_stds.round(2) \ndf_linreg_stds \n\n# %%\n#Apply the standard correction to the samples based on their area\n#Apply corrections by true value and area\n\n# Initialize lists to save values\nslopes = []\nintercepts = []\nCorrection_of_d34S_by_true_value = []\n\n#loop through rows in dataframe\nfor index, row in df_data.iterrows():\n    # Extract standard group\n    Std_group = row.Std_group\n    # Extract slope and intercept\n    slope = df_linreg_stds[df_linreg_stds.Std_group == Std_group].Slope.iloc[0]\n    intercept = df_linreg_stds[df_linreg_stds.Std_group == Std_group].Intercept.iloc[0]\n    slopes.append(slope)\n    intercepts.append(intercept)\n    # Compute corrected concentration\n    Correction_of_d34S_by_true_value.append(intercept + slope * row.Correction_of_d34S_by_amplitude)\n    \n# Append values to dataframe\ndf_data['Correction_of_d34S_by_true_value'] = Correction_of_d34S_by_true_value\n\ndf_data.head()\n\n# %%\n# Create dataframe that includes only data from the samples\ndf_samples = df_data[(df_data.Type == 'Sample')]\n\n# Determine point 0 in d34S from linear regression of the data\n\n# Group data by ID and replicate\ndf_group = df_samples.groupby(['Identifier','Replicate'])\n\n# Define column names\nnames = ['Identifier', 'Correction_of_d34S_by_true_value', 'r_sq']\n\n# Initialize empty dataframe to save fit results\ndf_linreg_d34s = pd.DataFrame(columns=names)\n\n# Create variables for linear regression\n# Loop through grouped data\nfor i, (group, data) in enumerate (df_group):\n    #x will be the time\n    xi = (data['Time_min'].values).reshape((-1,1))\n    #y will be the corrected d34S value\n    yi = (data['Correction_of_d34S_by_true_value'])\n    # Create model\n    model = LinearRegression().fit(xi,yi)\n    #Determine R squared and intercept \n    r_sq = model.score(xi,yi)\n    intercept = model.intercept_\n    # Store parameters and group as list\n    params = (data.Identifier.unique()[0], intercept, r_sq)\n    # 
Convert list to pandas Series\n series = pd.Series(params, index=names) \n # Append parameters to dataframe\n df_linreg_d34s = df_linreg_d34s.append(series, ignore_index=True)\n# Round the values in the dataframe to 2 decimal digits\ndf_linreg_d34s = df_linreg_d34s.round(2) \n\n#Add column with replicate and time to the linear reg. dataframe\ndf_linreg_d34s ['Time_min'] = (0,0,0,0,0,0)\ndf_linreg_d34s ['Replicate'] = ('a','b','c','a','b','c')\ndf_linreg_d34s ['Type'] = ('Sample','Sample','Sample','Sample','Sample','Sample')\n\n#Append data of time 0 to the main dataframe\nframes = [df_data, df_linreg_d34s]\ndf_data = pd.concat(frames, sort=False)\ndf_data.head()\n\n# %%\n# Create sample dataframe\ndf_samples = df_data[(df_data.Type == 'Sample')]\ndf_samples.head()\n\n# %%\n# Export data table\ndf_samples.to_csv(f'{homedir}/data/processed/EA/20191219_EA.csv')\n\n# %%\n# Calculate the analytical repeatibility of the measurements\n# Update standard df\n# Create a dataframe for only standards from the updated data dataframe\ndf_standards = df_data[(df_data.Type == 'Standard')]\n#Sort values\ndf_standards = df_standards.sort_values(['Identifier', 'Area_All'])\n\n#Group standards by identifier and amount\ngrouped_standards = df_standards.groupby(['Identifier', 'Amount'])\n\n# Determine the mean of each standard and rename the series\nmean_stds = grouped_standards['Correction_of_d34S_by_true_value'].mean()\nmean_stds = mean_stds.rename(\"d34S_mean\")\n\n# Determine the standard deviation of each standard and rename the series\nstd_dev_stds = grouped_standards['Correction_of_d34S_by_true_value'].std()\nstd_dev_stds = std_dev_stds.rename(\"d34S_stdev\")\n\n#Pass series to individual dataframes\ndf_mean_stds=mean_stds.to_frame()\ndf_std_dev_stds=std_dev_stds.to_frame()\n\n#merge the mean and standard deviation dataframes\ndf_anrep = pd.merge(df_mean_stds, df_std_dev_stds, how='outer', on=['Identifier', 'Amount'])\n\n#Reset index of the dataframe\ndf_anrep = df_anrep.reset_index()\n\n#Add column of true value of the standards\ndf_anrep.loc[df_anrep['Identifier'] \\\n == 'Sulfanilamide', 'True_d34S'] = 2.42\ndf_anrep.loc[df_anrep['Identifier'] \\\n == 'SW', 'True_d34S'] = 21\n\n#Determine the accuracy by subtracting the true value from the average value of each standard\ndf_anrep ['Accuracy'] = abs(df_anrep ['True_d34S'] - df_anrep ['d34S_mean'])\ndf_anrep\n\n# %%\n","sub_path":"code/processing/EA/DddK_DddQ_EA_corrections.py","file_name":"DddK_DddQ_EA_corrections.py","file_ext":"py","file_size_in_byte":10599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"136123348","text":"from flask import Flask, request, render_template\napp = Flask(__name__)\nimport numpy as np\nfrom model import model\n\n@app.route('/',methods=['POST','GET'])\ndef hello_world():\n if request.method == 'POST':\n e = int(request.form['experience'])\n t = int(request.form['test_score'])\n i = int(request.form['interview_score'])\n data = np.array([[e,t,i]])\n output = str(round(model.predict(data)[0],2))\n return render_template('result.html', result= output)\n else:\n return render_template('index.html')\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"deployment/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"620552951","text":"#!/usr/bin/env python\nimport roslib; \nroslib.load_manifest(\"fmDecisionMakers\")\nimport 
rospy\n\nimport smach\nimport smach_ros\n\nimport threading\n\nimport behaviours\nimport behaviours.wii_states.wii_auto_manuel\nfrom geometry_msgs.msg import PoseStamped\nfrom nav_msgs.msg import Path\n\nfrom fmExecutors.msg import sprayAction,sprayGoal\n\nimport numpy\nimport math\n\ndef do_abort(arg):\n return True\n\ndef displace_AB_line(path,offset):\n \"\"\"\n Displaces a AB line by creating a line local coordinate system where \n x is pointing in the direction of the line, the offset is then the y part\n of this local coordinate system\n \"\"\"\n \n # convert positions to matrix\n a = numpy.transpose(numpy.matrix([path.poses[0].pose.position.x,path.poses[0].pose.position.y],float))\n b = numpy.transpose(numpy.matrix([path.poses[1].pose.position.x,path.poses[1].pose.position.y],float))\n \n # calculate line vector\n v = b-a\n # calculate the angle of the line local coordinate system\n an = math.atan2(v[1],v[0])\n \n # create rotation matrix for which to rotate the local displacement into the coordinate system\n rot = numpy.matrix([[math.cos(an),-math.sin(an)],[math.sin(an),math.cos(an)]],float)\n # create a point in the line local coordinate system which will be rotated and translated\n # into new_a and new_b\n disp = numpy.transpose(numpy.matrix([0,offset],float))\n \n # rotate the point\n xy_rot = rot*disp\n \n # translate the point with respect to a and b\n a_trans = xy_rot + a\n b_trans = xy_rot + b\n \n # update the path with the offsetted line\n path.poses[0].pose.position.x = a_trans[0]\n path.poses[0].pose.position.y = a_trans[1]\n \n path.poses[1].pose.position.x = b_trans[0]\n path.poses[1].pose.position.y = b_trans[1]\n \n\ndef load_path(filename):\n path = Path()\n \n with open(filename) as file:\n for line in file:\n if not line.startswith(\"#\"):\n p = PoseStamped()\n x,y=line.split()\n p.header.frame_id = \"odom_combined\"\n p.pose.position.x = float(x)\n p.pose.position.y = float(y)\n path.poses.append(p)\n \n return path\n\ndef load_path2(filename):\n path = Path()\n first = True\n off_x = 0\n off_y = 0\n with open(filename) as file:\n for line in file:\n if not line.startswith(\"#\"):\n p = PoseStamped()\n y,x=line.split()\n p.header.frame_id = \"odom_combined\"\n if first:\n p.pose.position.x = 0.0\n p.pose.position.y = 0.0\n off_x = float(x)\n off_y = float(y)\n first = False \n else:\n p.pose.position.x = float(x) - off_x\n p.pose.position.y = float(y) - off_y\n path.poses.append(p)\n \n return path\n\ndef build_nav_sm(fn,offset,rev,spray):\n \n path = load_path(fn)\n \n if rev:\n path.poses.reverse()\n \n #displace_AB_line(path, offset)\n \n pf = behaviours.PlanFollow(path, \"/fmExecutors/follow_path\")\n \n plan_sm = smach.StateMachine(outcomes=[\"succeeded\",\"aborted\",\"preempted\"])\n \n with plan_sm:\n smach.StateMachine.add(\"FOLLOW_PLAN\",\n pf,\n transitions={\"succeeded\":\"DONE\",\"aborted\":\"aborted\",\"preempted\":\"preempted\"})\n smach.StateMachine.add(\"DONE\",\n behaviours.wait_state.WaitState(rospy.Duration(1)),\n transitions={\"succeeded\":\"DONE\"})\n \n if spray:\n auto_sm = smach.Concurrence(outcomes=[\"succeeded\",\"aborted\",\"preempted\"], default_outcome=\"succeeded\", child_termination_cb=do_abort)\n \n with auto_sm:\n smach.Concurrence.add(\"FOLLOW_PLAN\",\n plan_sm)\n smach.Concurrence.add(\"SPRAY\",\n smach_ros.SimpleActionState(\"/spray\", sprayAction, goal=sprayGoal(distance=0.25))\n )\n else:\n auto_sm = plan_sm\n \n sm = behaviours.wii_states.wii_auto_manuel.create(auto_sm, \"/fmHMI/joy\", 2)\n \n return sm\n\n\nif __name__ == 
\"__main__\":\n rospy.init_node(\"field_mission\")\n \n fn = rospy.get_param(\"~path_file\")\n offset = rospy.get_param(\"~offset\")\n rev = rospy.get_param(\"~reverse\")\n spray = rospy.get_param(\"~spray\")\n \n master = build_nav_sm(fn,offset,rev,spray)\n \n intro_server = smach_ros.IntrospectionServer('field_mission',master,'/FIELDMISSION')\n intro_server.start() \n \n smach_thread = threading.Thread(target = master.execute)\n smach_thread.start()\n \n rospy.spin();\n\n master.request_preempt()\n intro_server.stop()\n \n","sub_path":"fmApp/sdu_weeding_trial_2013/mission_planners/armadillo_sprayer.py","file_name":"armadillo_sprayer.py","file_ext":"py","file_size_in_byte":4860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"438516069","text":"# (c) Copyright [2018-2020] Micro Focus or one of its affiliates.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n#!/usr/bin/env python\nimport collections\nfrom setuptools import setup, find_packages\n\n\nReqOpts = collections.namedtuple('ReqOpts', ['skip_requirements_regex', 'default_vcs'])\n\nopts = ReqOpts(None, 'git')\nsetup(\n name='VerticaPy',\n version='0.3.0',\n description='A Python library that exposes sci-kit like functionality to conduct data science projects on data stored in Vertica.',\n author='Badr Ouali',\n author_email='badr.ouali@vertica.com',\n url='https://github.com/vertica/VerticaPy',\n keywords=\"machine-learning database vertica\",\n packages=find_packages(),\n license=\"Apache License 2.0\",\n install_requires=[\n ],\n classifiers=[\n \"Development Status :: 2 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Data Science\",\n \"Topic :: Machine Learning\",\n \"Topic :: Database\",\n \"Topic :: Database :: Database Engines/Servers\",\n \"Operating System :: OS Independent\"\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"488224763","text":"import os\nimport shutil\n\n\ndef get_select():\n print('='*60)\n print('Select an action:')\n print('\\t[1] - Go to directory.')\n print('\\t[2] - View the contents of the current directory.')\n print('\\t[3] - Delete directory.')\n print('\\t[4] - Create a directory.')\n print('\\t[q] - Quit the program.')\n print('='*60)\n action = input('Enter an action\\n>>> ')\n return action\n\n\ndef get_dir():\n print('='*60)\n name = input('Enter the name of the new directory\\nIf you want to'\n ' leave enter \\'exit\\' or press \\'Enter\\'\\n>>> ')\n dir_path = os.path.join(os.getcwd(), name)\n\n try:\n if name != 'exit' and name != '':\n os.mkdir(dir_path)\n print(f'Directory \\'{dir_path}\\' successfully created')\n else:\n print('You left')\n 
except FileExistsError:\n print('Cannot create: the directory already exists')\n finally:\n print('='*60)\n\n\ndef del_dir():\n print('='*60)\n while True:\n name = input('Enter the name of the directory to delete\\nIf you want to'\n ' leave enter \\'exit\\' or press \\'Enter\\'\\n>>> ')\n dir_path = os.path.join(os.getcwd(), name)\n\n try:\n if name != 'exit' and name != '':\n if not os.path.exists(dir_path):\n raise FileNotFoundError\n else:\n while True:\n try:\n conf = input('Are you sure? Enter y/n\\n>>> ')\n if conf != 'y' and conf != 'n':\n raise ValueError\n elif conf == 'y':\n shutil.rmtree(dir_path)\n print(f'Directory \\'{dir_path}\\' successfully'\n f' deleted')\n break\n else:\n print('You left')\n break\n except ValueError:\n print('Wrong command name')\n else:\n print('You left')\n break\n except FileNotFoundError:\n print('No such directory.')\n finally:\n print('='*60)\n\n\ndef get_dir_ls():\n print('='*60)\n print('The contents of the current directory.\\n')\n print(os.listdir(os.getcwd()))\n print(os.path.abspath(__file__))\n print('='*60)\n\n\ndef change_dir():\n print('='*60)\n name = input('Change directory. Enter the path to the directory.'\n '\\nIf you want to leave enter \\'exit\\''\n ' or press \\'Enter\\'.\\n>>> ')\n path_dir = os.path.join(os.getcwd(), name)\n\n try:\n if name != 'exit' and name != '':\n os.chdir(path_dir)\n print(f'Successfully moved to the directory \\'{path_dir}\\'')\n print(os.listdir(os.getcwd()))\n else:\n print('You left')\n except FileNotFoundError:\n print('No such directory or the path to the directory is incorrectly'\n ' specified.')\n finally:\n print('='*60)\n\n\nif __name__ == '__main__':\n get_select()\n get_dir()\n del_dir()\n get_dir_ls()\n change_dir()\n","sub_path":"the_simplest_console_utility/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"378592075","text":"from tqsdk import TqApi, TqSim, TqBacktest,TqAccount, TargetPosTask\nimport datetime\nfrom tqsdk.ta import KD\nimport sys\nimport os\nimport time\n\ntest = False\nplatform = \"A安粮期货\"\naccount = \"80069035\"\npassword = \"331783\"\nname = \"DCE.jd2009\"\ngride = 5\nsell_gride = 5\nevery_count = 1\nstart_date = datetime.date(2020, 4, 29)\nend_date = datetime.date(2020, 4, 29)\nmoney = 200000\n\ngride_table = [\n [4158, 1, 1],\n\t[4153, 2, 2],\n\t[4148, 3, 3],\n\t[4143, 4, 4],\n\t[4138, 5, 5],\n\t[4133, 6, 6],\n\t[4128, 7, 7],\n\t[4123, 8, 8],\n\t[4118, 9, 9],\n\t[4113,10,10]\n]\n\napi = None\nif test:\n print(\"Starting backtest\",name,start_date,end_date)\n api = TqApi(TqSim(money),backtest=TqBacktest(start_dt=start_date, end_dt=end_date))\nelse:\n api = TqApi(TqAccount(platform, account, password))\n\nquote = api.get_quote(name)\nposition = api.get_position(name)\nklines = api.get_kline_serial(name, 60)\n\nopen = 0\nclose = 0\ntotal = 0\n\ndef count_order():\n global open\n global close\n global total\n orders = api.get_order()\n for id in orders:\n o = api.get_order(id)\n if o.status != \"ALIVE\":\n continue\n if o.instrument_id != name[4:]:\n continue\n print(o)\n if o.offset==\"OPEN\" and o.direction==\"BUY\":\n open = open + o.volume_left \n if o.offset==\"CLOSE\" and o.direction==\"SELL\":\n close = close + o.volume_left\n total = position.pos_long + open - close\n\ndef get_kd():\n kd = KD(klines, 9,3,3)\n if \"k\" in kd and \"d\" in kd and len(kd[\"k\"])>0 and len(kd[\"d\"]) > 0: \n k = int(list(kd[\"k\"])[-1])\n d = int(list(kd[\"d\"])[-1])\n return k,d\n return 0,0\n\nwhile True:\n api.wait_update(int(time.time())+1)\n if api.is_changing(quote):\n count_order()\n # need close\n need_close = 0\n for v in gride_table:\n if quote.bid_price1 > v[0]:\n break\n need_close = total - v[1]\n if need_close > position.pos_long:\n need_close = position.pos_long\n k,d = get_kd()\n if k < d + 10 and need_close > 0:\n api.insert_order(symbol=name, direction=\"SELL\", offset=\"CLOSE\", volume=need_close, limit_price=quote.bid_price1)\n continue\n\n # need open\n need_open = 0\n for v in gride_table:\n if quote.ask_price1 > v[0]:\n break\n need_open = v[1]\n k,d = get_kd()\n if k - d > -15 and total < need_open:\n api.insert_order(symbol=name, direction=\"BUY\", offset=\"OPEN\", volume=need_open-total, limit_price=quote.ask_price1)\ninput(\"\")","sub_path":"jd/auto/gride/gride_up09_with_kd.py","file_name":"gride_up09_with_kd.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"595348650","text":"from django.shortcuts import render,get_object_or_404\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\nfrom .models import ArticleColumn,ArticlePost\nfrom django.contrib.auth.models import User\n\ndef article_titles(request,username=None):\n if username:\n user=User.objects.get(username=username)\n article_title=ArticlePost.objects.filter(author=user)\n try:\n userinfo=user.userinfo\n except:\n userinfo=None\n else:\n article_title=ArticlePost.objects.all()\n pagintor=Paginator(article_title,2)\n page=request.GET.get(\"page\")\n try:\n current_page=pagintor.page(page)\n articles=current_page.object_list\n except PageNotAnInteger:\n current_page=pagintor.page(1)\n articles=current_page.object_list\n except EmptyPage:\n current_page=pagintor.page(pagintor.num_pages)\n articles=current_page.object_list\n if username:\n return render(request,\"article/list/author_articles.html\",\n {\"articles\":articles,\"page\":current_page,\n \"userinfo\":userinfo,\"user\":user})\n return render(request,\"article/list/article_titles.html\",\n {\"articles\":articles,\"page\":current_page})\n\ndef article_detail(request,id,slug):\n article=get_object_or_404(ArticlePost,id=id,slug=slug)\n return render(request,\"article/list/article_content.html\",{\"article\":article})","sub_path":"article/list_views.py","file_name":"list_views.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"419898594","text":"import os\nfrom csv_utf_support import CSVUnicodeWriter, CSVUnicodeReader\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nitem_filepath = os.path.join(basedir, 'Item.csv')\nitem_uog_level = os.path.join(basedir, 'Item-UOG_LEVEL-Item.csv')\nitem_in_school = os.path.join(basedir, 'Item-IN_SCHOOL-Item.csv')\nitem_is_subject_of = os.path.join(basedir, 'Item-IS_SUBJECT_OF-Item.csv')\n\nlevels_fp = os.path.join(basedir, 'levels.csv')\nschools_fp = os.path.join(basedir, 'schools.csv')\nsubjects_fp = os.path.join(basedir, 'subjects.csv')\n\n\ndef fill_dict(fp):\n d = {}\n with open(fp) as entity_file:\n csv_reader = CSVUnicodeReader(entity_file)\n headers = csv_reader.next()\n for line in csv_reader:\n d[line[0]] = line[1:]\n return d\n\nlevels = fill_dict(levels_fp)\nschools = fill_dict(schools_fp)\nsubjects = fill_dict(subjects_fp)\n\nwith open(item_filepath, mode='r') as item_f, open(item_uog_level, mode='w') as level_f, \\\n open(item_in_school, mode='w') as school_f, 
open(item_is_subject_of, mode='w') as subject_f:\n item_r = CSVUnicodeReader(item_f)\n level_w = CSVUnicodeWriter(level_f)\n school_w = CSVUnicodeWriter(school_f)\n subject_w = CSVUnicodeWriter(subject_f)\n item_r.next() # headers\n level_w.writerow(('uog_id', 'uog_id'))\n school_w.writerow(('uog_id', 'uog_id'))\n subject_w.writerow(('uog_id', 'wikidata_id'))\n for idx, course in enumerate(item_r):\n if idx > 4805:\n break\n course_id = course[0]\n\n course_level = course[6][1:]\n level_id = levels[course_level]\n level_w.writerow((course_id, level_id[0]))\n\n course_school = course[7][1:]\n school_id = schools[course_school]\n school_w.writerow((course_id, school_id[0]))\n\n course_subject = course[8]\n subject_ids = subjects[course_subject]\n for subject_id in subject_ids:\n if subject_id == u'':\n continue\n subject_w.writerow((course_id, subject_id))","sub_path":"uog_courses/dump/masher.py","file_name":"masher.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"615043061","text":"# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nAn OpenFlow 1.0 L2 learning switch implementation.\n\"\"\"\n\nimport collections\n\nfrom ryu.base import app_manager\nfrom ryu.controller import ofp_event\nfrom ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls\nfrom ryu.lib.packet import arp, ether_types, ethernet, icmp, ipv4, packet, tcp\nfrom ryu.ofproto import inet, ofproto_v1_0, ofproto_v1_0_parser\n\nIpPort = collections.namedtuple('IpPort', 'ip port')\n\nether_type_names = {\n ether_types.ETH_TYPE_IP: \"IPv4\",\n ether_types.ETH_TYPE_IPV6: \"IPv6\",\n ether_types.ETH_TYPE_LLDP: \"LLDP\",\n ether_types.ETH_TYPE_ARP: \"ARP\"\n}\n\n\ndef ether_type_name(ethertype):\n if ethertype in ether_type_names:\n return ether_type_names[ethertype]\n return \"UNKNOWN\"\n\n\narp_opcode_names = {arp.ARP_REPLY: \"Reply\", arp.ARP_REQUEST: \"Request\"}\n\n\ndef arp_opcode_name(opcode):\n if opcode in arp_opcode_names:\n return arp_opcode_names[opcode]\n return \"UNKNOWN\"\n\n\nip_proto_names = {\n inet.IPPROTO_ICMP: \"ICMP\",\n inet.IPPROTO_ICMPV6: \"ICMPv6\",\n inet.IPPROTO_TCP: \"TCP\",\n inet.IPPROTO_UDP: \"UDP\"\n}\n\n\ndef ip_proto_name(proto):\n if proto in ip_proto_names:\n return ip_proto_names[proto]\n return \"UNKNOWN\"\n\n\nclass SimpleSwitch(app_manager.RyuApp):\n OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]\n\n def __init__(self, *args, **kwargs):\n super(SimpleSwitch, self).__init__(*args, **kwargs)\n # { datapath_id: { mac_address: port } }\n self.mac_to_port = {}\n # { datapath_id: { ip_address: port } }\n self.ip_to_port = {}\n\n @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\n def handle_packet_in(self, event: ofp_event.EventOFPPacketIn):\n packet_in = event.msg # type: ofproto_v1_0_parser.OFPPacketIn\n datapath_id = packet_in.datapath.id\n\n frame = packet.Packet(packet_in.data)\n eth_header = 
frame.get_protocol(ethernet.ethernet)\n self.mac_to_port.setdefault(datapath_id,\n {})[eth_header.src] = packet_in.in_port\n\n eth_type = eth_header.ethertype\n self.logger.info(\n \"received OFPT_PACKET_IN: buffer_id=0x%x total_len=%d in_port=%s\",\n packet_in.buffer_id, packet_in.total_len, packet_in.in_port)\n self.logger.info(\" %s -> %s, ethertype=0x%x (%s)\", eth_header.src,\n eth_header.dst, eth_type, ether_type_name(eth_type))\n if eth_type == ether_types.ETH_TYPE_ARP:\n self.handle_arp(packet_in, eth_header, frame.get_protocol(arp.arp))\n elif eth_type == ether_types.ETH_TYPE_IP:\n self.handle_ipv4(packet_in, frame, eth_header,\n frame.get_protocol(ipv4.ipv4))\n\n def handle_arp(self,\n packet_in: ofproto_v1_0_parser.OFPPacketIn,\n eth_header: ethernet.ethernet,\n arp_header: arp.arp):\n self.logger.info(\" %s -> %s, opcode=0x%x (%s)\", arp_header.src_ip,\n arp_header.dst_ip, arp_header.opcode,\n arp_opcode_name(arp_header.opcode))\n out_port = packet_in.datapath.ofproto.OFPP_FLOOD\n if arp_header.dst_mac in self.mac_to_port[packet_in.datapath.id]:\n out_port = self.mac_to_port[packet_in.datapath.id][\n arp_header.dst_mac]\n self.forward(packet_in, out_port)\n\n def forward(self, packet_in: ofproto_v1_0_parser.OFPPacketIn, port: int):\n data = None\n if packet_in.buffer_id == packet_in.datapath.ofproto.OFP_NO_BUFFER:\n data = packet_in.data\n packet_out = packet_in.datapath.ofproto_parser.OFPPacketOut(\n datapath=packet_in.datapath,\n buffer_id=packet_in.buffer_id,\n in_port=packet_in.in_port,\n data=data,\n actions=[packet_in.datapath.ofproto_parser.OFPActionOutput(port)])\n self.logger.info(\n \" sending packet_out: output packet on switch port %d\", port)\n packet_in.datapath.send_msg(packet_out)\n\n def handle_ipv4(self,\n packet_in: ofproto_v1_0_parser.OFPPacketIn,\n frame: packet.Packet,\n eth_header: ethernet.ethernet,\n ipv4_header: ipv4.ipv4):\n self.logger.info(\" %s -> %s, proto=0x%x (%s)\", ipv4_header.src,\n ipv4_header.dst, ipv4_header.proto,\n ip_proto_name(ipv4_header.proto))\n datapath_id = packet_in.datapath.id\n self.ip_to_port.setdefault(datapath_id,\n {})[ipv4_header.src] = packet_in.in_port\n if ipv4_header.proto == inet.IPPROTO_TCP:\n tcp_header = frame.get_protocol(tcp.tcp)\n self.handle_tcp(packet_in, eth_header, ipv4_header, tcp_header)\n elif ipv4_header.proto == inet.IPPROTO_ICMP:\n icmp_header = frame.get_protocol(icmp.icmp)\n self.handle_icmp(packet_in, eth_header, ipv4_header, icmp_header)\n\n def handle_tcp(self,\n packet_in: ofproto_v1_0_parser.OFPPacketIn,\n eth_header: ethernet.ethernet,\n ipv4_header: ipv4.ipv4,\n tcp_header: tcp.tcp):\n self.logger.info(\" %d -> %d\", tcp_header.src_port,\n tcp_header.dst_port)\n datapath = packet_in.datapath\n ofproto = datapath.ofproto\n out_port = ofproto.OFPP_FLOOD\n if ipv4_header.dst in self.ip_to_port[datapath.id]:\n out_port = self.ip_to_port[datapath.id][ipv4_header.dst]\n match = datapath.ofproto_parser.OFPMatch(\n dl_type=ether_types.ETH_TYPE_IP, # doesn't work without this\n nw_proto=inet.IPPROTO_TCP,\n nw_dst=ipv4_header.dst,\n tp_dst=tcp_header.dst_port)\n mod = datapath.ofproto_parser.OFPFlowMod(\n datapath=datapath,\n match=match,\n command=ofproto.OFPFC_ADD,\n idle_timeout=0,\n hard_timeout=0,\n priority=ofproto.OFP_DEFAULT_PRIORITY,\n buffer_id=packet_in.buffer_id,\n actions=[datapath.ofproto_parser.OFPActionOutput(out_port)])\n datapath.send_msg(mod)\n self.forward(packet_in, out_port)\n\n def handle_icmp(self,\n packet_in: ofproto_v1_0_parser.OFPPacketIn,\n eth_header: ethernet.ethernet,\n 
ipv4_header: ipv4.ipv4,\n icmp_header: icmp.icmp):\n out_port = packet_in.datapath.ofproto.OFPP_FLOOD\n datapath_id = packet_in.datapath.id\n if ipv4_header.dst in self.ip_to_port[datapath_id]:\n out_port = self.ip_to_port[datapath_id][ipv4_header.dst]\n self.forward(packet_in, out_port)\n\n @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)\n def _port_status_handler(self, ev):\n msg = ev.msg\n reason = msg.reason\n port_no = msg.desc.port_no\n\n ofproto = msg.datapath.ofproto\n if reason == ofproto.OFPPR_ADD:\n self.logger.info(\"port added %s\", port_no)\n elif reason == ofproto.OFPPR_DELETE:\n self.logger.info(\"port deleted %s\", port_no)\n elif reason == ofproto.OFPPR_MODIFY:\n self.logger.info(\"port modified %s\", port_no)\n else:\n self.logger.info(\"Illegal port state %s %s\", port_no, reason)\n","sub_path":"examples/flowvisor/ryu_app.py","file_name":"ryu_app.py","file_ext":"py","file_size_in_byte":7982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"433940065","text":"\r\nimport sys\r\nimport time\r\nfrom keras.callbacks import *\r\n\r\ndef write(text,fname,reset = False):\r\n \"\"\"\r\n Auxiliary function to print loss values to the log file and terminal\r\n Inputs:\r\n text - text to print\r\n reset - if True resets *.log file\r\n fname - path to log file\r\n Outputs:\r\n None\r\n \"\"\"\r\n sys.stdout.write(text)\r\n sys.stdout.flush()\r\n fname = fname + '.log'\r\n if reset:\r\n f = open(fname, 'w')\r\n else:\r\n f = open(fname, 'a')\r\n f.write(text)\r\n f.close()\r\n\r\nclass MyCallback(Callback):\r\n \"\"\"\r\n Callback class defined for the NN training\r\n\r\n Attributes:\r\n min_val_loss - minimum registered validation loss value\r\n save_path - directory in which the log file with loss values will be stored\r\n \"\"\"\r\n def __init__(self,save_path):\r\n '''\r\n Inputs:\r\n save_path - path to log file where loss values are stored\r\n '''\r\n self.min_val_loss = None\r\n self.save_path = save_path\r\n write('%-20s %5s %10s %10s\\n' % ('time', 'epoch', 'loss', 'val_loss'), self.save_path + 'train',True)\r\n def on_epoch_end(self, epoch, logs={}):\r\n '''\r\n Function called at the end of each epoch. The action performed depends on whether the validation loss value obtained\r\n is a new minimum. If so, the corresponding NN parameters are saved to model_parameters.hdf. 
Otherwise only \r\n the train.log file will be updated with the loss values obtained\r\n '''\r\n epoch += 1\r\n loss = logs['loss']\r\n val_loss = logs['val_loss']\r\n t = time.strftime('%Y-%m-%d %H:%M:%S')\r\n if (self.min_val_loss == None) or (val_loss < self.min_val_loss):\r\n self.min_val_loss = val_loss\r\n write('%-20s %5d %10.4f %10.4f *\\n' % (t, epoch, loss, val_loss), self.save_path + 'train')\r\n if epoch >= 100:\r\n self.model.save_weights(self.save_path + 'model_parameters.hdf', overwrite=True)\r\n else:\r\n write('%-20s %5d %10.4f %10.4f\\n' % (t, epoch, loss, val_loss), self.save_path + 'train')","sub_path":"COMPASS/NN/nn_callback.py","file_name":"nn_callback.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"66400517","text":"# -*- coding: utf-8 -*-\n\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nfrom pymongo import MongoClient\n\n# this should get collection names from\ndef create_usage_pane(start_time, \n end_time,\n collection_names):\n url = 'mongodb://localhost:27017/'\n dbname = 'HoboCI_test_test'\n client = MongoClient(url)\n db = client[dbname]\n\n print('url: %s' % url)\n print('db: %s' % dbname)\n print('create_usage_pane: collection_names = %s' % str(collection_names))\n\n cpu_scatter = list()\n mem_scatter = list()\n gpu_scatter = list()\n for collection_name in collection_names:\n collection = db[collection_name]\n\n query_command = {'timestamp': {'$gt': start_time, '$lt': end_time}}\n documents = collection.find(query_command)\n\n print(type(start_time))\n print(\"found %d documents for (%s, %s)\" % (documents.count(), start_time, end_time))\n\n cpu_stats = list()\n mem_stats = list()\n gpu_stats = list()\n timestamps = list()\n for d in documents:\n # either loop like list or reset the cursors\n cpu_stats.append(d['usage']['utime'])\n mem_stats.append(d['usage']['maxrss'] / 1e6) # want units of GB\n gpu_stats.append(d['usage']['stime'])\n timestamps.append(d['timestamp'])\n \n cpu_scatter.append(go.Scatter(x = timestamps, y = cpu_stats, name = collection_name))\n mem_scatter.append(go.Scatter(x = timestamps, y = mem_stats, name = collection_name))\n gpu_scatter.append(go.Scatter(x = timestamps, y = [0 for t in timestamps], name = collection_name))\n\n margin = go.Margin(l = 50, r = 10, t = 30, b = 70)\n cpu_stats_graph = dcc.Graph(figure = go.Figure(data = cpu_scatter,\n layout = {'title': 'CPU Time',\n 'margin': margin,\n 'legend' : {'x': 0, 'y': 1}}),\n id = 'cpu_stats_%s' % collection_name)\n\n mem_stats_graph = dcc.Graph(figure = go.Figure(data = mem_scatter, \n layout = {'title': 'Peak Memory Usage',\n 'margin': margin,\n 'legend' : {'x': 0, 'y': 1}}),\n id = 'mem_sstats_%s' % collection_name)\n\n gpu_stats_graph = dcc.Graph(figure = go.Figure(data = gpu_scatter, \n layout = {'title': 'GPU Usage',\n 'margin': margin,\n 'legend' : {'x': 0, 'y': 1}}),\n id = 'gpu_stats_%s' % collection_name) \n \n return html.Div([cpu_stats_graph, mem_stats_graph, gpu_stats_graph],\n id = 'usage-pane')\n \n \n\n","sub_path":"src/mad_dash/panes/usage_pane.py","file_name":"usage_pane.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"369757646","text":"#!/usr/bin/python\n\nimport sys\nimport os\nimport optparse\n\n# Tool to zip repos from command line with following command\n# ./zip-repos_simple.py -l : -all -b 
master\n\n\ndef zip (repos, zip_name, login_str, branch):\n cloneHTTPS = \"git clone https://\"\n github =\"github.com/00qnavry00\"\n\n # if there is only one repo in the repos list, there is no need to create a project directory\n if len(repos) == 1:\n repo = repos[0]\n print(\"\\n Cloning \" + repo + \" repo, branch \" + branch + \"\\n\")\n\n os.system(cloneHTTPS + login_str + github + '/' + repo + '.git -b' + branch)\n if os.path.exists(repo):\n os.system('rm -rf %s/.git' % repo)\n os.system('zip -r9 %s.zip %s' % (zip_name, repo))\n os.system('rm -rf %s' % repo)\n\n else:\n os.makedirs(zip_name)\n os.chdir(zip_name)\n\n for repo in repos:\n print(\"\\n Cloning \" + repo + \" repo, branch \" + branch + \"\\n\")\n os.system(cloneHTTPS + login_str + github + '/' + repo + '.git -b' + branch)\n if os.path.exists(repo):\n os.system('rm -rf %s/.git' % repo)\n\n os.chdir('../')\n os.system('zip -r9 %s.zip %s' % (zip_name, zip_name))\n os.system('rm -rf %s' % zip_name)\n\n\n## Assuming that xx.x branch value is only reserved for release branches\n## If not float value is entered, entered value is assumed to be an existing branch name (Ex.: master, develop)\n\n# def full_branch_name(branch, char):\n# branch_name = branch\n# try:\n# float(branch)\n# branch_name = 'release' + char + branch\n# return branch_name\n# except ValueError:\n# return branch_name\n\ndef main(): #USED TO HAVE 'argv = None' AS PARAMETER. IF ERROR RESULTS, RETURN TO THIS\n argv = sys.argv[1:]\n try:\n # if not \"-l\" in argv:\n # print(\"\\nPlease add '-l :' as argument to this script for authentication.\\n\")\n # sys.exit()\n # else:\n # i = argv.index(\"-l\")\n # login = argv[i+1]\n #\n # if not len(login) or not ':' in login:\n # print(\"\\nPlease provide login info in the following format: -l :\\n\")\n # sys.exit()\n\n if not \"-b\" in argv:\n print(\"\\nSince '-b ' is not specified, we will zip master branch.\\n\")\n branch = 'master'\n else:\n i = argv.index(\"-b\")\n branch = argv[i+1]\n\n #\n # Opens up repo_list.txt and writes all contents into a list to be used later\n #\n with open(\"repo_list.txt\", 'r', newline='') as e:\n repo_list = []\n for i in e:\n repo_list.append(i.strip())\n # print(repo_list) FOR DEBUGGING PURPOSES\n\n # Will now look for and zip specific repositories specified by user\n if \"-repo\" in argv:\n start = argv.index(\"-repo\") + 1\n if \"-b\" in argv:\n finish = argv.index(\"-b\")\n else:\n finish = argv.index(\"master\")\n desired_repos = argv[start:finish]\n for k in desired_repos:\n if k in repo_list:\n zip([k.lower()], k, \"\", 'master')\n else:\n print(\"\\n The Repository: '\" + k + \"' was NOT found.\\n\")\n\n # Will zip all repositories\n elif \"-all\" in argv:\n for j in repo_list:\n zip([j.lower()], j, \"\", 'master')\n\n # Will advise user of improper use of syntax\n else:\n print(\"\\nPlease either include '-repo ' separated by white spaces as the script arguments for specific repositories or '-all' for all repositories.\\n\")\n\n\n except Exception as e:\n raise(e)\n\n\nif __name__ == '__main__':\n sys.exit(main())","sub_path":"zip-repos_simple.py","file_name":"zip-repos_simple.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"494220605","text":"from django import forms\nfrom django.contrib.gis import forms as gisforms\n#from .widgets import OpenLayersWidget\nfrom .models import PoliceStation, Hospital, Customer, User\nfrom django.contrib.auth.forms import UserCreationForm, 
UserChangeForm\nfrom django.conf import settings\n\n\nclass CustomUserCreationForm(UserCreationForm):\n\n class Meta(UserCreationForm.Meta):\n model = User\n fields = ('username', 'email')\n\nclass CustomUserChangeForm(UserChangeForm):\n\n class Meta:\n model = User\n fields = ('username', 'email')\n\n\n\nclass PoliceStationForm(forms.ModelForm):\n class Meta:\n model = PoliceStation\n exclude = ['admin'] \n widgets = {\n 'location' : gisforms.OSMWidget(attrs={'map_width': 800, \n 'map_height': 500,\n 'default_lon': 76.614787,\n 'default_lat': 27.552189,\n 'display_raw': True})\n }\n\n\nclass HospitalForm(forms.ModelForm):\n class Meta:\n model = Hospital\n exclude = ['admin']\n widgets = {\n 'location' : gisforms.OSMWidget(attrs={'map_width': 800, \n 'map_height': 500,\n 'default_lon': 76.614787,\n 'default_lat': 27.552189,\n 'display_raw': True})\n }\n\n\nclass CustomerForm(forms.ModelForm):\n class Meta:\n model = Customer\n exclude = ['user']","sub_path":"account/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"590989991","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n'''\nDate: 2018/11/23\nAuthor: Xu Yucheng\nAbstract: Code for create test data\n'''\nimport os\nimport sys\nimport cv2\nimport shutil\nimport random\n\nDATA_BASE = '../../dataset/TestData'\nTEST_DATA = '../TestData'\nDES_DIR = '../TestData'\n\n\ndef copy_image(from_path, to_path):\n shutil.copy(from_path, to_path)\n\ndef resize_image(image):\n return cv2.resize_image(image,(64,64))\n\ndef get_id(str1, str2):\n length = len(str2)\n label = str1[length+1:]\n return label\n\ndef find_image(path=DATA_BASE):\n os.chdir(TEST_DATA)\n os.system(\"rm -rf *\")\n labels = os.listdir(path)\n for label in labels:\n DES_DIR = os.path.abspath(os.path.join(TEST_DATA,label))\n if not os.path.exists(DES_DIR):\n os.chdir(TEST_DATA)\n os.mkdir(label)\n folder_path = os.path.abspath(os.path.join(DATA_BASE, label))\n #for _ in range(10) means create 10 random int\n # _ can be replaced by any char\n indexes = [random.randint(10000,10047)]\n for index in indexes:\n filename = str(index) + '.png'\n filepath = os.path.abspath(os.path.join(folder_path, filename))\n copy_image(filepath, DES_DIR)\n print(\"\\033[0;31;40m Images in %s have been moved to %s\\033[0m\"%(folder_path, DES_DIR))\n return 0\n\n\n\n\n \n\n\n","sub_path":"pytorch/src/create_test_data.py","file_name":"create_test_data.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"178606386","text":"# videos/events - views\n\n# 404 error = something was deleted in the database and is not available -> is a good way to show responses\n#from django.http import Http404\n#from django.http import HttpResponse\nfrom django.shortcuts import render, get_object_or_404\n#from django.template import loader\nfrom .models import Event, Video, Artist\n\ndef index(request):\n\t# list of all events\n\tall_events = Event.objects.all()\n\tall_artists = Artist.objects.all()\n\t# to import a template for the webpage \n\t# Django know that in the videos are the folder template and then searchers for the html file\n\t#template = loader.get_template('videos/index.html')\n\t# create a dictionary\n\tcontext = {'all_events' : all_events, 'all_artists': all_artists}\n\t# the HttpResponse is already implemented in the render function\n\treturn render(request, 'videos/index.html', 
context) \n\n\n#def index(request):\n\t# list of all events\n#\tall_events = Event.objects.all()\n#\thtml = ''\n#\t# loop through all events\n#\tfor event in all_events:\n#\t\turl = '/videos/' + str(event.id) + '/'\n#\t\thtml += '' + event.place_name + '
'\n#\t# html will be created and transfered to the variable html\n#\treturn HttpResponse(html)\n\n# details for each event (inclusive the videos)\n#def detail(request, event_id):\n#\ttry:\n#\t\t#get a specific object -> based on the id\n#\t\tevent = Event.objects.get(pk=event_id)\n#\texcept Event.DoesNotExist:\n#\t\traise Http404(\"This event doesn't exist.\")\n#\t# thats the dictionary \"event: event\" the page will render \n#\treturn render(request, 'videos/detail.html', {'event': event}) \n\n# details for each event (inclusive the videos) + shortcur (doesn't need the try except part)\ndef detail(request, event_id):\n\t# if the object exists get the value for the event (event_id, if not raise an 404 error) / it is a try except statement\n\tevent = get_object_or_404(Event, pk=event_id)\n\t# thats the dictionary \"event: event\" the page will render \n\treturn render(request, 'videos/detail.html', {'event': event}) \t\n\n\ndef favorite(request, event_id):\n\tevent = get_object_or_404(Event, pk=event_id)\n\ttry: \n\t\tselected_video = event.video_set.get(pk=request.POST[ 'video' ])\n\texcept(KeyError, Video.DoesNotExist):\n\t\treturn render(request, 'videos/detail.html', {\n\t\t\t'event': event,\n\t\t\t'error_message': 'You did not select a valid video', \n\t\t\t})\n\telse:\n\t\t#changes the attribute of the variable video.is_favorite to true\n\t\tselected_video.is_favorite = True\n\t\t# save the change in the database\n\t\tselected_video.save()\n\t\treturn render(request,'videos/detail.html', {'event': event })\n","sub_path":"videos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"256787463","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport sys\nimport webapp2\n\n# Use routines from this base class\nimport base\n\n\n# Handling Ahom and other language codes for testing font and conversions.\n# Should this be inherited from base.languageTemplate?\nclass langInfo():\n def __init__(self):\n self.LanguageCode = 'phk'\n self.Language = 'Tai Phake'\n self.Language_native = 'Language in Tai Phake'\n\n self.encoding_font_list = [\n {\n 'font_path': '/fonts/ahom_aiton/PHAKE.TTF',\n 'font_name': 'Phake',\n 'display_name': 'Phake',\n },\n {\n 'font_path': '/fonts/ahom_aiton/PHAKERAM.TTF',\n 'font_name': 'Phakeramayana',\n 'display_name': 'Phake Ramayana',\n },\n {\n 'font_path': '/fonts/ahom_aiton/AITON.TTF',\n 'font_name': 'Aiton',\n 'display_name': 'Aiton',\n },\n ]\n\n self.unicode_font_list = [\n { 'source': '/fonts/Padauk-Regular.ttf',\n 'family': 'Padauk',\n 'longName': 'Padauk',\n },\n { 'source': '/fonts/NotoSansMyanmar-Regular.ttf',\n 'family': 'NotoSansMyanmar ',\n 'longName': 'Noto Sans Myanmar',\n },\n ]\n\n self.lang_list = [\n {'shortName': 'phk',\n 'longName': 'Phake'\n },\n {'shortName': 'aio',\n 'longName': 'Aiton'\n },\n {'shortName': 'kht',\n 'longName': 'Khamti'\n },\n ]\n\n self.links = 
[\n {'linkText': 'Keyboard',\n 'ref': '/aho/'\n },\n {'linkText': 'Converter',\n 'ref': '/' + self.LanguageCode + '/convertUI/'\n },\n {'linkText': 'Font conversion summary',\n 'ref': '/' + self.LanguageCode + '/encodingRules/'\n },\n {'linkText': 'Myanmar Unicode block',\n 'ref': 'http://www.unicode.org/charts/PDF/U1000.pdf'\n },\n {'linkText': 'Myanmar Unicode extension A block',\n 'ref': 'https://www.unicode.org/charts/PDF/UAA60.pdf'\n },\n {'linkText': 'Resources',\n 'ref': '/' + self.LanguageCode + '/downloads/'\n },\n ]\n\n self.kb_list = self.lang_list\n\n # Resource files\n self.text_file_list = [\n '/download/aho/3-5-1-1.txt',\n '/download/aho/nemi_mang_text.txt'\n ]\n\n self.baseHexUTF16 = u'\\ud805\\udf00'\n self.base_consonant = u'\\ud805\\udf00'\n\n if sys.maxunicode >= 0x10000:\n self.unicodeChars = [unichr(x) for x in range(0x11700, 0x1173f)]\n self.diacritic_list = [unichr(x) for x in range(0x1171d, 0x1172c)]\n else:\n self.unicodeChars = [unichr(0xd805) + unichr(0xdd00 + x) for x in range(0x00, 0x3f)]\n self.diacritic_list = [unichr(0xd805) + unichr(0xdd00 + x) for x in range(0x1d, 0x2c)]\n\n\n # Python-based transliteration tool.\n self.transliterator = None\n\n # Test data for showing in converter.\n self.test_data = [\"ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz\"\n ]\n\n return\n\n\nlangInstance = langInfo()\napp = webapp2.WSGIApplication(\n [\n ('/phk/', base.LanguagesHomeHandler),\n ('/phk/keyboard/', base.LanguagesHomeHandler),\n ('/phk/convertUI/', base.ConvertUIHandler),\n ('/phk/downloads/', base.Downloads),\n ('/phk/converter/', base.ConvertUIHandler),\n ('/phk/encodingRules/', base.EncodingRules),\n ('/phk/diacritic/', base.DiacriticHandler),\n ('/phk/render/', base.EncodingRules),\n ],\n debug=True,\n config={'langInfo': langInstance}\n)\n","sub_path":"phake.py","file_name":"phake.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"367480898","text":"import json\n\nimport typer\nfrom more_itertools import chunked\n\nfrom connection import app_search, engine_name\n\n\ndef upload_dict(data):\n for body in chunked(data, 100):\n app_search.index_documents(\n engine_name=engine_name,\n documents=body,\n )\n\n\ndef upload(filename: typer.FileText, gen_ids: bool = typer.Option(False, \"-i\")):\n json_data = json.load(filename)\n\n if gen_ids:\n for entry in json_data:\n print(entry)\n entry[\"id\"] = hash_id(entry[\"name\"] + entry[\"url\"])\n\n upload_dict(json_data)\n\n\nif __name__ == \"__main__\":\n typer.run(upload)\n","sub_path":"upload/upload_to_appsearch.py","file_name":"upload_to_appsearch.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"85640156","text":"\"\"\"\r\n===============================================\r\nvidgear library source-code is deployed under the Apache 2.0 License:\r\n\r\nCopyright (c) 2019 Abhishek Thakur(@abhiTronix) \r\n\r\nLicensed under the Apache License, Version 2.0 (the \"License\");\r\nyou may not use this file except in compliance with the License.\r\nYou may obtain a copy of the License at\r\n\r\n http://www.apache.org/licenses/LICENSE-2.0\r\n\r\nUnless required by applicable law or agreed to in writing, software\r\ndistributed under the License is distributed on an \"AS IS\" BASIS,\r\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\nSee the License for the specific language governing permissions 
and\r\nlimitations under the License.\r\n===============================================\r\n\"\"\"\r\n\r\n# Contains all the support functions/modules required by Vidgear\r\n\r\n# import the necessary packages\r\nimport os, sys, requests, platform, errno\r\nimport numpy as np\r\nfrom pkg_resources import parse_version\r\nfrom colorlog import ColoredFormatter\r\nfrom tqdm import tqdm\r\nimport logging as log\r\n\r\ntry:\r\n # import OpenCV Binaries\r\n import cv2\r\n\r\n # check whether OpenCV Binaries are 3.x+\r\n if parse_version(cv2.__version__) < parse_version(\"3\"):\r\n raise ImportError(\r\n \"[Vidgear:ERROR] :: Installed OpenCV API version(< 3.0) is not supported!\"\r\n )\r\nexcept ImportError:\r\n raise ImportError(\r\n \"[Vidgear:ERROR] :: Failed to detect correct OpenCV executables, install it with `pip3 install opencv-python` command.\"\r\n )\r\n\r\n\r\ndef logger_handler():\r\n \"\"\"\r\n returns logger handler\r\n \"\"\"\r\n # logging formatter\r\n formatter = ColoredFormatter(\r\n \"%(bold_blue)s%(name)s%(reset)s :: %(log_color)s%(levelname)s%(reset)s :: %(message)s\",\r\n datefmt=None,\r\n reset=True,\r\n log_colors={\r\n \"INFO\": \"bold_green\",\r\n \"DEBUG\": \"bold_yellow\",\r\n \"WARNING\": \"bold_purple\",\r\n \"ERROR\": \"bold_red\",\r\n \"CRITICAL\": \"bold_red,bg_white\",\r\n },\r\n )\r\n # define handler\r\n handler = log.StreamHandler()\r\n handler.setFormatter(formatter)\r\n return handler\r\n\r\n\r\n# define logger\r\nlogger = log.getLogger(\"Helper\")\r\nlogger.addHandler(logger_handler())\r\nlogger.setLevel(log.DEBUG)\r\n\r\n\r\ndef check_CV_version():\r\n \"\"\"\r\n returns OpenCV binary in-use version first bit \r\n \"\"\"\r\n if parse_version(cv2.__version__) >= parse_version(\"4\"):\r\n return 4\r\n else:\r\n return 3\r\n\r\n\r\ndef mkdir_safe(dir, logging=False):\r\n \"\"\"\r\n Simply creates directory safely\r\n \"\"\"\r\n try:\r\n os.makedirs(dir)\r\n if logging:\r\n logger.debug(\"Created directory at `{}`\".format(dir))\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n if logging:\r\n logger.debug(\"Directory already exists at `{}`\".format(dir))\r\n\r\n\r\ndef capPropId(property):\r\n \"\"\"\r\n Retrieves the OpenCV property's Integer(Actual) value. 
\r\n \"\"\"\r\n integer_value = 0\r\n try:\r\n integer_value = getattr(cv2, property)\r\n except Exception as e:\r\n logger.exception(str(e))\r\n logger.critical(\"`{}` is not a valid OpenCV property!\".format(property))\r\n return None\r\n return integer_value\r\n\r\n\r\ndef dict2Args(param_dict):\r\n \"\"\"\r\n converts dict to list(args)\r\n \"\"\"\r\n args = []\r\n for key in param_dict.keys():\r\n args.append(key)\r\n args.append(param_dict[key])\r\n return args\r\n\r\n\r\ndef get_valid_ffmpeg_path(\r\n custom_ffmpeg=\"\", is_windows=False, ffmpeg_download_path=\"\", logging=False\r\n):\r\n \"\"\"\r\n Validate the FFmpeg path/binaries and returns valid FFmpeg file executable location(also downloads static binaries on windows) \r\n \"\"\"\r\n final_path = \"\"\r\n if is_windows:\r\n # checks if current os is windows\r\n if custom_ffmpeg:\r\n # if custom FFmpeg path is given assign to local variable\r\n final_path += custom_ffmpeg\r\n else:\r\n # otherwise auto-download them\r\n try:\r\n if not (ffmpeg_download_path):\r\n # otherwise save to Temp Directory\r\n import tempfile\r\n\r\n ffmpeg_download_path = tempfile.gettempdir()\r\n\r\n if logging:\r\n logger.debug(\r\n \"FFmpeg Windows Download Path: {}\".format(ffmpeg_download_path)\r\n )\r\n\r\n # download Binaries\r\n os_bit = (\r\n (\"win64\" if platform.machine().endswith(\"64\") else \"win32\")\r\n if is_windows\r\n else \"\"\r\n )\r\n _path = download_ffmpeg_binaries(\r\n path=ffmpeg_download_path, os_windows=is_windows, os_bit=os_bit\r\n )\r\n # assign to local variable\r\n final_path += _path\r\n\r\n except Exception as e:\r\n # log if any error occurred\r\n if logging:\r\n logger.exception(str(e))\r\n logger.debug(\r\n \"Error in downloading FFmpeg binaries, Check your network and Try again!\"\r\n )\r\n return False\r\n\r\n if os.path.isfile(final_path):\r\n # check if valid FFmpeg file exist\r\n pass\r\n elif os.path.isfile(os.path.join(final_path, \"ffmpeg.exe\")):\r\n # check if FFmpeg directory exists, if does, then check for valid file\r\n final_path = os.path.join(final_path, \"ffmpeg.exe\")\r\n else:\r\n # else return False\r\n if logging:\r\n logger.debug(\"No valid FFmpeg executables found at Custom FFmpeg path!\")\r\n return False\r\n else:\r\n # otherwise perform test for Unix\r\n if custom_ffmpeg:\r\n # if custom FFmpeg path is given assign to local variable\r\n if os.path.isfile(custom_ffmpeg):\r\n # check if valid FFmpeg file exist\r\n final_path += custom_ffmpeg\r\n elif os.path.isfile(os.path.join(custom_ffmpeg, \"ffmpeg\")):\r\n # check if FFmpeg directory exists, if does, then check for valid file\r\n final_path = os.path.join(custom_ffmpeg, \"ffmpeg\")\r\n else:\r\n # else return False\r\n if logging:\r\n logger.debug(\r\n \"No valid FFmpeg executables found at Custom FFmpeg path!\"\r\n )\r\n return False\r\n else:\r\n # otherwise assign ffmpeg binaries from system\r\n final_path += \"ffmpeg\"\r\n\r\n if logging:\r\n logger.debug(\"Final FFmpeg Path: {}\".format(final_path))\r\n\r\n # Final Auto-Validation for FFmeg Binaries. 
returns final path if test is passed\r\n if validate_ffmpeg(final_path, logging=logging):\r\n return final_path\r\n else:\r\n return False\r\n\r\n\r\ndef download_ffmpeg_binaries(path, os_windows=False, os_bit=\"\"):\r\n \"\"\"\r\n Download and Extract FFmpeg Static Binaries for windows(if not available)\r\n \"\"\"\r\n final_path = \"\"\r\n if os_windows and os_bit:\r\n # initialize variables\r\n file_url = \"https://ffmpeg.zeranoe.com/builds/{}/static/ffmpeg-latest-{}-static.zip\".format(\r\n os_bit, os_bit\r\n )\r\n file_name = os.path.join(\r\n os.path.abspath(path), \"ffmpeg-latest-{}-static.zip\".format(os_bit)\r\n )\r\n file_path = os.path.join(\r\n os.path.abspath(path),\r\n \"ffmpeg-latest-{}-static/bin/ffmpeg.exe\".format(os_bit),\r\n )\r\n base_path, _ = os.path.split(file_name) # extract file base path\r\n # check if file already exists\r\n if os.path.isfile(file_path):\r\n final_path += file_path # skip download if does\r\n else:\r\n # import libs\r\n import zipfile\r\n\r\n # check if given path has write access\r\n assert os.access(path, os.W_OK), (\r\n \"[Helper:ERROR] :: Permission Denied, Cannot write binaries to directory = \"\r\n + path\r\n )\r\n # remove leftovers if exists\r\n if os.path.isfile(file_name):\r\n os.remove(file_name)\r\n # download and write file to the given path\r\n with open(file_name, \"wb\") as f:\r\n logger.debug(\r\n \"No Custom FFmpeg path provided. Auto-Installing FFmpeg static binaries now. Please wait...\"\r\n )\r\n try:\r\n response = requests.get(file_url, stream=True, timeout=2)\r\n response.raise_for_status()\r\n except Exception as e:\r\n logger.exception(str(e))\r\n logger.warning(\"Downloading Failed. Trying GitHub mirror now!\")\r\n file_url = \"https://raw.githubusercontent.com/abhiTronix/ffmpeg-static-builds/master/windows/ffmpeg-latest-{}-static.zip\".format(\r\n os_bit, os_bit\r\n )\r\n response = requests.get(file_url, stream=True, timeout=2)\r\n response.raise_for_status()\r\n total_length = response.headers.get(\"content-length\")\r\n assert not (\r\n total_length is None\r\n ), \"[Helper:ERROR] :: Failed to retrieve files, check your Internet connectivity!\"\r\n bar = tqdm(total=int(total_length), unit=\"B\", unit_scale=True)\r\n for data in response.iter_content(chunk_size=4096):\r\n f.write(data)\r\n if len(data) > 0:\r\n bar.update(len(data))\r\n bar.close()\r\n logger.debug(\"Extracting executables.\")\r\n with zipfile.ZipFile(file_name, \"r\") as zip_ref:\r\n zip_ref.extractall(base_path)\r\n # perform cleaning\r\n os.remove(file_name)\r\n logger.debug(\"FFmpeg binaries for Windows configured successfully!\")\r\n final_path += file_path\r\n # return final path\r\n return final_path\r\n\r\n\r\ndef validate_ffmpeg(path, logging=False):\r\n \"\"\"\r\n Validate FFmeg Binaries. 
returns True if tests passed\r\n \"\"\"\r\n try:\r\n # get the FFmpeg version\r\n version = check_output([path, \"-version\"])\r\n firstline = version.split(b\"\\n\")[0]\r\n version = firstline.split(b\" \")[2].strip()\r\n if logging:\r\n # log if test are passed\r\n logger.debug(\"FFmpeg validity Test Passed!\")\r\n logger.debug(\r\n \"Found valid FFmpeg Version: `{}` installed on this system\".format(\r\n version\r\n )\r\n )\r\n except Exception as e:\r\n # log if test are failed\r\n if logging:\r\n logger.exception(str(e))\r\n logger.warning(\"FFmpeg validity Test Failed!\")\r\n return False\r\n return True\r\n\r\n\r\ndef check_output(*args, **kwargs):\r\n \"\"\"\r\n return output from the sub-process\r\n \"\"\"\r\n # silent subprocess execution\r\n closeNULL = 0\r\n import subprocess as sp\r\n\r\n try:\r\n from subprocess import DEVNULL\r\n\r\n closeNULL = 0\r\n except ImportError:\r\n DEVNULL = open(os.devnull, \"wb\")\r\n closeNULL = 1\r\n # execute command in subprocess\r\n process = sp.Popen(stdout=sp.PIPE, stderr=DEVNULL, *args, **kwargs)\r\n output, unused_err = process.communicate()\r\n retcode = process.poll()\r\n # close the process\r\n if closeNULL:\r\n DEVNULL.close()\r\n # if error occurred raise error\r\n if retcode:\r\n cmd = kwargs.get(\"args\")\r\n if cmd is None:\r\n cmd = args[0]\r\n error = sp.CalledProcessError(retcode, cmd)\r\n error.output = output\r\n raise error\r\n return output\r\n\r\n\r\ndef generate_auth_certificates(path, overwrite=False, logging=False):\r\n\r\n \"\"\" \r\n auto-Generates and auto-validates CURVE ZMQ keys/certificates for Netgear \r\n \"\"\"\r\n\r\n # import necessary libs\r\n import shutil\r\n import zmq.auth\r\n\r\n # check if path corresponds to vidgear only\r\n if os.path.basename(path) != \".vidgear\":\r\n path = os.path.join(path, \".vidgear\")\r\n\r\n # generate keys dir\r\n keys_dir = os.path.join(path, \"keys\")\r\n mkdir_safe(keys_dir, logging=logging)\r\n\r\n # generate separate public and private key dirs\r\n public_keys_dir = os.path.join(keys_dir, \"public_keys\")\r\n secret_keys_dir = os.path.join(keys_dir, \"private_keys\")\r\n\r\n # check if overwriting is allowed\r\n if overwrite:\r\n # delete previous certificates\r\n for dirs in [public_keys_dir, secret_keys_dir]:\r\n if os.path.exists(dirs):\r\n shutil.rmtree(dirs)\r\n mkdir_safe(dirs, logging=logging)\r\n\r\n # generate new keys\r\n server_public_file, server_secret_file = zmq.auth.create_certificates(\r\n keys_dir, \"server\"\r\n )\r\n client_public_file, client_secret_file = zmq.auth.create_certificates(\r\n keys_dir, \"client\"\r\n )\r\n\r\n # move keys to their appropriate directory respectively\r\n for key_file in os.listdir(keys_dir):\r\n if key_file.endswith(\".key\"):\r\n shutil.move(os.path.join(keys_dir, key_file), public_keys_dir)\r\n elif key_file.endswith(\".key_secret\"):\r\n shutil.move(os.path.join(keys_dir, key_file), secret_keys_dir)\r\n else:\r\n # clean redundant keys if present\r\n redundant_key = os.path.join(keys_dir, key_file)\r\n if os.path.isfile(redundant_key):\r\n os.remove(redundant_key)\r\n else:\r\n # otherwise validate available keys\r\n status_public_keys = validate_auth_keys(public_keys_dir, \".key\")\r\n status_private_keys = validate_auth_keys(secret_keys_dir, \".key_secret\")\r\n\r\n # check if all valid keys are found\r\n if status_private_keys and status_public_keys:\r\n return (keys_dir, secret_keys_dir, public_keys_dir)\r\n\r\n # check if valid public keys are found\r\n if not (status_public_keys):\r\n 
mkdir_safe(public_keys_dir, logging=logging)\r\n\r\n # check if valid private keys are found\r\n if not (status_private_keys):\r\n mkdir_safe(secret_keys_dir, logging=logging)\r\n\r\n # generate new keys\r\n server_public_file, server_secret_file = zmq.auth.create_certificates(\r\n keys_dir, \"server\"\r\n )\r\n client_public_file, client_secret_file = zmq.auth.create_certificates(\r\n keys_dir, \"client\"\r\n )\r\n\r\n # move keys to their appropriate directory respectively\r\n for key_file in os.listdir(keys_dir):\r\n if key_file.endswith(\".key\") and not (status_public_keys):\r\n shutil.move(\r\n os.path.join(keys_dir, key_file), os.path.join(public_keys_dir, \".\")\r\n )\r\n elif key_file.endswith(\".key_secret\") and not (status_private_keys):\r\n shutil.move(\r\n os.path.join(keys_dir, key_file), os.path.join(secret_keys_dir, \".\")\r\n )\r\n else:\r\n # clean redundant keys if present\r\n redundant_key = os.path.join(keys_dir, key_file)\r\n if os.path.isfile(redundant_key):\r\n os.remove(redundant_key)\r\n\r\n # validate newly generated keys\r\n status_public_keys = validate_auth_keys(public_keys_dir, \".key\")\r\n status_private_keys = validate_auth_keys(secret_keys_dir, \".key_secret\")\r\n\r\n # raise error is validation test fails\r\n if not (status_private_keys) or not (status_public_keys):\r\n raise RuntimeError(\r\n \"[Helper:ERROR] :: Unable to generate valid ZMQ authentication certificates at `{}`!\".format(\r\n keys_dir\r\n )\r\n )\r\n\r\n # finally return valid key paths\r\n return (keys_dir, secret_keys_dir, public_keys_dir)\r\n\r\n\r\ndef validate_auth_keys(path, extension):\r\n\r\n \"\"\"\r\n validates and maintains ZMQ Auth Keys/Certificates\r\n \"\"\"\r\n # check for valid path\r\n if not (os.path.exists(path)):\r\n return False\r\n\r\n # check if directory empty\r\n if not (os.listdir(path)):\r\n return False\r\n\r\n keys_buffer = [] # stores auth-keys\r\n\r\n # loop over auth-keys\r\n for key_file in os.listdir(path):\r\n key = os.path.splitext(key_file)\r\n # check if valid key is generated\r\n if key and (key[0] in [\"server\", \"client\"]) and (key[1] == extension):\r\n keys_buffer.append(key_file) # store it\r\n\r\n # remove invalid keys if found\r\n if len(keys_buffer) == 1:\r\n os.remove(os.path.join(path, keys_buffer[0]))\r\n\r\n # return results\r\n return True if (len(keys_buffer) == 2) else False\r\n","sub_path":"vidgear/gears/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":16816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"185423163","text":"\"\"\"PSO\"\"\"\n# -*- coding: utf-8 -*-\nimport copy\nfrom . 
import lib\nfrom .IndivClass import Individuo, Caminho\n\n\nclass PSO(object): \n config = ''\n pontos = ''\n individuos = []\n gbest = ''\n log1 = []\n log2 = [] \n def __init__(self, config, nrCidades, pontos):\n # Instanciando variáveis\n self.individuos = []\n # Gravando configurações na instância\n self.config = config\n # Gravando pontos de destino\n self.pontos = pontos \n # Inicializando Gbest\n self.gbest = Caminho()\n # Loop que cria os indivíduos e adiciona na lista\n for i in range(0, config.nr_indiv): \n indiv = Individuo(nrCidades)\n indiv.atual.calcDistancia(pontos)\n self.individuos.append(indiv)\n self.gera_pbest()\n self.gera_gbest()\n \n def gera_pbest(self):\n for indiv in self.individuos: \n if (indiv.pbest.distTotal == 0) or (indiv.pbest.distTotal > indiv.atual.distTotal):\n indiv.pbest = copy.deepcopy(indiv.atual)\n \n def gera_gbest(self): \n if (self.gbest.distTotal == 0):\n self.gbest = copy.deepcopy(self.individuos[0].atual)\n for indiv in self.individuos:\n if (self.gbest.distTotal > indiv.atual.distTotal) : \n self.gbest = copy.deepcopy(indiv.atual) \n \n def crossover(self, original, adicional, k, m):\n lista = copy.deepcopy(original)\n adc = copy.deepcopy(adicional)\n del lista[-1]\n del adc[-1]\n result = []\n result = list((adc[k : k + m]) + [x for x in lista if x not in adc[k:k+m]]) \n result.append(result[0])\n return result\n \n \n \n def gera_iteracoes(self, nrTotal):\n for i in range(0, nrTotal):\n \n for indiv in self.individuos:\n indiv.gera_constant() \n \n result = list(self.crossover(indiv.atual.caminho,\n indiv.pbest.caminho, \n indiv.k[0], indiv.m[0])) \n indiv.atual.caminho = list(self.crossover(result,\n self.gbest.caminho, \n indiv.k[1], indiv.m[1]))\n indiv.atual.calcDistancia(self.pontos)\n\n self.gera_pbest()\n self.gera_gbest() \n return self \n \n \n \n \n ","sub_path":"psoweb/py/PSOClass.py","file_name":"PSOClass.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"48889740","text":"import json\nimport pprint\nimport re\nimport item_map\n\npp = pprint.PrettyPrinter(indent=4)\n\nitem_file = open('./json/item.json')\nitem_json = json.load(item_file)['data']\n\n# Gets Basic Info for an Item and returns a dictionary\ndef get_item_info(item_name : str):\n item_id = item_map.get_item_id_from_name(item_name=item_name)\n\n item_info = item_json[item_id]\n\n pp.pprint(item_info)\n\n #Clean up Item Description\n description = item_info['description']\n\n tag_list = ['
', '', '']\n for tag in tag_list:\n description = description.replace(tag, '\\n')\n\n groomed_description = ' '\n\n keep_letter = True\n for letter in description:\n if letter in ['<', '>']:\n keep_letter = not keep_letter\n continue\n if keep_letter:\n groomed_description += letter\n \n\n return_info = {\n 'name' : item_info['name'],\n 'description' : groomed_description,\n 'plaintext' : item_info['plaintext'],\n 'price' : item_info['gold']['total'],\n 'image' : item_info['image']['full']\n }\n\n return return_info\n\n\nif __name__ == \"__main__\":\n data = get_item_info(\"Black Cleaver\")\n pp.pprint(data)","sub_path":"item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"633707153","text":"from sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.pipeline import Pipeline\nimport numpy as np\n\n\nclass MLearner:\n\n def __init__(self, budget, ndim, power_model):\n self.budget = budget\n self.degree = ndim\n self.model = power_model\n\n def discover(self, observation_noise_level=0):\n # performance models has interaction degree of two, based on our study\n model = Pipeline([(\"poly\", PolynomialFeatures(degree=2, interaction_only=True, include_bias=True)),\n (\"linear\", LinearRegression(fit_intercept=True))])\n\n # take some ran dom samples\n # this should be replaced with pair wise sampling\n X = np.random.randint(2, size=(self.budget, self.degree))\n y = self.model.evaluateModelFast(X)\n if observation_noise_level != 0:\n #print(\"Noise observation: N(0, {})\".format(observation_noise_level))\n y = y + np.random.normal(\n loc=0.0,\n scale=observation_noise_level,\n size=self.budget)\n\n # fit the polynomial model regression\n pmodel = model.fit(X, y)\n\n return pmodel\n\n def get_pareto_frontier(self, Xs, Ys, maxX=True, maxY=True):\n # Sort the list in either ascending or descending order of X\n myList = sorted([[Xs[i], Ys[i]] for i in range(len(Xs))], reverse=maxX)\n idx_sorted = sorted(range(len(Xs)), key=lambda k: Xs[k])\n # Start the Pareto frontier with the first value in the sorted list\n p_front = [myList[0]]\n i = 0\n pareto_idx = [idx_sorted[i]]\n # Loop through the sorted list\n for pair in myList[1:]:\n i += 1\n if maxY:\n if pair[1] >= p_front[-1][1]:\n p_front.append(pair)\n pareto_idx.append(idx_sorted[i])\n else:\n if pair[1] <= p_front[-1][1]:\n p_front.append(pair)\n pareto_idx.append(idx_sorted[i])\n p_frontX = [pair[0] for pair in p_front]\n p_frontY = [pair[1] for pair in p_front]\n return pareto_idx, p_frontX, p_frontY\n\n\n","sub_path":"learner/mlearner.py","file_name":"mlearner.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"556821877","text":"\"\"\"empty message\n\nRevision ID: e70c280131\nRevises: 2c0b16f53a4\nCreate Date: 2016-04-24 10:52:14.955610\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'e70c280131'\ndown_revision = '2c0b16f53a4'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('famous',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('content', sa.Text(), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('famous')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/e70c280131_.py","file_name":"e70c280131_.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"345361884","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport numpy as np\nimport random\nfrom scipy.special import expit\n\nclass AttentionLayer(nn.Module):\n def __init__(self, hidden_size):\n super(AttentionLayer, self).__init__()\n\n self.hidden_size = hidden_size\n self.match = nn.Linear(2*hidden_size, hidden_size)\n self.to_weight = nn.Linear(hidden_size, 1, bias=False)\n\n\n def forward(self, hidden_state, encoder_outputs):\n \"\"\"\n Arguments:\n (decoder current) hidden_state {Variable} -- (1, batch, hidden_size)\n encoder_outputs {Variable} -- (batch, seq_len, hidden_size) \n Returns:\n Variable -- context vector of size batch_size x dim\n \"\"\"\n\n batch_size, seq_len, feat_n = encoder_outputs.size()\n # Resize hidden_state and copy it seq_len times, so that we can get its attention\n # with each encoder_output\n hidden_state = hidden_state.view(batch_size, 1, feat_n).repeat(1, seq_len, 1)\n\n matching_inputs = torch.cat((encoder_outputs, hidden_state), 2).view(-1, 2*self.hidden_size)\n\n attention_weights = self.to_weight(self.match(matching_inputs))\n attention_weights = attention_weights.view(batch_size, seq_len)\n attention_weights = F.softmax(attention_weights, dim=1)\n\n context = torch.bmm(attention_weights.unsqueeze(1), encoder_outputs).squeeze(1)\n\n return context\n\n\nclass EncoderRNN(nn.Module):\n def __init__(self, word_vec_filepath='word_vectors.npy', hidden_size=1024, num_layers=2): # TODO: use more than 1 layer (may have to adjust other modules)\n super(EncoderRNN, self).__init__()\n \n self.hidden_size = hidden_size\n \n # load pretrained embedding\n pretrained = np.load(word_vec_filepath)\n self.vocab_size = pretrained.shape[0]\n self.word_vec_dim = pretrained.shape[1]\n \n embedding = nn.Embedding(num_embeddings=self.vocab_size, embedding_dim=self.word_vec_dim)\n embedding.weight = nn.Parameter(torch.Tensor(pretrained)) # requires_grad == True\n self.embedding = embedding # TODO: can let encoder and decoder share embeddings\n \n # feed word vector into encoder GRU\n self.gru = nn.GRU(input_size=self.word_vec_dim, hidden_size=self.hidden_size, num_layers=num_layers, batch_first=True)\n\n def forward(self, input): # input: (batch_size, sentence_length)\n \"\"\"\n param input: input sequence with shape (batch size, sequence_length)\n return: gru output, hidden state\n \"\"\" \n word_embeddings = self.embedding(input) # (batch_size, sentence_length, word_vec_dim)\n top_layer_output, last_time_step_all_layers_output = self.gru(word_embeddings)\n # top_layer_output: (seq_len, batch, hidden_size * num_directions)\n # last_time_step_all_layers_output: (num_layers * num_directions, batch, hidden_size)\n \n return top_layer_output, last_time_step_all_layers_output\n\nclass DecoderRNN(nn.Module):\n def __init__(self, word_vec_filepath='word_vectors.npy', hidden_size=1024, num_layers=2): # 
TODO: use more than 1 layer (may have to adjust other modules)\n super(DecoderRNN, self).__init__()\n\n # define hyper parameters\n self.hidden_size = hidden_size # size of gru's Y and H\n \n # load pretrained embedding\n pretrained = np.load(word_vec_filepath)\n self.vocab_size = pretrained.shape[0]\n self.word_vec_dim = pretrained.shape[1]\n \n embedding = nn.Embedding(num_embeddings=self.vocab_size, embedding_dim=self.word_vec_dim)\n embedding.weight = nn.Parameter(torch.Tensor(pretrained)) # requires_grad == True\n self.embedding = embedding # TODO: can let encoder and decoder share embeddings\n\n # gru input is word vector of prev_output_word (one hot), plus attention context vector\n self.gru = nn.GRU(self.word_vec_dim+self.hidden_size, hidden_size=self.hidden_size, num_layers=num_layers, batch_first=True)\n self.attention = AttentionLayer(self.hidden_size)\n # output is softmax over entire vocabulary\n self.to_final_output = nn.Linear(self.hidden_size, self.vocab_size)\n\n def forward(self, encoder_last_hidden_state, encoder_output, targets=None, mode='train', steps=None):\n \"\"\"\n :param encoder_last_hidden_state: (num_layers * num_directions, batch, hidden_size)\n :param encoder_output: (batch, length_prev_sentences, hidden_size * num_directions)\n :param targets: (batch, length_curr_sentences) target ground truth sentences\n :param steps: just a parameter used for calculating scheduled sampling, unrelated to RNN time steps\n :return:\n \"\"\"\n\n # parameters used in both train and inference stage\n _, batch_size, _ = encoder_last_hidden_state.size()\n decoder_current_hidden_state = encoder_last_hidden_state # (encoder_num_layers * num_directions, batch, hidden_size)\n decoder_current_input_word = Variable(torch.ones(batch_size, 1)).long() # (batch x word index)\n decoder_current_input_word = decoder_current_input_word.cuda() if torch.cuda.is_available() else decoder_current_input_word\n seq_Prob = []\n seq_predictions = []\n\n\n if targets is None:\n raise NotImplementedError('Training target is None. Error location: RNNDecoder')\n if steps is None:\n raise NotImplementedError('steps is not specified. 
Error location: RNNDecoder -> steps')\n\n # targets is only used for scheduled sampling, not used for calculating loss\n targets = self.embedding(targets) # (batch, max_seq_len, embedding_size) embeddings of target labels of ground truth sentences\n _, seq_len, _ = targets.size()\n\n for i in range(seq_len-1): # Decoder will never have EOS as input\n \"\"\"\n we implement the decoding procedure in a step by step fashion\n so the seq_len is always 1\n \"\"\"\n threshold = self._get_teacher_learning_ratio(training_steps=steps)\n \n # target[:, i]: (batch, 1, embedding_size)\n current_input_word = targets[:, i] if random.random() < threshold \\\n else self.embedding(decoder_current_input_word)\n # current_input_word: (batch, 1, embedding_size)\n\n # weighted sum of the encoder output w.r.t the current hidden state\n context = self.attention(decoder_current_hidden_state[-1], encoder_output) # (1, batch, hidden_size) (batch, seq_len, hidden_size) \n # context: (batch, hidden_size)\n gru_input = torch.cat([current_input_word.squeeze(1), context], dim=1).unsqueeze(1)\n # gru_input: (batch, 1, embedding_size+hidden_size)\n\n # only runs for one time step because sequence length is only 1\n gru_output, decoder_current_hidden_state = self.gru(gru_input, decoder_current_hidden_state)\n # gru_output (last time step): (batch, seq_length==1, hidden_size * num_directions)\n # decoder_current_hidden_state (last layer): (num_layers * num_directions, batch, hidden_size)\n\n # project the dim of the gru output to match the final decoder output dim\n # logprob = F.log_softmax(self.to_final_output(gru_output.squeeze(1)), dim=1)\n prob = self.to_final_output(gru_output.squeeze(1)) # prob: (batch, vocab_size)\n seq_Prob.append(prob)\n\n decoder_current_input_word = prob.max(1)[1]\n \n # seq_Prob: list of [(batch, vocab_size), (batch, vocab_size)], len(list) == seq_len\n seq_Prob = torch.stack(seq_Prob, dim=1)\n # seq_Prob: (batch, seq_len, vocab_size)\n \n seq_predictions = seq_Prob.max(2)[1]\n # seq_predictions: (batch, seq_length)\n\n return seq_Prob, seq_predictions\n\n # basically same as forward(), but without scheduled sampling\n def infer(self, encoder_last_hidden_state, encoder_output, assumption_seq_len=28):\n _, batch_size, _ = encoder_last_hidden_state.size()\n decoder_current_hidden_state = encoder_last_hidden_state # (encoder_num_layers * num_directions, batch, hidden_size)\n decoder_current_input_word = Variable(torch.ones(batch_size, 1)).long() # (batch x word index)\n decoder_current_input_word = decoder_current_input_word.cuda() if torch.cuda.is_available() else decoder_current_input_word\n seq_Prob = []\n seq_predictions = []\n\n for i in range(assumption_seq_len-1): # run for fixed amount of time steps\n\n current_input_word = self.embedding(decoder_current_input_word)\n\n context = self.attention(decoder_current_hidden_state[-1], encoder_output)\n\n gru_input = torch.cat([current_input_word.squeeze(1), context], dim=1).unsqueeze(1)\n\n gru_output, decoder_current_hidden_state = self.gru(gru_input, decoder_current_hidden_state)\n\n prob = self.to_final_output(gru_output.squeeze(1))\n seq_Prob.append(prob)\n\n decoder_current_input_word = prob.max(1)[1]\n\n seq_Prob = torch.stack(seq_Prob, dim=1)\n\n seq_predictions = seq_Prob.max(2)[1]\n\n return seq_Prob, seq_predictions\n \n # beammmmmm search!\n def beam_search(self, encoder_last_hidden_state, encoder_output, assumption_seq_len=27):\n _, batch_size, _ = encoder_last_hidden_state.size()\n decoder_current_hidden_state = 
encoder_last_hidden_state # (encoder_num_layers * num_directions, batch, hidden_size)\n decoder_current_input_word = Variable(torch.ones(batch_size, 1)).long() # (batch x word index)\n decoder_current_input_word = decoder_current_input_word.cuda() if torch.cuda.is_available() else decoder_current_input_word\n #seq_Prob_1 = 0\n #seq_Prob_2 = 0\n seq_predictions_1 = []\n seq_predictions_2 = []\n \n k = 2\n \n current_input_word = self.embedding(decoder_current_input_word)\n context = self.attention(decoder_current_hidden_state[-1], encoder_output)\n gru_input = torch.cat([current_input_word.squeeze(1), context], dim=1).unsqueeze(1)\n gru_output, decoder_current_hidden_state = self.gru(gru_input, decoder_current_hidden_state)\n prob = self.to_final_output(gru_output.squeeze(1))\n logprob = F.log_softmax(prob, dim=1)\n beam_word = logprob.topk(k, dim=1, largest=True, sorted=True)\n \n \n decoder_current_input_word_1 = beam_word[1][0][0].clone()\n decoder_current_input_word_2 = beam_word[1][0][1].clone()\n decoder_current_hidden_state_1 = decoder_current_hidden_state.clone()\n decoder_current_hidden_state_2 = decoder_current_hidden_state.clone()\n \n seq_predictions_1 = (decoder_current_input_word_1)\n seq_predictions_2 = (decoder_current_input_word_2)\n \n #print(seq_predictions_1)\n #print(seq_predictions_2)\n \n seq_Prob_1 = beam_word[0][0][0].clone()\n seq_Prob_2 = beam_word[0][0][1].clone()\n #print(seq_Prob_1)\n #print(seq_Prob_2)\n \n \n for i in range(assumption_seq_len-1): # run for fixed amount of time steps\n\n current_input_word_1 = self.embedding(decoder_current_input_word_1)\n current_input_word_2 = self.embedding(decoder_current_input_word_2)\n\n context_1 = self.attention(decoder_current_hidden_state_1[-1], encoder_output)\n context_2 = self.attention(decoder_current_hidden_state_2[-1], encoder_output)\n\n # use each beam's own attention context rather than the stale pre-loop context\n gru_input_1 = torch.cat([current_input_word_1.squeeze(1), context_1], dim=1).unsqueeze(1)\n gru_input_2 = torch.cat([current_input_word_2.squeeze(1), context_2], dim=1).unsqueeze(1)\n\n gru_output_1, decoder_current_hidden_state_1 = self.gru(gru_input_1, decoder_current_hidden_state_1)\n gru_output_2, decoder_current_hidden_state_2 = self.gru(gru_input_2, decoder_current_hidden_state_2)\n\n prob_1 = self.to_final_output(gru_output_1.squeeze(1))\n prob_2 = self.to_final_output(gru_output_2.squeeze(1))\n\n logprob_1 = F.log_softmax(prob_1, dim=1)\n logprob_2 = F.log_softmax(prob_2, dim=1)\n \n decoder_current_input_word_1 = logprob_1.max(1)[1]\n decoder_current_input_word_2 = logprob_2.max(1)[1]\n \n seq_predictions_1 = torch.cat((seq_predictions_1, decoder_current_input_word_1), dim=0)\n seq_predictions_2 = torch.cat((seq_predictions_2, decoder_current_input_word_2), dim=0)\n \n #print(seq_predictions_1)\n #print(seq_predictions_2)\n \n return seq_predictions_1, seq_predictions_2\n\n\n def _get_teacher_learning_ratio(self, training_steps): # TODO: change scheduled sampling scheme\n epoch = training_steps\n return max(30 - epoch/2, 0) / 30\n # for epochs 1 ~ 30, ratio is 0.9999 ~ 0.5\n # for epochs larger than 60, ratio is 0\n\n\n\nclass VideoCaptionGenerator(nn.Module):\n def __init__(self, encoder, decoder):\n super(VideoCaptionGenerator, self).__init__()\n\n self.encoder = encoder\n self.decoder = decoder\n\n\n def forward(self, prev_sentences, mode, curr_sentences=None, steps=None):\n \"\"\"\n Args:\n param prev_sentences(Variable): size(batch size x sentence length)\n param curr_sentences: ground truth for training, None for inference\n Returns:\n seq_Prob\n seq_predictions\n \"\"\"\n 
top_layer_output, last_time_step_all_layers_output = self.encoder(prev_sentences) # prev_sentences (batch_size, sentence_length)\n\n if mode == 'train': # (self, encoder_last_hidden_state, encoder_output, targets=None, mode='train', steps=None):\n seq_Prob, seq_predictions = self.decoder(\n encoder_last_hidden_state = last_time_step_all_layers_output,\n encoder_output = top_layer_output,\n targets = curr_sentences,\n mode = mode,\n steps=steps\n )\n\n elif mode == 'inference':\n seq_Prob, seq_predictions = self.decoder.infer(\n encoder_last_hidden_state= last_time_step_all_layers_output,\n encoder_output= top_layer_output,\n )\n \n elif mode == 'beam_search':\n seq_Prob, seq_predictions = self.decoder.beam_search(\n encoder_last_hidden_state= last_time_step_all_layers_output,\n encoder_output= top_layer_output,\n )\n \n else:\n raise KeyError('mode is not valid')\n\n return seq_Prob, seq_predictions\n # seq_Prob: (batch, seq_len, vocab_size)\n # seq_predictions: (batch, seq_length)\n\nif __name__ == '__main__':\n from dataset import TrainingDataset, collate_fn\n from vocabulary import Vocabulary\n from torch.utils.data import DataLoader\n\n training_data_path='data/clr_conversation.txt'\n helper = Vocabulary(training_data_path)\n dataset = TrainingDataset(training_data_path, helper)\n dataloader = DataLoader(dataset, batch_size=128, shuffle=True, num_workers=8, collate_fn=collate_fn)\n \n encoder = EncoderRNN()\n decoder = DecoderRNN()\n model = VideoCaptionGenerator(encoder=encoder, decoder=decoder)\n \n for batch_idx, batch in enumerate(dataloader):\n padded_prev_sentences, padded_curr_sentences, lengths_curr_sentences = batch\n padded_prev_sentences, padded_curr_sentences = Variable(padded_prev_sentences), Variable(padded_curr_sentences)\n\n step = 50\n seq_Prob, seq_predictions = model(prev_sentences=padded_prev_sentences, mode='train', curr_sentences=padded_curr_sentences, steps=step)\n\n print(seq_Prob)\n print()\n print(seq_predictions)\n break","sub_path":"hw2/hw2_2/mao_2-2/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":15925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"216078690","text":"import os\ndef c_print(*args, **kwargs):\n '''\n Uses ascii codes to enable colored print statements. 
Works on Mac, Linux and Windows terminals\n '''\n\n #Magic that makes colors work on windows terminals\n os.system('')\n \n #Define Colors for more readable output\n c_gray = '\\033[90m'\n c_red = '\\033[91m'\n c_green = '\\033[92m'\n c_yellow = '\\033[93m'\n c_blue = '\\033[94m'\n c_end = '\\033[0m'\n\n color = c_end\n if 'color' in kwargs:\n c = kwargs['color'].lower()\n if c == 'gray' or c == 'grey':\n color = c_gray\n elif c == 'red':\n color = c_red\n elif c == 'green':\n color = c_green\n elif c == 'yellow':\n color = c_yellow\n elif c == 'blue':\n color = c_blue\n else:\n color = c_end\n\n _end = '\\n'\n if 'end' in kwargs:\n _end = kwargs['end']\n\n print(f'{color}', end='')\n for val in args:\n print(val, end='')\n print(f'{c_end}', end=_end)","sub_path":"sdk/color_print.py","file_name":"color_print.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"391623159","text":"# -*- coding: utf-8 -*-\nfrom django import forms\nfrom oauth.views import get_note_store\n\n\nclass SettingForm(forms.Form):\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n super(SettingForm, self).__init__(*args, **kwargs)\n\n note_store = get_note_store(self.user.username)\n notebooks = note_store.listNotebooks()\n note_book_guid_list = []\n for notebook in notebooks:\n note_book_guid_list.append([notebook.guid, notebook.name])\n\n self.fields['notebook_guid'] = forms.ChoiceField(\n choices=note_book_guid_list,\n widget=forms.Select(attrs={'class': 'form-control'},),\n label=u'Markdown保存用ノートブック'\n )\n\n def clean(self):\n cleaned_data = super(SettingForm, self).clean()\n return cleaned_data\n\n class Meta:\n fields = ('notebook_guid',)\n","sub_path":"settings/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"317235135","text":"#!/usr/bin/env python3\n\nimport requests\n\nAPI = \"https://api.magicthegathering.io/v1/\"\n\ndef main():\n \"\"\"Run time code\"\"\"\n\n resp = requests.get(f\"{API}sets\")\n\n cardsets = resp.json().get(\"sets\")\n\n with open(\"mtgsets.index\", \"w\") as mtgfile:\n for cardset in cardsets:\n print(f\"{cardset.get('name')} -- {cardset.get('code')}\", file = mtgfile)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"mtg/apiMTG06.py","file_name":"apiMTG06.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"315016625","text":"import os\r\nfrom selenium.webdriver import Chrome, ChromeOptions\r\nimport time\r\nimport pandas as pd\r\n\r\n# Chromeを起動する関数\r\n\r\n\r\ndef set_driver(driver_path, headless_flg):\r\n # Chromeドライバーの読み込み\r\n options = ChromeOptions()\r\n\r\n # ヘッドレスモード(画面非表示モード)をの設定\r\n if headless_flg == True:\r\n options.add_argument('--headless')\r\n\r\n # 起動オプションの設定\r\n options.add_argument(\r\n '--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36')\r\n # options.add_argument('log-level=3')\r\n options.add_argument('--ignore-certificate-errors')\r\n options.add_argument('--ignore-ssl-errors')\r\n options.add_argument('--incognito') # シークレットモードの設定を付与\r\n\r\n # ChromeのWebDriverオブジェクトを作成する。\r\n return Chrome(executable_path=os.getcwd() + \"/\" + driver_path, options=options)\r\n\r\n# main処理\r\n\r\n\r\ndef main():\r\n search_keyword = 
input(\"検索ワードを入力してください:例 [ 社名、土日休み、未経験など ]\")\r\n # driverを起動\r\n if os.name == 'nt': #Windows\r\n driver = set_driver(\"chromedriver.exe\", False)\r\n elif os.name == 'posix': #Mac\r\n driver = set_driver(\"chromedriver\", False)\r\n # Webサイトを開く\r\n driver.get(\"https://tenshoku.mynavi.jp/\")\r\n time.sleep(5)\r\n \r\n try:\r\n # ポップアップを閉じる\r\n driver.execute_script('document.querySelector(\".karte-close\").click()')\r\n time.sleep(5)\r\n # ポップアップを閉じる\r\n driver.execute_script('document.querySelector(\".karte-close\").click()')\r\n except:\r\n pass\r\n \r\n # 検索窓に入力\r\n driver.find_element_by_class_name(\"topSearch__text\").send_keys(search_keyword)\r\n # 検索ボタンクリック\r\n driver.find_element_by_class_name(\"topSearch__button\").click()\r\n\r\n # ページ終了まで繰り返し取得\r\n exp_name_list = []\r\n exp_add_list = []\r\n exp_mony_list = []\r\n count = 1\r\n \r\n while True:\r\n #for num in range(1): #testに使用\r\n # 検索結果の一番上の会社名を取得\r\n name_list = driver.find_elements_by_class_name(\"cassetteRecruit__name\")\r\n #tableのclassを選択 → 下層にある tbody → さらに下層の3つ目のTrを選択 これが初年度年収 → tdに抜きたい値 \r\n add_list = driver.find_elements_by_xpath(\"//table[@class='tableCondition']/tbody/tr[3]/td\")\r\n #tableのclassを選択 → 下層にある tbody → さらに下層の5つ目のTrを選択 これが初年度年収 → tdに抜きたい値 \r\n mony_list = driver.find_elements_by_xpath(\"//table[@class='tableCondition']/tbody/tr[5]/td\")\r\n\r\n # 1ページ分繰り返し\r\n for name,add,mony in zip(name_list,add_list,mony_list):\r\n try:\r\n #余分な情報を消す\r\n name1 = name.text.split(' ')[0]\r\n #X = name.text.find(' ') #こっちでもOK\r\n #print(count,name.text[0:X],mony1) #こっちでもOK\r\n\r\n #出力\r\n print(count,name1,add.text,mony.text)\r\n\r\n #エラーの回避策\r\n except Exception as e:\r\n print(e)\r\n #最後に必ず実行される\r\n finally: \r\n count += 1\r\n \r\n # 次のページボタンがあればクリックなければ終了\r\n #button = driver.find_element_by_xpath(\"/html/body/div[1]/div[3]/form/div/nav[1]/ul/li[8]/a\") #タグは.click()ではなく、.get_attribute(“href”)でリンクURLを抜く\r\n #button = driver.find_element_by_xpath(\"/html/body/div[1]/div[3]/form/div/nav[1]/ul/li[7]/a\") #タグは.click()ではなく、.get_attribute(“href”)でリンクURLを抜く\r\n button = driver.find_elements_by_class_name(\"iconFont--arrowLeft\")\r\n if len(button) > 0 : #elementsで取得すると、listとして認識されるindexの指定をする必要あり\r\n #button[0].click() #タグは.click()できない\r\n button1 = button[0].get_attribute(\"href\")\r\n driver.get(button1)\r\n time.sleep(3)\r\n else:\r\n break\r\n\r\n# 直接起動された場合はmain()を起動(モジュールとして呼び出された場合は起動しないようにするため)\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"mynavi_sample_step4.py","file_name":"mynavi_sample_step4.py","file_ext":"py","file_size_in_byte":4579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"57526994","text":"def dfs(x: int, d: int):\n global ans\n global t\n global flag\n if d >= ans:\n return\n if x > t:\n ans = d\n return\n flag[x] = 0\n while flag[x] < 2:\n cnt[0] = 0\n cnt[1] = 0\n for i in range(1, x):\n if r[i] > l[x]:\n if flag[i] == flag[x] and r[i] < r[x]:\n cnt[1] += 1\n else:\n cnt[0] += 1\n dfs(x + 1, d + min(cnt[0], cnt[1]))\n flag[x] += 1\n\n\n\ndef work():\n global l\n global r\n global t\n global ans\n global a\n global n\n n = int(input())\n t = 0\n a = list(map(int, input().split(\" \")))\n a.insert(0, 0)\n for i in range(1, n+1):\n for j in range(i+1, n+1):\n if a[i] == a[j]:\n t += 1\n l[t] = i\n r[t] = j\n ans = 1061109567\n dfs(1, 0)\n print(ans)\n\n\nif __name__ == '__main__':\n T = int(input())\n for _ in range(T):\n n = 0\n ans = 0\n t = 0\n a = []\n l = [0] * 50\n r = [0] * 50\n flag = [0] * 50\n cnt = [0, 0]\n 
work()\n","sub_path":"Code/CodeRecords/2350/60705/297755.py","file_name":"297755.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"361831688","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 17 00:06:47 2018\n\n@author: rusty\n\"\"\"\n# https://www.hktvmall.com/\nimport json\nimport requests\nimport time\n\n#time test\n#start_time = time.time()\n\n\n# search with certain query\nquery= u'洗髮乳'\nmy_url ='https://8rn1y79f02-dsn.algolia.net/1/indexes/*/queries?x-algolia-agent=Algolia%20for%20vanilla%20JavaScript%203.29.0&x-algolia-application-id=8RN1Y79F02&x-algolia-api-key=4819ab3e709be2e853154599a364464a'\nuser_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\npage = 0\nvalue= {\"requests\":[{\n \"indexName\":\"hktvProduct\",\n \"params\":\"query=\"+ query +\n \"&filters=&facets=%5B%22*%22%5D&attributesToRetrieve=%5B%22*%22%5D&\"\n \"page=\"+ str(page) +\n \"&hitsPerPage=60&maxValuesPerFacet=1000\"}]}\n\nheaders = {'User-Agent': user_agent,\n# 'Referer': my_url,\n 'content-type': 'application/x-www-form-urlencoded',\n 'accept': 'application/json'\n\n }\n\ntry:\n req = requests.post(my_url, json=value , headers=headers, timeout = 0.2) \n print(req.status_code)\n req.encoding = 'utf-8' \n ans= json.loads(req.text)\n# for key , value in ans.items():\n# print (key)\n# print (value)\n# print(ans)\n for item in ans['results'][0]['hits']:\n print(item['code'],item['sellingPrice'],item['nameZh'],item['nameEn'],item['numberOfReviews'])\n \n# print(req.text)\nexcept requests.RequestException as e:\n print(e)\n \n#elapsed_time = time.time() - start_time\n#print (elapsed_time)\n\n\n","sub_path":"HKTVMALL/HKtvmall.py","file_name":"HKtvmall.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"319121226","text":"COLORS = [\n 'black',\n 'brown',\n 'red',\n 'orange',\n 'yellow',\n 'green',\n 'blue',\n 'violet',\n 'grey',\n 'white',\n]\n\ndef value(colors):\n codes = [COLORS.index(color) for color in colors]\n return sum(code * 10**i for i, code in enumerate(reversed(codes)))\n","sub_path":"python/resistor-colors/resistor_colors.py","file_name":"resistor_colors.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"613911723","text":"import re, string\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\ndef CleanInput(content):\n content = re.sub('\\n|\\[\\d+\\]', ' ', content) # clean newlines and citations\n content_bytes = content.encode('utf-8') # eliminate unicode escape characters\n content = content_bytes.decode('ascii', 'ignore') # filter non-English characters\n sentences = content.split('. 
')\n return [CleanSentence(sentence) for sentence in sentences]\n\ndef CleanSentence(sentence):\n wordlist = sentence.split(' ')\n wordlist = [word.strip(string.punctuation+string.whitespace)\n for word in wordlist] # clean words like 'ago(', '-'\n wordlist = [word for word in wordlist if len(word) > 1\n or (word.lower() == 'a' or word.lower() == 'i')]\n return wordlist \n \ndef getNgramsFromSentence(wordlist, n): # sentence: ['word1', 'word2', 'word3']\n output = []\n for i in range(len(wordlist)-n+1):\n output.append(wordlist[i:i+n])\n return output # [['word1', 'word2'], ['word2', 'word3']]\n\ndef getNgrams(content, n):\n sentences = CleanInput(content)\n ngrams = []\n for wordlist in sentences:\n ngrams.extend(getNgramsFromSentence(wordlist, n))\n return ngrams\n\n\nhtml = urlopen('https://en.wikipedia.org/wiki/Python_(programming_language)')\nbs = BeautifulSoup(html, 'html.parser')\ncontent = bs.find('div', {'id':'mw-content-text'}).get_text()\nwith open('origin_text.txt', 'w') as f:\n f.write(content)\n\ntwo_grams = getNgrams(content, 2)\nprint('2-grams count is {}'.format(len(two_grams)))\ntwo_grams_text = ''\nfor two_gram in two_grams:\n two_grams_text += two_gram.__repr__()\n two_grams_text += '\\n'\nwith open('2grams_cleaned_text.txt', 'w') as f:\n f.write(two_grams_text)\n","sub_path":"C8-CleaningDirtyData/2grams_cleaned.py","file_name":"2grams_cleaned.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"550492415","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\"\"\"\ntable : [0, 0, 0, 0, 0]\nvalue : [0, 1, 1, 2, 5]\n\"\"\"\n\ndef fibonacciModified(t1, t2, n):\n arr = [0] * n\n\n arr[0] = t1\n arr[1] = t2\n\n for i in range(2, len(arr)):\n arr[i] = arr[i-1]**2 + arr[i-2]\n\n return arr[-1]\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n t1T2n = input().split()\n\n t1 = int(t1T2n[0])\n\n t2 = int(t1T2n[1])\n\n n = int(t1T2n[2])\n\n result = fibonacciModified(t1, t2, n)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()","sub_path":"dynamic/fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"34268023","text":"from django.core.management.base import BaseCommand\nfrom django.utils import timezone\nfrom scheduler.models import RepeatableJob\n\n\nclass Command(BaseCommand):\n help = \"Creates a RepeatableJob to check for waiting builds.\"\n\n def handle(self, *args, **options):\n job, created = RepeatableJob.objects.get_or_create(\n callable=\"metaci.build.tasks.check_waiting_builds\",\n enabled=True,\n name=\"check_waiting_builds\",\n queue=\"short\",\n defaults={\n \"interval\": 1,\n \"interval_unit\": \"minutes\",\n \"scheduled_time\": timezone.now(),\n },\n )\n if created:\n self.stdout.write(\n self.style.SUCCESS(\n f\"Created job check_waiting_builds with id {job.id}\"\n )\n )\n else:\n self.stdout.write(\n self.style.SUCCESS(\n f\"Scheduled job check_waiting_builds with id {job.id} \"\n + f\"already exists and is {'enabled' if job.enabled else 'disabled'}.\"\n )\n )\n","sub_path":"metaci/build/management/commands/metaci_scheduled_jobs.py","file_name":"metaci_scheduled_jobs.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"197310794","text":"#!/usr/bin/env python3\n\n# Required 
parameters:\n# @raycast.schemaVersion 1\n# @raycast.title Import scripts from Script Kit\n# @raycast.mode fullOutput\n\n# Optional parameters:\n# @raycast.icon ./images/kit_logo.png\n# @raycast.packageName Script Kit\n\n# Documentation:\n# @raycast.description Convert all Script Kit scripts to Raycast Script Commands\n# @raycast.author Achille Lacoin\n# @raycast.authorURL https://github.com/pomdtr\n\nimport json\nimport os\nimport string\nimport sys\nimport glob\n\ndb_path = os.path.join(os.path.expanduser(\"~\"), \".kit\", \"db\", \"scripts.json\")\nif not os.path.exists(db_path):\n print(\n \"Kit is required! Installation Link -> https://www.scriptkit.com/\",\n file=sys.stderr,\n )\n sys.exit(1)\n\nwith open(db_path) as f:\n scripts_db = json.load(f)\n\nwith open(\"template.txt\") as f:\n RAYCAST_SCRIPT_TEMPLATE = string.Template(f.read())\n\nfor file in glob.glob(\"scripts/*.sh\"):\n os.remove(file)\n\nfor script in scripts_db[\"scripts\"]:\n if script[\"exclude\"]:\n print(\"Skipping {} since it is excluded!\".format(script[\"command\"]))\n continue\n print(\"Importing {}...\".format(script[\"command\"]))\n raycast_script_content = RAYCAST_SCRIPT_TEMPLATE.safe_substitute(\n {\n \"description\": script[\"description\"],\n \"title\": script[\"name\"],\n \"author\": script[\"author\"],\n \"command\": script[\"command\"]\n }\n )\n output_path = os.path.join(\"scripts\", script[\"command\"] + \".sh\")\n with open(output_path, \"w\") as f:\n f.write(raycast_script_content)\n\nprint(\"\\nImported {} scripts.\".format(len(scripts_db[\"scripts\"])))\n","sub_path":"commands/apps/script-kit/import-scripts-from-script-kit.py","file_name":"import-scripts-from-script-kit.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"227970711","text":"import xlrd\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nDATA_FILE = 'fire_theft.xls'\n\nbook = xlrd.open_workbook(DATA_FILE,encoding_override='utf-8')\nsheet = book.sheet_by_index(0)\n\ndata = np.asarray([sheet.row_values(i) for i in range(1,sheet.nrows)])\nnum_samples = sheet.nrows -1\n\n# defining flags\ntf.app.flags.DEFINE_integer('num_epochs',50,\n 'The number of epochs for training the model. 
Default = 50')\nFLAGS = tf.app.flags.FLAGS\n\n# creating the weight and bias\nW = tf.Variable(0.0,name=\"weights\")\nb = tf.Variable(0.0,name=\"bias\")\n\ndef inputs():\n \"\"\"\n Defining the placeholders\n :return:\n returning the data and label placeholders\n \"\"\"\n\n X = tf.placeholder(tf.float32,name=\"X\")\n Y = tf.placeholder(tf.float32,name=\"Y\")\n return X,Y\n\ndef inference(X):\n \"\"\"\n Forward passing the X.\n :param X: Input\n :return: X*W + b\n \"\"\"\n return X*W +b\n\ndef loss(X,Y):\n \"\"\"\n compute the loss by comparing the predicted value to the actual label.\n :param X: The input\n :param Y: The label\n :return: The loss over the samples\n \"\"\"\n # Making the prediction.\n Y_predicted = inference(X)\n return tf.squared_difference(Y,Y_predicted)\n\ndef train(loss):\n learning_rate = 0.0001\n return tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n\n\nwith tf.Session() as sess:\n # Initialize the variables[w and b].\n sess.run(tf.global_variables_initializer())\n\n # Get the input tensors\n X,Y = inputs()\n\n # build the train op once, before the loop (rebuilding it per step would grow the graph)\n train_loss = loss(X,Y)\n train_op = train(train_loss)\n\n for epoch_num in range(FLAGS.num_epochs):\n for x,y in data:\n loss_value,_ = sess.run([train_loss,train_op],feed_dict={X:x,Y:y})\n\n # Displaying the loss per epoch\n print('epoch %d,loss=%f'%(epoch_num+1,loss_value))\n\n # save the values of weight and bias\n wcoeff, bias = sess.run([W,b])\n\n# evaluate and plot\nInput_values = data[:,0]\nLabels = data[:,1]\nPrediction_values = data[:,0]*wcoeff + bias\nplt.plot(Input_values,Labels,'ro',label = 'main')\nplt.plot(Input_values,Prediction_values,label = 'Predicted')\n\n# Saving the result\nplt.legend()\nplt.savefig('plot.png')\nplt.close()\n\n\n\n","sub_path":"TensorFlow_Course_mindSet/ML/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"104305096","text":"'''Training the model & testing on Test set in this Script'''\n\nimport numpy as np\nimport time, sys, os\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nimport argparse\nimport matplotlib.pyplot as plt\n\nimport dataload, model\n\ndef print_function(i, num_batches, acc_temp):\n j = int(i/25)\n sys.stdout.write('\\r{0}'.format('Batches: ' + str(i+1) + '/' + str(num_batches) +\\\n ' [' + '='*j + '>' + (int((num_batches)/25)-j) * ' ' + '] ' + \\\n str(round((i+1)*100/num_batches)) + '%; Accuracy: ' + \\\n str(acc_temp)))\n sys.stdout.flush()\n pass\n\ndef data_augmentation(loader):\n t_loader = []\n \n for i, (batch_x, batch_y) in enumerate(loader):\n sess = tf.Session()\n num_sample = len(batch_x)\n aug_size = int(0.05*num_sample)\n idx = np.random.choice(np.arange(num_sample), replace=False, size=aug_size)\n b_x = batch_x[idx]\n b_y = batch_y[idx]\n\n rot_img = tf.image.flip_left_right(b_x.reshape((aug_size, 28, 28)))\n b_x = sess.run(rot_img).reshape((aug_size, 28 * 28))\n batch_x = np.vstack((batch_x, b_x))\n batch_y = np.vstack((batch_y, b_y))\n\n idx = np.random.choice(np.arange(num_sample), replace=False, size=aug_size)\n b_x = batch_x[idx]\n b_y = batch_y[idx]\n rot_img = tf.image.random_flip_up_down(b_x.reshape((aug_size, 28, 28)))\n b_x = sess.run(rot_img).reshape((aug_size, 28 * 28))\n batch_x = np.vstack((batch_x, b_x))\n batch_y = np.vstack((batch_y, b_y))\n \n idx = np.random.choice(np.arange(num_sample), replace=False, size=aug_size)\n b_x = batch_x[idx]\n b_y 
= batch_y[idx]\n rot_img = tf.image.random_flip_up_down(b_x.reshape((aug_size, 28, 28)))\n rot_img = tf.image.random_flip_left_right(rot_img, seed = None)\n b_x = sess.run(rot_img).reshape((aug_size, 28 * 28))\n batch_x = np.vstack((batch_x, b_x))\n batch_y = np.vstack((batch_y, b_y))\n\n t_loader.append((batch_x, batch_y))\n sess.close()\n return t_loader\n\ndef train(args = None):\n epochs = args.epochs\n validation = True\n train_stats, valid_stats = [], []\n train_stats_l, valid_stats_l = [], []\n \n if opt == 1:\n optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(model.loss)\n else:\n optimizer = tf.train.RMSPropOptimizer(learning_rate=lr).minimize(model.loss)\n \n saver = tf.train.Saver(tf.trainable_variables())\n\n config = tf.ConfigProto(intra_op_parallelism_threads=6, inter_op_parallelism_threads=6,\n device_count = {'CPU': 6})\n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n print('Training Started!!')\n \n num_batches = len(train_loader)\n best_valid_acc = 0\n best_valid_loss = 1e+10\n early_limit = 0\n\n for e in range(epochs):\n if early_limit > args.patience:\n print('Early stop! No improvement in Validation Loss since %s Epochs!'%(args.patience))\n print('-'*65)\n break\n \n print('Epoch: ', e+1)\n start = time.time()\n valid_score = 0\n train_accuracies, train_loss = [], []\n\n for i, (batch_x, batch_y) in enumerate(train_loader):\n num_sample = len(batch_x)\n \n acc_temp = round(np.mean(train_accuracies), 4) if len(train_accuracies) != 0 else 'NA'\n print_function(i, num_batches, acc_temp)\n\n _, acc, loss = sess.run([optimizer, model.accuracy, model.loss],\n feed_dict={x: batch_x, y: batch_y})\n train_accuracies.append(acc)\n train_loss.append(loss)\n acc = np.mean(train_accuracies)\n loss_train = np.mean(train_loss)\n t = time.time() - start\n print('\\nTime: %s sec; Traning Accuracy %.3f; Training Loss: %.3f'%(round(t), acc, loss_train))\n\n #Validation\n if validation:\n valid_accuracies, valid_loss = [], []\n for i, (batch_x, batch_y) in enumerate(valid_loader):\n acc, loss = sess.run([model.accuracy, model.loss], feed_dict = {x: batch_x, y: batch_y})\n valid_accuracies.append(acc)\n valid_loss.append(loss)\n valid_acc, loss_valid = np.mean(valid_accuracies), np.mean(valid_loss)\n print('Validation Accuracy %.3f; Validation Loss: %.3f'%(valid_acc, loss_valid))\n if valid_acc > best_valid_acc:\n acc = int(round(valid_acc*100)); best_valid_acc = valid_acc\n print('Best Validation Accuracy achieved at Epoch: %s is %.3f'%(e+1, best_valid_acc))\n ckpt_file = os.path.join(model_dir, 'best_model_da.ckpt')\n saver.save(sess, ckpt_file)\n if loss_valid < best_valid_loss:\n best_valid_loss = loss_valid\n early_limit = 0\n print('-'*65)\n early_limit = early_limit + 1\n train_stats.append(train_accuracies); train_stats_l.append(train_loss)\n valid_stats.append(valid_accuracies); valid_stats_l.append(valid_loss)\n print('Training Completed!!'); print('-'*65)\n return train_stats, train_stats_l, valid_stats, valid_stats_l\n \ndef test(args = None):\n config = tf.ConfigProto(intra_op_parallelism_threads=6, inter_op_parallelism_threads=6,\n device_count = {'CPU': 6})\n saver = tf.train.Saver(tf.trainable_variables())\n\n with tf.Session(config = config) as sess:\n sess.run(tf.local_variables_initializer())\n print('Testing the model on 10000 Images!')\n ckpt_file = os.path.join(model_dir, best_val_model)\n saver.restore(sess, ckpt_file)\n\n test_predicted = []\n for i, (batch_x, batch_y) in enumerate(test_loader):\n correct = 
sess.run([model.correct_predicted], feed_dict = {x: batch_x, y: batch_y})\n test_predicted.append(np.sum(correct))\n test_acc = np.sum(test_predicted)/num_samples_test\n print('Testing Accuracy %.3f'%(test_acc))\n pass\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-l', '--lr', type=float, default=1e-3, help='Learning rate for Optimiser')\nparser.add_argument('-b', '--batch_size', type=int, default=128, help='Batch size for mini-batch Opitimisation')\nparser.add_argument('-s', '--save_dir', type=str, default='./models/', help='directory to save the model')\nparser.add_argument('-o', '--opt', type=int, default=1, help='1 for Adam, 2 for RMSProp')\nparser.add_argument('-d', '--data_aug', type=int, default=2, help='1 for True, 2 for False')\nparser.add_argument('-t', '--test', type=int, default=0, help='1 for test, 0 for train')\nparser.add_argument('-e', '--epochs', type=int, default=40, help='Number of Epochs')\nparser.add_argument('-p', '--patience', type=int, default=10, help='Early stopping patience')\nparser.add_argument('-k', '--filter_size', type=int, default = 5, help='filter_size to be used for the model')\n\nargs = parser.parse_args()\n\nbatch_size = args.batch_size\nmodel_dir = args.save_dir\ntesting = args.test\nlr = args.lr\nopt = args.opt\nmode = 'train'\n\nLoader = dataload.DataLoader()\nx, y = Loader.load_data()\n\nx_train, x_valid, y_train, y_valid = train_test_split(x, y, test_size = 0.1)\n\nnum_classes = len(np.unique(y_train))\n\nx = tf.placeholder(dtype=tf.float32, shape = (None, x_train.shape[1]))\ny = tf.placeholder(dtype=tf.float32, shape = (None, num_classes))\n\nif args.filter_size == 5:\n model = model.CNN(x, y)\nelse:\n model = model.CNN3(x, y)\n\nbest_val_model = 'best_model_da.ckpt'\n\nif testing:\n x_test, y_test = Loader.load_data(mode = 'test')\n test_loader = Loader.create_batches(x_test, y_test, batch_size = batch_size)\n num_samples_test = x_test.shape[0]\n \n test(args)\nelse:\n if args.data_aug == 1:\n print('Data Augmentation started!!')\n start = time.time()\n loader = Loader.create_batches(x_train, y_train, batch_size = batch_size)\n train_loader = data_augmentation(loader)\n print('Data Augmentation complete! 
Time Elapsed: %s sec'%(int(time.time()-start)))\n else:\n train_loader = Loader.create_batches(x_train, y_train, batch_size = batch_size)\n \n valid_loader = Loader.create_batches(x_valid, y_valid, batch_size = batch_size)\n\n train_stats, train_stats_l, valid_stats, valid_stats_l = train(args)\n epoch_loss_t = np.mean(train_stats_l, axis = 1)\n epoch_loss_v = np.mean(valid_stats_l, axis = 1)\n\n num_epochs = np.arange(1, len(epoch_loss_t)+1)\n\n plt.plot(num_epochs, epoch_loss_t, marker = 'x')\n plt.plot(num_epochs, epoch_loss_v, marker = 'o')\n plt.xlabel('Epoch number')\n plt.ylabel('Cross Entropy Loss')\n plt.title('Loss vs Epoch')\n plt.legend(['Training Loss', 'Validation Loss'])\n plt.show()","sub_path":"adhoc_projects/fashion_mnist/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"641135478","text":"import math\ndef calculate_tip(amount, rating):\n d = {\n \"terrible\": 0, #0\n \"poor\" : amount*5/100,\n \"good\" : amount*10/100,\n \"great\" : amount*15/100,\n \"excellent\" : amount*20/100\n }\n return math.ceil(d[rating.lower()]) if(rating.lower() in d) else 'Rating not recognised'\n","sub_path":"Python_Exercises/Codewars/8_KYU/Tip_Calculator/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"256571408","text":"# Copyright (C) NVIDIA. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__author__ = \"Tomasz Kornuta\"\n\nimport itertools\nfrom torch.nn.functional import pad\n\n\ndef pad_tensors_to_max(tensor_list):\n \"\"\"\n Method returns list of tensors, each padded to the maximum sizes.\n\n Args:\n tensor_list - List of tensor to be padded.\n \"\"\"\n # Get max size of tensors.\n max_sizes = max([t.size() for t in tensor_list])\n\n #print(\"MAX = \", max_sizes)\n # Number of dimensions\n dims = len(max_sizes)\n # Create the list of zeros.\n zero_sizes = [0] * dims\n\n # Pad list of tensors to max size.\n padded_tensors = []\n for tensor in tensor_list:\n # Get list of current sizes.\n cur_sizes = tensor.size()\n\n #print(\"cur_sizes = \", cur_sizes)\n\n # Create the reverted list of \"desired extensions\".\n ext_sizes = [m-c for (m, c) in zip(max_sizes, cur_sizes)][::-1]\n\n #print(\"ext_sizes = \", ext_sizes)\n\n # Interleave two lists.\n pad_sizes = list(itertools.chain(*zip(zero_sizes, ext_sizes)))\n\n #print(\"pad_sizes = \", pad_sizes)\n\n # Pad tensor, starting from last dimension.\n padded_tensor = pad(\n input=tensor,\n pad=pad_sizes,\n mode='constant', value=0)\n\n #print(\"Tensor after padding: \", padded_tensor.size())\n # Add to list.\n padded_tensors.append(padded_tensor)\n\n # Return the padded list.\n return 
padded_tensors\n","sub_path":"collections/nemo_cv/nemo_cv/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"330028594","text":"# Default imports\nfrom scipy.stats import skew\nimport pandas as pd\nimport numpy as np\n\nny_housing = pd.read_csv('data/train.csv')\ndef skewness_sqrt(df):\n sale = df['SalePrice']\n area = df['GrLivArea']\n\n sale = np.sqrt(sale)\n area = np.sqrt(area)\n\n return skew(area), skew(sale)\n\n# Write your Solution Here:\n","sub_path":"q03_skewness_sqrt/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"339154878","text":"__all__ = (\n \"CONFIG_SCHEMA\",\n \"async_setup\",\n \"async_setup_entry\",\n \"async_unload_entry\",\n \"DOMAIN\",\n)\n\nimport asyncio\nimport logging\nimport re\nfrom datetime import timedelta\nfrom typing import Any, Callable, Dict, Final, List, Mapping, Optional, Tuple\n\nimport voluptuous as vol\nfrom homeassistant.config_entries import ConfigEntry, SOURCE_IMPORT\nfrom homeassistant.const import CONF_DEVICE_ID, CONF_PASSWORD, CONF_USERNAME\nfrom homeassistant.core import callback\nfrom homeassistant.exceptions import ConfigEntryNotReady\nfrom homeassistant.helpers import config_validation as cv\nfrom homeassistant.helpers.event import async_track_time_interval\nfrom homeassistant.helpers.typing import ConfigType, HomeAssistantType\n\nfrom custom_components.pik_intercom._base import phone_validator\nfrom custom_components.pik_intercom.api import (\n DEFAULT_CLIENT_APP,\n DEFAULT_CLIENT_OS,\n DEFAULT_CLIENT_VERSION,\n DEFAULT_USER_AGENT,\n PikIntercomAPI,\n PikIntercomException,\n)\nfrom custom_components.pik_intercom.const import (\n CONF_AUTH_UPDATE_INTERVAL,\n CONF_CALL_SESSIONS_UPDATE_INTERVAL,\n CONF_CLIENT_APP,\n CONF_CLIENT_OS,\n CONF_CLIENT_VERSION,\n CONF_INTERCOMS_UPDATE_INTERVAL,\n CONF_USER_AGENT,\n DATA_ENTITIES,\n DATA_ENTITY_UPDATERS,\n DATA_FINAL_CONFIG,\n DATA_REAUTHENTICATORS,\n DATA_UPDATE_LISTENERS,\n DATA_YAML_CONFIG,\n DEFAULT_AUTH_UPDATE_INTERVAL,\n DEFAULT_CALL_SESSIONS_UPDATE_INTERVAL,\n DEFAULT_INTERCOMS_UPDATE_INTERVAL,\n DOMAIN,\n MIN_AUTH_UPDATE_INTERVAL,\n MIN_CALL_SESSIONS_UPDATE_INTERVAL,\n MIN_DEVICE_ID_LENGTH,\n SUPPORTED_PLATFORMS,\n UPDATE_CONFIG_KEY_CALL_SESSIONS,\n UPDATE_CONFIG_KEY_INTERCOMS,\n)\n\n_LOGGER: Final = logging.getLogger(__name__)\n\nCONFIG_ENTRY_SCHEMA: Final = vol.Schema(\n {\n vol.Required(CONF_USERNAME): vol.All(cv.string, vol.Any(phone_validator, vol.Email)),\n vol.Required(CONF_PASSWORD): cv.string,\n # Update intervals\n vol.Optional(\n CONF_INTERCOMS_UPDATE_INTERVAL,\n default=timedelta(seconds=DEFAULT_INTERCOMS_UPDATE_INTERVAL),\n description=\"Intercoms update interval\",\n ): vol.All(\n cv.positive_time_period,\n vol.Clamp(min=timedelta(seconds=MIN_CALL_SESSIONS_UPDATE_INTERVAL)),\n ),\n vol.Optional(\n CONF_CALL_SESSIONS_UPDATE_INTERVAL,\n default=timedelta(seconds=DEFAULT_CALL_SESSIONS_UPDATE_INTERVAL),\n description=\"Call sessions update interval\",\n ): vol.All(\n cv.positive_time_period,\n vol.Clamp(min=timedelta(seconds=MIN_CALL_SESSIONS_UPDATE_INTERVAL)),\n ),\n vol.Optional(\n CONF_AUTH_UPDATE_INTERVAL,\n default=timedelta(seconds=DEFAULT_AUTH_UPDATE_INTERVAL),\n description=\"Authentication update interval\",\n ): vol.All(\n cv.positive_time_period,\n vol.Clamp(min=timedelta(seconds=MIN_AUTH_UPDATE_INTERVAL)),\n 
),\n # Additional parameters\n vol.Optional(CONF_CLIENT_APP, default=DEFAULT_CLIENT_APP): cv.string,\n vol.Optional(CONF_CLIENT_OS, default=DEFAULT_CLIENT_OS): cv.string,\n vol.Optional(CONF_CLIENT_VERSION, default=DEFAULT_CLIENT_VERSION): cv.string,\n vol.Optional(CONF_USER_AGENT, default=DEFAULT_USER_AGENT): cv.string,\n vol.Optional(CONF_DEVICE_ID, default=None): vol.Any(\n vol.Equal(None),\n vol.All(cv.string, vol.Length(min=MIN_DEVICE_ID_LENGTH)),\n ),\n }\n)\n\n\ndef _unique_entries(value: List[Mapping[str, Any]]) -> List[Mapping[str, Any]]:\n users: Dict[Tuple[str, str], Optional[int]] = {}\n\n errors = []\n for i, config in enumerate(value):\n user = config[CONF_USERNAME]\n if user in users:\n if users[user] is not None:\n errors.append(\n vol.Invalid(\"duplicate unique key, first encounter\", path=[users[user]])\n )\n users[user] = None\n errors.append(vol.Invalid(\"duplicate unique key, subsequent encounter\", path=[i]))\n else:\n users[user] = i\n\n if errors:\n if len(errors) > 1:\n raise vol.MultipleInvalid(errors)\n raise next(iter(errors))\n\n return value\n\n\nCONFIG_SCHEMA: Final = vol.Schema(\n {\n DOMAIN: vol.Any(\n vol.Equal({}),\n vol.All(\n cv.ensure_list,\n vol.Length(min=1),\n [CONFIG_ENTRY_SCHEMA],\n _unique_entries,\n ),\n )\n },\n extra=vol.ALLOW_EXTRA,\n)\n\n\n@callback\ndef _find_existing_entry(hass: HomeAssistantType, username: str) -> Optional[ConfigEntry]:\n existing_entries = hass.config_entries.async_entries(DOMAIN)\n for config_entry in existing_entries:\n if config_entry.data[CONF_USERNAME] == username:\n return config_entry\n\n\n_RE_USERNAME_MASK = re.compile(r\"^(\\W*)(.).*(.)$\")\n\n\ndef mask_username(username: str):\n parts = username.split(\"@\")\n return \"@\".join(map(lambda x: _RE_USERNAME_MASK.sub(r\"\\1\\2***\\3\", x), parts))\n\n\ndef _patch_haffmpeg():\n \"\"\"Patch HA ffmpeg adapter to put rtsp_transport before input stream when\n a certain non-existent command line argument (input_rtsp_transport) is provided.\n\n \"\"\"\n\n from haffmpeg.core import HAFFmpeg\n\n if hasattr(HAFFmpeg, \"_orig_generate_ffmpeg_cmd\"):\n return\n\n HAFFmpeg._orig_generate_ffmpeg_cmd = HAFFmpeg._generate_ffmpeg_cmd\n\n def _generate_ffmpeg_cmd(self, *args, **kwargs) -> None:\n \"\"\"Generate ffmpeg command line (patched to support input_rtsp_transport argument).\"\"\"\n self._orig_generate_ffmpeg_cmd(*args, **kwargs)\n\n _argv = self._argv\n try:\n rtsp_flags_index = _argv.index(\"-prefix_rtsp_flags\")\n except ValueError:\n return\n try:\n rtsp_transport_spec = _argv[rtsp_flags_index + 1]\n except IndexError:\n return\n else:\n if not rtsp_transport_spec.startswith(\"-\"):\n del _argv[rtsp_flags_index : rtsp_flags_index + 2]\n _argv.insert(1, \"-rtsp_flags\")\n _argv.insert(2, rtsp_transport_spec)\n\n HAFFmpeg._generate_ffmpeg_cmd = _generate_ffmpeg_cmd\n\n\nasync def async_setup(hass: HomeAssistantType, config: ConfigType):\n \"\"\"Set up the PIK Intercom component.\"\"\"\n _patch_haffmpeg()\n\n domain_config = config.get(DOMAIN)\n if not domain_config:\n return True\n\n domain_data = {}\n hass.data[DOMAIN] = domain_data\n\n yaml_config = {}\n hass.data[DATA_YAML_CONFIG] = yaml_config\n\n for user_cfg in domain_config:\n if not user_cfg:\n continue\n\n username: str = user_cfg[CONF_USERNAME]\n\n key = username\n log_prefix = f\"[{mask_username(username)}] \"\n\n _LOGGER.debug(log_prefix + \"Получена конфигурация из YAML\")\n\n existing_entry = _find_existing_entry(hass, username)\n if existing_entry:\n if existing_entry.source == SOURCE_IMPORT:\n 
yaml_config[key] = user_cfg\n _LOGGER.debug(log_prefix + \"Соответствующая конфигурационная запись существует\")\n else:\n _LOGGER.warning(\n log_prefix + \"Конфигурация из YAML переопределена другой конфигурацией!\"\n )\n continue\n\n # Save YAML configuration\n yaml_config[key] = user_cfg\n\n _LOGGER.warning(log_prefix + \"Создание новой конфигурационной записи\")\n\n hass.async_create_task(\n hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": SOURCE_IMPORT},\n data={CONF_USERNAME: username},\n )\n )\n\n if not yaml_config:\n _LOGGER.debug(\"Конфигурация из YAML не обнаружена\")\n\n return True\n\n\nasync def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry):\n username = config_entry.data[CONF_USERNAME]\n unique_key = username\n config_entry_id = config_entry.entry_id\n log_prefix = f\"[{mask_username(username)}] \"\n hass_data = hass.data\n\n _LOGGER.debug(log_prefix + \"Setting up config entry\")\n\n # Source full configuration\n if config_entry.source == SOURCE_IMPORT:\n # Source configuration from YAML\n yaml_config = hass_data.get(DATA_YAML_CONFIG)\n\n if not yaml_config or unique_key not in yaml_config:\n _LOGGER.info(\n log_prefix\n + f\"Удаление записи {config_entry_id} после удаления из конфигурации YAML\"\n )\n hass.async_create_task(hass.config_entries.async_remove(config_entry_id))\n return False\n\n user_cfg = yaml_config[unique_key]\n\n else:\n # Source and convert configuration from input post_fields\n try:\n user_cfg = CONFIG_ENTRY_SCHEMA(\n {\n **config_entry.data,\n **config_entry.options,\n }\n )\n except vol.Invalid as e:\n _LOGGER.error(log_prefix + \"Сохранённая конфигурация повреждена\" + \": \" + repr(e))\n return False\n\n _LOGGER.info(log_prefix + \"Применение конфигурационной записи\")\n\n device_id = user_cfg.get(CONF_DEVICE_ID)\n if not device_id:\n device_id = config_entry.entry_id[-16:]\n user_cfg[CONF_DEVICE_ID] = device_id\n used_device_id_source = \"полученный из ID записи\"\n else:\n used_device_id_source = \"заданный пользователем\"\n\n _LOGGER.debug(log_prefix + f\"Используемый device_id: {device_id} ({used_device_id_source})\")\n\n api_object = PikIntercomAPI(\n username=username,\n password=user_cfg[CONF_PASSWORD],\n device_id=device_id,\n client_app=user_cfg[CONF_CLIENT_APP],\n client_os=user_cfg[CONF_CLIENT_OS],\n client_version=user_cfg[CONF_CLIENT_VERSION],\n user_agent=user_cfg[CONF_USER_AGENT],\n )\n\n try:\n await api_object.async_authenticate()\n\n # Fetch all properties\n await api_object.async_update_properties()\n\n except PikIntercomException as e:\n _LOGGER.error(log_prefix + \"Невозможно выполнить авторизацию: \" + repr(e))\n await api_object.async_close()\n raise ConfigEntryNotReady(f\"{e}\")\n\n apartments = api_object.properties\n\n if not apartments:\n # Cancel setup because no properties were found\n _LOGGER.warning(log_prefix + \"Владения не найдены\")\n await api_object.async_close()\n return False\n\n tasks = []\n for apartment_object in apartments.values():\n tasks.append(apartment_object.async_update_intercoms())\n\n if tasks:\n done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_EXCEPTION)\n if pending:\n for task in pending:\n task.cancel()\n\n first_task = next(iter(done))\n exc_first_task = first_task.exception()\n if exc_first_task:\n await api_object.async_close()\n raise ConfigEntryNotReady(f\"Ошибка при обновлении данных: {exc_first_task}\")\n\n _LOGGER.debug(log_prefix + f\"Найдено {len(apartments)} владений\")\n\n api_objects: Dict[str, \"PikIntercomAPI\"] = 
hass_data.setdefault(DOMAIN, {})\n\n # Create placeholders\n api_objects[config_entry_id] = api_object\n hass_data.setdefault(DATA_ENTITIES, {})[config_entry_id] = []\n hass_data.setdefault(DATA_FINAL_CONFIG, {})[config_entry_id] = user_cfg\n hass_data.setdefault(DATA_ENTITY_UPDATERS, {})[config_entry_id] = {}\n\n # Forward entry setup to sensor platform\n for domain in SUPPORTED_PLATFORMS:\n hass.async_create_task(\n hass.config_entries.async_forward_entry_setup(\n config_entry,\n domain,\n )\n )\n\n # Create options update listener\n update_listener = config_entry.add_update_listener(async_reload_entry)\n hass_data.setdefault(DATA_UPDATE_LISTENERS, {})[config_entry_id] = update_listener\n\n # Create reauth listener\n async def async_reauthenticate(*_):\n _LOGGER.debug(log_prefix + \"Выполнение профилактической реавторизации\")\n\n await api_object.async_authenticate()\n\n auth_update_interval = user_cfg[CONF_AUTH_UPDATE_INTERVAL]\n\n _LOGGER.debug(\n log_prefix + f\"Планирование профилактической ревторизации \"\n f\"(интервал: {auth_update_interval.total_seconds()} секунд)\"\n )\n\n hass.data.setdefault(DATA_REAUTHENTICATORS, {})[config_entry_id] = async_track_time_interval(\n hass,\n async_reauthenticate,\n auth_update_interval,\n )\n\n _LOGGER.debug(log_prefix + \"Применение конфигурации успешно\")\n return True\n\n\nasync def async_reload_entry(\n hass: HomeAssistantType,\n config_entry: ConfigEntry,\n) -> None:\n \"\"\"Reload Lkcomu TNS Energo entry\"\"\"\n log_prefix = f\"[{mask_username(config_entry.data[CONF_USERNAME])}] \"\n _LOGGER.info(log_prefix + \"Перезагрузка интеграции\")\n await hass.config_entries.async_reload(config_entry.entry_id)\n\n\nasync def async_migrate_entry(hass: HomeAssistantType, config_entry: ConfigEntry) -> bool:\n username = config_entry.data[CONF_USERNAME]\n\n from custom_components.pik_intercom.config_flow import PikIntercomConfigFlow, DEFAULT_OPTIONS\n\n log_prefix = f\"[{mask_username(username)}] \"\n _LOGGER.info(\n log_prefix + f\"Обновление конфигурационной записи с версии \"\n f\"{config_entry.version} до {PikIntercomConfigFlow.VERSION}\"\n )\n\n data = dict(config_entry.data)\n options = dict(config_entry.options)\n\n if config_entry.version < 2 and config_entry.source == SOURCE_IMPORT:\n options = DEFAULT_OPTIONS\n\n config_entry.version = PikIntercomConfigFlow.VERSION\n hass.config_entries.async_update_entry(config_entry, data=data, options=options)\n\n return True\n\n\nasync def async_unload_entry(hass: HomeAssistantType, config_entry: ConfigEntry):\n log_prefix = f\"[{mask_username(config_entry.data[CONF_USERNAME])}] \"\n entry_id = config_entry.entry_id\n\n tasks = [\n hass.config_entries.async_forward_entry_unload(config_entry, domain)\n for domain in SUPPORTED_PLATFORMS\n ]\n\n unload_ok = all(await asyncio.gather(*tasks))\n\n if unload_ok:\n # Cancel entity updaters\n for update_identifier, cancel_func in hass.data[DATA_ENTITY_UPDATERS].pop(entry_id).items():\n cancel_func()\n\n # Cancel reauthentication routines\n reauthenticator: Callable = hass.data.get(DATA_REAUTHENTICATORS, {}).pop(entry_id, None)\n if reauthenticator:\n reauthenticator()\n\n # Cancel entry update listeners\n cancel_listener = hass.data[DATA_UPDATE_LISTENERS].pop(entry_id)\n cancel_listener()\n\n # Close API object\n api_object: PikIntercomAPI = hass.data.get(DOMAIN, {}).pop(entry_id, None)\n if api_object:\n await api_object.async_close()\n\n # Remove final configuration holder\n hass.data[DATA_FINAL_CONFIG].pop(entry_id)\n\n # Remove entity holder\n 
hass.data[DATA_ENTITIES].pop(entry_id)\n\n        _LOGGER.info(log_prefix + \"Интеграция выгружена\")\n\n    else:\n        _LOGGER.warning(log_prefix + \"При выгрузке конфигурации произошла ошибка\")\n\n    return unload_ok\n","sub_path":"custom_components/pik_intercom/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":15854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
+{"seq_id":"316743865","text":"import io\nfrom os.path import dirname, join\nfrom setuptools import setup\n\n\n# read the contents of your README file\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n    long_description = f.read()\n\n\ndef get_version(relpath):\n    \"\"\"Read version info from a file without importing it\"\"\"\n    for line in io.open(join(dirname(__file__), relpath), encoding=\"cp437\"):\n        if \"__version__\" in line:\n            if '\"' in line:\n                # __version__ = \"0.9\"\n                return line.split('\"')[1]\n            elif \"'\" in line:\n                return line.split(\"'\")[1]\n\n\nsetup(\n    name='metagenome-atlas',\n    version=get_version(\"atlas/__init__.py\"),\n    url='https://github.com/metagenome-atlas/atlas',\n    license='BSD-3',\n    author='Joe Brown, Silas Kieser',\n    author_email='brwnjm@gmail.com, silas.kieser@gmail.com',\n    description='ATLAS - workflows for assembly, annotation, and genomic binning of metagenomic and metatranscriptomic data.',\n    long_description=long_description,\n    long_description_content_type='text/markdown',\n    packages=['atlas'],\n    package_data={'': [\n        \"atlas/*\",\n    ]},\n    data_files=[(\".\", [\"README.md\", \"LICENSE.txt\"])],\n    include_package_data=True,\n    install_requires= [\n    ],\n    # install via conda: click, pandas, pyyaml, snakemake\n    entry_points={\n        'console_scripts': [\n            'atlas = atlas.atlas:cli'\n        ]\n    },\n    classifiers=[\"Topic :: Scientific/Engineering :: Bio-Informatics\"],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
+{"seq_id":"243452623","text":"from IrisParser import kmeansParser\nfrom Cluster import clust\nfrom KM import kmeans\nimport sys\n\ndef main(argv):\n    if len(argv)!=1:\n        print(\"Invalid function call: kmeans.py <data file>\")\n        return\n\n    dataSet = sys.argv[1]\n    data = kmeansParser(dataSet)\n    data = data.returnList()\n    KM = kmeans(data)\n    KM.run()\n\n    return\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n","sub_path":"kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"92099406","text":"import my_io as io\nimport my_srcor as srcor \nfrom collections import defaultdict\nimport numpy as np\nimport astropy.coordinates as cor\nimport astropy.units as u\nimport os\nfrom magic_pylab import *\nimport my_ds9 as ds9\nfrom astropy import wcs\nfrom magic_pylab import *\n\n#my = io.read('photo-z.txt',fmt = 2).dat\n#goods = io.read('goods_z.txt', fmt = 2).dat\nwht_img = io.read('low_res_goods_V_wht.fits').dat[0].data\nwht_img = wht_img.transpose()\nhd = wcs.WCS('hd.fits')\n\ndzc = my.columns['dzc']\nid = dzc < 0.33 * 1.0857\ndetect = my.columns['detect'][id]\nra_my = my.columns['RA'][id]\nde_my = my.columns['DEC'][id]\nmag_z = my.columns['zc'][id]\nerr_z = my.columns['dzc'][id]\n\n#id_z_detect = []\n#for idx in range(len(ra_my)):\n#\tdet_band = my.columns['detect'][idx]\n#\tif ('Z' in 
det_band):\n#\t\tid_z_detect.append(idx)\n#detect = my.columns['detect'][id_z_detect]\n#ra_my = my.columns['RA'] [id_z_detect]\n#de_my = my.columns['DEC'] [id_z_detect]\n#mag_z = my.columns['zc'] [id_z_detect]\n#err_z = my.columns['dzc'] [id_z_detect]\n\n\nsn = goods.columns[43] / goods.columns[44]\nid = sn > 3\nmag_goods = goods.columns[41][id]\nra_goods = goods.columns[1][id]\nde_goods = goods.columns[2][id]\n\nx_my, y_my = hd.wcs_world2pix(ra_my, de_my, 0)\nx_goods, y_goods = hd.wcs_world2pix(ra_goods, de_goods, 0)\n\nid_in_goods = []\nid_goods_ok= []\nthresh = 0.1\nfor idx in range(len(ra_my)):\n\tif wht_img[int(x_my[idx]), int(y_my[idx])] > thresh:\n\t\tid_in_goods.append(idx)\nfor idx in range(len(ra_goods)):\n\tif wht_img[int(x_goods[idx]), int(y_goods[idx])] > thresh:\n\t\tid_goods_ok.append(idx)\n\n\nmag_z, err_z = mag_z[id_in_goods], err_z[id_in_goods]\nra_my, de_my = ra_my[id_in_goods], de_my[id_in_goods]\nmag_goods = mag_goods[id_goods_ok]\nra_goods, de_goods = ra_goods[id_goods_ok], de_goods[id_goods_ok]\n\nid_ok = (mag_z > 0) & (err_z < 1.08)\nmag_z, err_z = mag_z[id_ok], err_z[id_ok]\nra_my, de_my = ra_my[id_ok], de_my[id_ok]\n\n#mag_z = mag_z + 0.10\nmag_goods = mag_goods - 0.07\nhist_my = histogram(mag_z, range=(20,29), bins=18)\nhist_goods = histogram(mag_goods, range=(20,29), bins=18)\nnum_my = hist_my[0]\nnum_goods = hist_goods[0]\nx_mag = [(hist_my[1][idx] + hist_my[1][idx+1]) / 2 \\\n\t\t\tfor idx in range(len(hist_my[0]))]\n\narea = 0.0511719236111\nnum_my = num_my / (area * 0.5)\nnum_goods = num_goods / (area * 0.5)\nsemilogy(x_mag, num_my, 'sr', \\\n\t\tmarkersize=8, mec='r') #, markerfacecolor='white')\nx1_mag = [mag + 0.0 for mag in x_mag]\nsemilogy(x1_mag, num_goods, 'Db', \\\n\t\tmarkersize=8, mec='b')#, markerfacecolor='white')\n#hist(mag_z, range=(20,29), bins=18, histtype='step', color='r')\n#hist(mag_goods, range=(20,29), bins=18, histtype='step', color='b')\n\n#id_bug = (mag_z > 24) & (mag_z < 24.5)\n#ra_my, de_my = ra_my[id_bug], de_my[id_bug]\n#ds9.reg_large(ra_my, de_my, fmt = 1, hd = hd, rad=3, color = 'red', fname = 'my_24to24p5.reg')\n#\n#id_bug = (mag_goods > 24) & (mag_goods < 24.5)\n#ra_goods, de_goods = ra_goods[id_bug], de_goods[id_bug]\n#ds9.reg_large(ra_goods, de_goods, fmt = 1, hd = hd, rad=2, color = 'green', \n#\t\t\t  fname = 'goods_24to24p5.reg')\n\n#ytik = yticks()[0]\n#ytik = int32(ytik / (1e4 * area)) * area * 1e4 \n##labels = arange(0, 12e4, 1e4)\n#yticks(ytik, int32(ytik / (area * 1e4)))\n#\nylabel(r'$\\rm{Number\\ Density\\ (deg^{-2}mag^{-1})} $', fontsize = 17)\nxlabel(r'$zc\\ (z)\\ \\rm{mag} $', fontsize = 17)\nlegend(('This work','HST/ACS z band'), loc = 'upper left')\n\nsavefig('completeness.eps', format='eps', bbox_inches='tight')\nclose()\nos.system('open completeness.eps')\n\n","sub_path":"src_dense.py","file_name":"src_dense.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"437214069","text":"from unittest import TestCase\nfrom maze import input_loop\nimport io\nimport unittest.mock\n\n\nclass TestInputLoop(TestCase):\n\n    @unittest.mock.patch('builtins.input', return_value='n')\n    def test_return_value(self, mock_input): # check that returned string is the user choice\n        self.assertEqual(input_loop('Test', ['N']), 'N')\n\n    @unittest.mock.patch('builtins.input', side_effect=['a', 'b', 'n'])\n    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)\n    def test_invalid_input(self, mock_stdout, mock_input): # check for error messages on invalid input\n        expected_output 
= 'Sorry, that wasn\\'t a valid input. Please try again.\\n' \\\n                          'Sorry, that wasn\\'t a valid input. Please try again.\\n'\n        input_loop('Test', ['N'])\n        self.assertEqual(mock_stdout.getvalue(), expected_output)\n","sub_path":"lab08/test_input_loop.py","file_name":"test_input_loop.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"340106506","text":"import numpy as np\nimport coremltools.proto.FeatureTypes_pb2 as ft\ndef printTop5(resultsDict):\n    # put probabilities and labels into their own lists\n    probs = np.array(list(resultsDict.values()))\n    labels = list(resultsDict.keys())\n    # find the indices of the 5 classes with the highest probabilities\n    top5Probs = probs.argsort()[-5:][::-1]\n\n    # find the corresponding labels and probabilities\n    top5Results = [(labels[i], probs[i]) for i in top5Probs]\n    #print them from high to low\n    for label,prob in top5Results:\n        print(\"%.5f %s\" %(prob,label))\n\ndef get__nn(spec):\n    if spec.WhichOneof('Type') == 'neuralNetwork':\n        return spec.neuralNetwork\n    elif spec.WhichOneof('Type') == 'neuralNetworkClassifier':\n        return spec.neuralNetworkClassifier\n    elif spec.WhichOneof('Type') == 'neuralNetworkRegressor':\n        return spec.neuralNetworkRegressor\n    else:\n        raise ValueError(\"MLModel does not have a neural network\")\n    \ndef convert_multiArray_to_image(feature,is_bgr= False):\n    import coremltools.proto.FeatureTypes_pb2 as ft\n    if feature.type.WhichOneof(\"Type\") != \"multiArrayType\":\n        raise ValueError(\"%s is not a multiArray type\" % feature.name)\n    shape = tuple(feature.type.multiArrayType.shape)\n    channels = None\n    if len(shape) == 2:\n        channels = 1\n        height,width = shape\n    elif len(shape) == 3:\n        channels,height,width = shape\n    if channels != 1 and channels != 3:\n        raise ValueError(\"Shape{} not supported for image type\".format(shape))\n    if channels == 1:\n        feature.type.imageType.colorSpace = ft.ImageFeatureType.GRAYSCALE\n    elif channels == 3:\n        if is_bgr:\n            feature.type.imageType.colorSpace = ft.ImageFeatureType.BGR\n        else:\n            feature.type.imageType.colorSpace = ft.ImageFeatureType.RGB\n    feature.type.imageType.width = width\n    feature.type.imageType.height = height\n    \ndef update_multiarray_to_float32(feature):\n    if feature.type.HasField(\"multiArrayType\"):\n        feature.type.multiArrayType.dataType = ft.ArrayFeatureType.FLOAT32\n    \n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"602307865","text":"from __future__ import print_function\nfrom __future__ import division\nimport theano\nimport numpy as np\nfrom theano import tensor as T\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nimport theano.sparse.basic as sp\nfrom theano.tensor.elemwise import CAReduce\n# from activations import *\n# from inits import *\n# from utils import *\n# from Dropout import Dropout\nfrom headers import *\n\n\nclass GraphConvolution_tp(object):\n\n\tdef __init__(self, size, adjacency, rng=None, init='glorot', bias=False, activation_str='rectify', weights=False, g_low=-10.0, g_high=10.0):\n\n\t\tself.settings = locals()\n\t\tdel self.settings['self']\n\t\tself.size = size\n\t\tself.rng = rng\n\t\tself.init = getattr(inits, init)\n\t\tself.activation = getattr(activations, activation_str)\n\t\tself.weights = weights\n\t\tself.bias = bias\n\t\tself.adjacency = adjacency\n\t\tself.numparams = 0\n\t\tself.g_low 
= g_low\n\t\tself.g_high = g_high\n\n\n\tdef connect(self, layer_below):\n\t\tself.layer_below = layer_below\n\t\tself.inputD = 2*layer_below.size # Weights act on the concatenation\n\n\t\tself.W = list()\n\t\tself.W_t = list()\n\n\t\tif self.bias:\n\t\t\tself.b = list()\n\t\tself.nonzeros = {}\n\t\tfor i in range(np.shape(self.adjacency)[0]):\n\t\t\tcount = 0\n\t\t\tself.nonzeros[i] = []\n\t\t\tfor j in range(np.shape(self.adjacency)[1]):\n\t\t\t\tif(self.adjacency[i, j]):\n\t\t\t\t\tself.nonzeros[i].append(j)\n\t\t\t\t\tcount += 1\n\t\t\tself.W.append(self.init((count, self.inputD, self.size), rng=self.rng))\n\t\t\tself.numparams += count*self.inputD*self.size\n\t\t\tif self.bias:\n\t\t\t\t# self.b is a list of per-node bias tensors, so append instead of overwriting\n\t\t\t\tself.b.append(zero0s((count, self.size)))\n\t\t\t\tself.numparams += count*self.size\n\t\t\tself.W_t.append(self.init((2*self.size, self.size), rng=self.rng))\n\t\t\tself.numparams += 2*self.size*self.size\n\t\tself.params = []\n\t\tself.params += self.W\n\t\tprint (\"====== GCN ===========\")\n\t\tself.paramstr = \"Nodes = {0}, Size = ({1}X{2}X{3})\".format(np.shape(self.adjacency)[0], count, self.inputD, self.size)\n\t\tprint(self.paramstr)\n\t\tprint (\"====== +++ ===========\")\n\t\t\n\t\tif self.bias:\n\t\t\tself.params += self.b\n\n\t\tif (self.weights):\n\t\t\tfor param, weight in zip(self.params, self.weights):\n\t\t\t\tparam.set_value(np.asarray(weight, dtype=theano.config.floatX))\n\n\t\tself.L2_sqr = 0\n\t\tfor W in self.W:\n\t\t\tself.L2_sqr += (W ** 2).sum()\n\t\t\n\t\tself.h0 = zero0s((1,1,self.size))\n\t\n\tdef recurrence_efficient(self,x,h_tm1):\n\t\n\t\th_tm1 = theano.gradient.grad_clip(h_tm1, self.g_low, self.g_high)\n\t\tfor i in range(np.shape(self.adjacency)[0]):\n\t\t\tt = x[:, i, :]\n\t\t\tc = h_tm1[:, i, :]\n\t\t\td = T.concatenate((t,c),axis=1)\n\t\t\tb = T.tensordot(d, self.W_t[i], axes=[1, 0])\n\t\t\ta = t + b\n\t\t\ta = a.reshape((a.shape[0], 1, a.shape[1]))\n\t\t\tif(i==0):\n\t\t\t\tout = a\n\t\t\telse:\n\t\t\t\tout = T.concatenate((out, a), axis=1)\n\n\t\treturn self.activation(out)\n\n\tdef output(self, seq_output=True):\n\t\tx = self.layer_below.output(seq_output=seq_output)\n\t\tfor i in range(np.shape(self.adjacency)[0]):\n\t\t\tfor j in range(len(self.nonzeros[i])):\n\t\t\t\t\n\t\t\t\td = T.concatenate((x[:, :, i, :], x[:, :, self.nonzeros[i][j], :]),axis=2)\n\t\t\t\tif(j==0):\n\t\t\t\t\tout_d = T.tensordot(d,self.W[i][j, :, :], axes=[2, 0])\n\t\t\t\telse:\n\t\t\t\t\tout_d += T.tensordot(d,self.W[i][j, :, :], axes=[2, 0])\n\t\t\t\tif self.bias:\n\t\t\t\t\tout_d += self.b[i][j, :]\n\t\t\tif(i == 0):\n\t\t\t\toutpt = out_d.reshape((out_d.shape[0], out_d.shape[1], 1, out_d.shape[2]))\n\t\t\telse:\n\t\t\t\toutpt = T.concatenate((outpt, out_d.reshape((out_d.shape[0], out_d.shape[1], 1, out_d.shape[2]))), axis=2)\n\n\t\n\t\t# return self.activation(out)\n\n\t\th_init = T.extra_ops.repeat(self.h0, x.shape[2], axis=1)\n\t\th_init = T.extra_ops.repeat(h_init, x.shape[1], axis=0)\n\n\t\tout, _ = theano.scan(fn=self.recurrence_efficient,\n\t\t\t\tsequences=[outpt],\n\t\t\t\toutputs_info=[h_init],\n\t\t\t\tn_steps=x.shape[0],\n\t\t\t)\n\t\treturn out\n","sub_path":"code/neuralmodels/layers/GraphConvolution_temporal_pairwise.py","file_name":"GraphConvolution_temporal_pairwise.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"331891373","text":"# %%\nimport numpy as np\n\n#%%\ndef parser(filename):\n    with open(filename) as f:\n        data = [(instr[0], int(instr[1:])) for instr in 
f.read().splitlines()]\n return data\n\n# %%\ndef travel(instructions):\n dir_vector = {\"E\" : np.array([1, 0]), \"S\": np.array([0, -1]), \"W\": np.array([-1, 0]), \"N\": np.array([0, 1])}\n dirs = \"ESWN\"\n \n dir = 0 # current direction idx\n pos = np.array([0, 0]) # current position\n\n for instr, value in instructions:\n if instr == \"F\":\n pos += value * dir_vector[dirs[dir]]\n elif instr in dirs:\n pos += value * dir_vector[instr]\n elif instr in \"RL\":\n sign = 1 if instr == \"R\" else -1\n dir = (dir + sign * value//90) % len(dirs)\n\n return pos\n\n# %%\ndef travel_with_waypoint(instructions):\n dir_vector = {\"E\" : np.array([1, 0]), \"S\": np.array([0, -1]), \"W\": np.array([-1, 0]), \"N\": np.array([0, 1])}\n rotation = lambda theta: np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]], dtype=int)\n dirs = \"ESWN\"\n\n pos = np.array([0, 0], dtype=int) # current position\n wpt = np.array([10, 1], dtype=int) # current waypoint position\n\n for instr, value in instructions:\n if instr == \"F\": # move 'value' times forward to the waypoint\n pos += value * wpt\n elif instr in dirs: # move the waypoint to E/S/W/N by the given value\n wpt += value * dir_vector[instr]\n elif instr in \"RL\": # rotate the waypoint to R/L by the given value (degree)\n sign = 1 if instr == \"R\" else -1\n wpt = wpt @ rotation(np.deg2rad(sign * value))\n\n return pos\n\n# %%\nif __name__ == \"__main__\":\n from os.path import dirname, join, realpath, basename\n folder = join(dirname(dirname(realpath(__file__))), \"data\")\n instructions = parser(f\"{folder}/day12.txt\")\n\n print(\"Part 1 —\", np.abs(travel(instructions)).sum())\n print(\"Part 2 —\", np.abs(travel_with_waypoint(instructions)).sum())\n","sub_path":"src/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"587420093","text":"import _init_paths\n# from fcn.test import test_net_images\nfrom datasets.factory import get_imdb\n\nfrom utils.blob import pad_im, unpad_im # im_list_to_blob\n\nimport time, os, sys\nimport os.path as osp\nimport numpy as np\nimport cv2\nimport scipy.io as sio\n\nimport torch\nimport torch.nn as nn\n\n# mgc = get_ipython().magic\n# mgc('%matplotlib WXAgg')\n\nfrom demo import _get_image_blob\n\n\nif __name__ == '__main__':\n # import pprint\n from utils.nms import nms\n from convert_to_pth import PoseCNN, get_meta_info\n\n imdb_name = \"lov_keyframe\"\n imdb = get_imdb(imdb_name)\n\n # variables\n extents = imdb._extents\n points = imdb._points_all\n symmetry = imdb._symmetry\n num_classes = imdb.num_classes\n\n im_scale = 1.0\n\n extents, poses, _ = get_meta_info(num_classes)\n # K = meta_data[0,:9]\n # K = np.reshape(K, (3,3))\n # factor_depth = 10000\n # # K[2,2] = 1\n\n # construct the filenames\n # demo_dir = 'data/demo_images/'\n demo_dir = 'data/LOV/data/0001/'\n\n rgb_filenames = sorted([demo_dir + f for f in os.listdir(demo_dir) if f.endswith(\"color.png\")])\n depth_filenames = sorted([demo_dir + f for f in os.listdir(demo_dir) if f.endswith(\"depth.png\")])\n print(rgb_filenames)\n print(depth_filenames)\n\n meta_file = demo_dir + '000001-meta.mat'\n meta_data = sio.loadmat(meta_file)\n K = meta_data['intrinsic_matrix']\n factor_depth = float(meta_data['factor_depth'])\n\n # load network\n # model_file = \"posecnn.pth\"\n # model_file = \"output/lov/lov_debug/vgg16_fcn_color_single_frame_2d_pose_add_lov_iter_100.pth\"\n model_file = 
\"output/lov/lov_debug/vgg16_lov_iter_200.pth\"\n model = PoseCNN(64, num_classes)\n model.load_state_dict(torch.load(model_file))\n print(\"Loaded model %s\"%model_file)\n\n model.eval()\n model.cuda()\n\n\n PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])\n\n FCT = torch.cuda.FloatTensor\n\n # START\n for idx in range(len(rgb_filenames)):\n im_file = rgb_filenames[idx]\n depth_file = depth_filenames[idx]\n img = cv2.imread(im_file, cv2.IMREAD_UNCHANGED)\n im = pad_im(img, 16)\n\n im_blob, _, _ = _get_image_blob(im, im_scale, PIXEL_MEANS)\n\n im_blob = FCT(np.transpose(im_blob, [0,3,1,2]))\n _, labels_2d, vertex_pred, hough_outputs, poses_pred = model.forward(im_blob, FCT(extents), FCT(poses), FCT(K))\n rois, poses_init = hough_outputs[:2]\n\n labels_2d = labels_2d.data.cpu().numpy()\n vertex_pred = vertex_pred.data.cpu().numpy()\n rois = rois.data.cpu().numpy()\n poses_init = poses_init.data.cpu().numpy()\n poses_pred = poses_pred.data.cpu().numpy()\n\n # non-maximum suppression\n keep = nms(rois, 0.5)\n rois = rois[keep, :]\n poses_init = poses_init[keep, :]\n poses_pred = poses_pred[keep, :]\n # print keep\n # print rois\n\n # combine poses\n poses = poses_init\n for i in xrange(rois.shape[0]):\n class_id = int(rois[i, 1])\n if class_id >= 0:\n poses[i, :4] = poses_pred[i, 4*class_id:4*class_id+4]\n\n labels = labels_2d[0,:,:].astype(np.int32)\n labels = unpad_im(labels, 16)\n\n roi_classes = [imdb._classes[int(c)] for c in rois[:,1]]\n\n # build the label image\n im_label = imdb.labels_to_image(im, labels)\n\n labels_new = cv2.resize(labels, None, None, fx=1.0/im_scale, fy=1.0/im_scale, interpolation=cv2.INTER_NEAREST)\n\n pose_data = [{\"name\": roi_classes[ix], \"pose\": p.tolist()} for ix, p in enumerate(poses)]\n\n SAVE = True\n\n im_file_prefix = im_file.replace(\"color.png\",\"\")\n # np.save(im_file_prefix + \"label2d.npy\", labels_2d)\n # np.save(im_file_prefix + \"vert_pred.npy\", vertex_pred)\n\n if SAVE:\n import json\n j_file = im_file_prefix + \"pred_pose.json\"\n with open(j_file, \"w\") as f:\n j_data = {\"poses\": pose_data, \"meta\": {'intrinsic_matrix': K.tolist(), 'factor_depth': factor_depth}}\n json.dump(j_data, f)\n print(\"Saved pose data to %s\"%(j_file))\n\n VIS = True\n # if cfg.TEST.VISUALIZE:\n if VIS:\n color_m = np.zeros(img.shape, dtype=np.uint8)\n for c in xrange(num_classes-1):\n cls = c + 1\n color = np.random.randint(0,255,size=(3))\n color_m[labels==cls] = color\n cv2.imshow(\"img\", img)\n cv2.imshow(\"m\", color_m)\n cv2.waitKey(100)\n\n im_depth = pad_im(cv2.imread(depth_file, cv2.IMREAD_UNCHANGED), 16)\n\n from vis_utils import extract_vertmap, vis_segmentations_vertmaps_detection\n\n vp = np.transpose(vertex_pred[0], [1,2,0])\n vertmap = extract_vertmap(labels, vp, num_classes)\n # K = np.array([[1066.778, 0, 312.9869], [0, 1067.487, 241.3109], [0, 0, 1]])\n vis_segmentations_vertmaps_detection(im, im_depth, im_label, imdb._class_colors, vertmap, \n labels, rois, poses, K, imdb.num_classes, imdb._classes, imdb._points_all)\n","sub_path":"my_tools/demo_pth.py","file_name":"demo_pth.py","file_ext":"py","file_size_in_byte":5093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"416326009","text":"def display(arr):\n for i in arr:\n print(i,end=\"\")\n\n\n\n\ndef extraLongFactorials(n):\n a = 1\n while n:\n a = a * n\n n -= 1\n arr = []\n a=str(a)\n for char in a:\n arr.append(int(char))\n\n display(arr)\n return\n\nif __name__ == '__main__':\n n = int(input(\"eneter number\"))\n\n 
extraLongFactorials(n)\n","sub_path":"Mix Questions/factorialLargeNumber.py","file_name":"factorialLargeNumber.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"353090728","text":"import functions as fc\nimport matplotlib.pyplot as plt\nimport pickle\nimport numpy as np\n\n#Initialize a seed for the rest of the program\nseed = 5283597438\n\n#Importing the class 'functions' and initialise the seed\nF = fc.functions(seed)\n\n#Generating N=1000 random numbers and saving it in rand_nums\nN = 1000\nrand_nums = list([0])*N\nfor i in range(1000):\n\trand_nums[i] = F.RNG()\n\n#Scatter plot the random numbers for x[i+1] (x[1:1000]) against x[i] (x[0:999])\nplt.scatter(rand_nums[1:1000],rand_nums[0:999],s=1,color='black')\nplt.xlabel(r'$x_{i+1}$')\nplt.ylabel(r'$x_{i}$')\nplt.savefig('plots/rng_1000.png')\nplt.close()\n\n#Generating one million random numbers and saving it in rand_nums\nN = 1000000\nrand_nums = list([0])*N\nfor i in range(N):\n\trand_nums[i] = F.RNG()\n\n#plotting the result in a histogram to check for uniformity\nplt.hist(rand_nums,bins=20)\nplt.xlabel('Random Value')\nplt.savefig('plots/rng_mil.png')\nplt.close()\n\n#Saving the last produced random number from the LCG to continue generating \n#unique pseudo random numbers in the next exercises without initializing \n#the seed (and other instances) as user\nwith open('instances.pkl','wb') as output:\n\tpickle.dump(F,output,pickle.HIGHEST_PROTOCOL)\noutput.close()","sub_path":"NUR-final/Q1b.py","file_name":"Q1b.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"579160178","text":"import sys\nfrom operator import add\n\nfrom pyspark import SparkContext\n\nfnum = 3\n\n\ndef merge(acc, line):\n    fields = line[1].strip().split('|')\n    if len(fields) != fnum + 1:\n        return acc\n    return (acc[0] + 1, [acc[1][ii] + float(fields[ii + 1])\n                         for ii in range(fnum)], acc[2])\n\n\ndef comb(acc1, acc2):\n    return (acc1[0] + acc2[0], [acc1[1][ii] + acc2[1][ii]\n                                for ii in range(fnum)], acc1[2])\n\n\ndef avgr(elem):\n    if elem[1][0] == 0:\n        return (elem[0], '0', ' '.join(['0.0'] * fnum))\n\n    avgv = ' '.join([str(x / elem[1][0]) for x in elem[1][1]])\n    return (elem[1][2], elem[0], str(elem[1][0]), avgv)\n\n\nif __name__ == \"__main__\":\n    sc = SparkContext(appName=\"PythonWordCount\")\n\n    data_file = sc.parallelize([\n        (u'u1', u'|1.0|2.0|0', 20), (u'u1',\n                                     u'|1.0|2.0|0', 5), (u'u1', u'|1.0|2.0|1.0', 5),\n        (u'u2', u'|2.0|2.0|0', 5), (u'u2', u'|2.0|1.0|0', 10), (u'u3', u'2.0|1.0|0', 10)])\n\n    avg_by_group = (data_file\n                    .map(lambda x: (x[0], x))\n                    .aggregateByKey((0, [0.0] * fnum, '20181204'), merge, comb)\n                    .map(avgr))\n\n    print(avg_by_group.collect())\n","sub_path":"OK/misc/PySpark/SparkGroupReduce.py","file_name":"SparkGroupReduce.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"585134423","text":"from sqlalchemy.orm import Session\nfrom fastapi import status, HTTPException,Response\nfrom blog import models ,schemas\n\ndef get_all(db: Session):\n    blogs = db.query(models.Blog).all()\n    return blogs\n\n\ndef create(request: schemas.Blog, db : Session ):\n    # return {'title':request.title, \"body\":request.body}\n    new_blog = models.Blog(title= request.title, body = request.body,user_id = 1)\n    db.add(new_blog)\n    db.commit()\n    db.refresh(new_blog)\n    return 
new_blog\n\n\ndef destroy(id, db: Session):\n    blog = db.query(models.Blog).filter(models.Blog.id == id)\n    if not blog.first():\n        raise HTTPException(status_code= status.HTTP_404_NOT_FOUND,\n                        detail = f\"Blog with id {id} is not found :( please try again!!\")\n    \n    blog.delete(synchronize_session = False)\n\n    db.commit()\n\n    return \"Successfully deleted\"\n\n\ndef update(id:int, request: schemas.Blog, db:Session ):\n    blog = db.query(models.Blog).filter(models.Blog.id == id)\n    if not blog.first():\n        raise HTTPException(status_code= status.HTTP_404_NOT_FOUND,\n                        detail = f\"Blog with id {id} is not found :( please try again!!\")\n    # SQLAlchemy's Query.update expects a dict, not a Pydantic model\n    blog.update(request.dict())\n    db.commit()\n    return request\n\ndef show(id ,db : Session):\n    blog = db.query(models.Blog).filter(models.Blog.id == id).first()\n    if not blog:\n        raise HTTPException(status_code = status.HTTP_404_NOT_FOUND,\n                        detail = f\"blog with id {id} is not found\")\n        # response.status_code = status.HTTP_404_NOT_FOUND\n        # return {'detail': f\"blog with id : {id} is not found\"}\n    return blog\n","sub_path":"web/blog/repository/blog_func.py","file_name":"blog_func.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"610756753","text":"import functools\nimport numpy as np\n\nfrom federatedml.util import consts\nfrom federatedml.statistic import data_overview\n\nfrom arch.api.utils import log_utils\n\nLOGGER = log_utils.getLogger()\n\n\nclass Imputer(object):\n    \"\"\"\n    This class provides basic strategies for value replacement. It can be used for missing value filling or outlier replacement.\n    You can use statistics such as the mean, median or max of each column to fill missing values or replace outliers.\n    \"\"\"\n\n    def __init__(self, imputer_value_list=None):\n        \"\"\"\n        Parameters\n        ----------\n        imputer_value_list: list of str, the values to be replaced. Default None; if None, it will be set to a list of blank, none, null and na,\n                            which are treated as missing values to fill. 
Otherwise it is used for outlier replacement, and imputer_value_list contains the outlier values\n        \"\"\"\n        if imputer_value_list is None:\n            self.imputer_value_list = ['', 'none', 'null', 'na']\n        else:\n            self.imputer_value_list = imputer_value_list\n\n        self.support_replace_method = ['min', 'max', 'mean', 'designated']\n        # self.support_replace_method = ['min', 'max', 'mean', 'median', 'quantile', 'designated' ]\n        self.support_output_format = {\n            'str': str,\n            'float': float,\n            'int': int,\n            'origin': None\n        }\n\n        self.support_replace_area = {\n            'min': 'col',\n            'max': 'col',\n            'mean': 'col',\n            'median': 'col',\n            'quantile': 'col',\n            'designated': 'col'\n        }\n\n    def get_imputer_value_list(self):\n        return self.imputer_value_list\n\n    @staticmethod\n    def __get_min(data):\n        min_list = None\n        for key, value in data:\n            if min_list is None:\n                min_list = [None for _ in range(len(value))]\n\n            for i in range(len(value)):\n                try:\n                    f_value = float(value[i])\n                except:\n                    f_value = None\n\n                if f_value is None:\n                    continue\n\n                if min_list[i] is None or f_value < min_list[i]:\n                    min_list[i] = f_value\n\n        return min_list\n\n    @staticmethod\n    def __get_max(data):\n        max_list = None\n        for key, value in data:\n            if max_list is None:\n                max_list = [None for i in range(len(value))]\n\n            for i in range(len(value)):\n                try:\n                    f_value = float(value[i])\n                except:\n                    f_value = None\n\n                if f_value is None:\n                    continue\n\n                if max_list[i] is None or f_value > max_list[i]:\n                    max_list[i] = f_value\n\n        return max_list\n\n    @staticmethod\n    def __get_mean(data):\n        cols_value_sum = None\n        cols_value_counter = None\n        for key, value in data:\n            if cols_value_sum is None:\n                cols_value_sum = [0 for i in range(len(value))]\n                cols_value_counter = [0 for i in range(len(value))]\n\n            for i in range(len(value)):\n                try:\n                    f_value = float(value[i])\n                except:\n                    f_value = None\n\n                if f_value is None:\n                    continue\n\n                cols_value_sum[i] += f_value\n                cols_value_counter[i] += 1\n\n        return cols_value_sum, cols_value_counter\n\n    @staticmethod\n    def __replace_missing_value_with_cols_transform_value_format(data, transform_list, missing_value_list,\n                                                                 output_format):\n        for i in range(len(data)):\n            if str(data[i]).lower() in missing_value_list:\n                data[i] = output_format(transform_list[i])\n            else:\n                data[i] = output_format(data[i])\n\n        return data\n\n    @staticmethod\n    def __replace_missing_value_with_cols_transform_value(data, transform_list, missing_value_list):\n        for i in range(len(data)):\n            if str(data[i]).lower() in missing_value_list:\n                data[i] = str(transform_list[i])\n\n        return data\n\n    @staticmethod\n    def __replace_missing_value_with_replace_value_format(data, replace_value, missing_value_list, output_format):\n        for i in range(len(data)):\n            if str(data[i]).lower() in missing_value_list:\n                data[i] = output_format(replace_value)\n            else:\n                data[i] = output_format(data[i])\n\n        return data\n\n    @staticmethod\n    def __replace_missing_value_with_replace_value(data, replace_value, missing_value_list):\n        for i in range(len(data)):\n            if str(data[i]).lower() in missing_value_list:\n                data[i] = str(replace_value)\n\n        return data\n\n    def __get_cols_transform_min_value(self, data):\n        min_lists = data.mapPartitions(Imputer.__get_min)\n        cols_transform_min_value = None\n        for min_tuple in list(min_lists.collect()):\n            if cols_transform_min_value is None:\n                cols_transform_min_value = min_tuple[1]\n            else:\n                # some return of partition maybe None\n                if min_tuple[1] is None:\n                    continue\n\n                for i in range(len(min_tuple[1])):\n                    if min_tuple[1][i] is None:\n                        continue\n\n                    if cols_transform_min_value[i] 
is None:\n cols_transform_min_value[i] = min_tuple[1][i]\n elif min_tuple[1][i] < cols_transform_min_value[i]:\n cols_transform_min_value[i] = min_tuple[1][i]\n return cols_transform_min_value\n\n def __get_cols_transform_max_value(self, data):\n max_lists = data.mapPartitions(Imputer.__get_max)\n cols_transform_max_value = None\n for max_tuple in list(max_lists.collect()):\n if cols_transform_max_value is None:\n cols_transform_max_value = max_tuple[1]\n else:\n # some return of partition maybe None\n if max_tuple[1] is None:\n continue\n\n for i in range(len(max_tuple[1])):\n if max_tuple[1][i] is None:\n continue\n\n if cols_transform_max_value[i] is None:\n cols_transform_max_value[i] = max_tuple[1][i]\n elif max_tuple[1][i] > cols_transform_max_value[i]:\n cols_transform_max_value[i] = max_tuple[1][i]\n return cols_transform_max_value\n\n def __get_cols_transform_mean_value(self, data):\n get_mean_results = data.mapPartitions(Imputer.__get_mean)\n cols_sum = None\n cols_counter = None\n\n for value_tuple in list(get_mean_results.collect()):\n if cols_sum is None:\n cols_sum = [0 for i in range(len(value_tuple[1][0]))]\n if cols_counter is None:\n cols_counter = [0 for i in range(len(value_tuple[1][1]))]\n\n value_sum = value_tuple[1][0]\n value_counter = value_tuple[1][1]\n # some return of partition maybe None\n if value_sum is None and value_counter is None:\n continue\n\n for i in range(len(value_sum)):\n if value_sum[i] is None:\n LOGGER.debug(\"col {} of cols_sum is None, continue\".format(i))\n continue\n\n cols_sum[i] += value_sum[i]\n\n for i in range(len(value_counter)):\n if value_counter[i] is None:\n LOGGER.debug(\"col {} of cols_counter is None, continue\".format(i))\n continue\n\n cols_counter[i] += value_counter[i]\n\n if cols_sum is None or cols_counter is None:\n raise ValueError(\"Something wrong with data\")\n\n cols_transform_mean_value = None\n for i in range(len(cols_sum)):\n if cols_sum[i] is None or cols_counter[i] is None:\n raise ValueError(\"Something wrong with cols_sum or cols_counter\")\n\n if cols_transform_mean_value is None:\n cols_transform_mean_value = [None for i in range(len(cols_sum))]\n\n if cols_counter[i] == 0:\n cols_transform_mean_value[i] = 0\n else:\n cols_transform_mean_value[i] = np.around(cols_sum[i] / cols_counter[i], 6)\n\n if None in cols_transform_mean_value:\n raise ValueError(\"Some of value in cols_transform_mean_value is None, please check it\")\n\n return cols_transform_mean_value\n\n def __get_cols_transform_value(self, data, replace_method='0'):\n if replace_method == consts.MIN:\n cols_transform_value = self.__get_cols_transform_min_value(data)\n elif replace_method == consts.MAX:\n cols_transform_value = self.__get_cols_transform_max_value(data)\n elif replace_method == consts.MEAN:\n cols_transform_value = self.__get_cols_transform_mean_value(data)\n else:\n raise ValueError(\"Unknown replace method:{}\".format(replace_method))\n\n return cols_transform_value\n\n def __replace(self, data, replace_method, replace_value=None, output_format=None):\n if replace_method is not None and replace_method != consts.DESIGNATED:\n cols_transform_value = self.__get_cols_transform_value(data, replace_method)\n if output_format is not None:\n f = functools.partial(Imputer.__replace_missing_value_with_cols_transform_value_format,\n transform_list=cols_transform_value, missing_value_list=self.imputer_value_list,\n output_format=output_format)\n else:\n f = functools.partial(Imputer.__replace_missing_value_with_cols_transform_value,\n 
transform_list=cols_transform_value, missing_value_list=self.imputer_value_list)\n\n transform_data = data.mapValues(f)\n LOGGER.debug(\n \"finish replace missing value with cols transform value, replace method is {}\".format(replace_method))\n return transform_data, cols_transform_value\n else:\n if replace_value is None:\n raise ValueError(\"Replace value should not be None\")\n if output_format is not None:\n f = functools.partial(Imputer.__replace_missing_value_with_replace_value_format,\n replace_value=replace_value, missing_value_list=self.imputer_value_list,\n output_format=output_format)\n else:\n f = functools.partial(Imputer.__replace_missing_value_with_replace_value, replace_value=replace_value,\n missing_value_list=self.imputer_value_list)\n transform_data = data.mapValues(f)\n\n LOGGER.debug(\"finish replace missing value with replace value {}\".format(replace_value))\n shape = data_overview.get_data_shape(data)\n replace_value = [replace_value for _ in range(shape)]\n\n return transform_data, replace_value\n\n def __transform_replace(self, data, transform_value, replace_area, output_format):\n LOGGER.debug(\"replace_area:{}\".format(replace_area))\n if replace_area == 'all':\n if output_format is not None:\n f = functools.partial(Imputer.__replace_missing_value_with_replace_value_format,\n replace_value=transform_value, missing_value_list=self.imputer_value_list,\n output_format=output_format)\n else:\n f = functools.partial(Imputer.__replace_missing_value_with_replace_value,\n replace_value=transform_value, missing_value_list=self.imputer_value_list)\n elif replace_area == 'col':\n if output_format is not None:\n f = functools.partial(Imputer.__replace_missing_value_with_cols_transform_value_format,\n transform_list=transform_value, missing_value_list=self.imputer_value_list,\n output_format=output_format)\n else:\n f = functools.partial(Imputer.__replace_missing_value_with_cols_transform_value,\n transform_list=transform_value, missing_value_list=self.imputer_value_list)\n else:\n raise ValueError(\"Unknown replace area {} in Imputer\".format(replace_area))\n\n transform_data = data.mapValues(f)\n return transform_data\n\n def fit(self, data, replace_method=None, replace_value=None, output_format=consts.ORIGIN):\n \"\"\"\n Apply imputer for input data\n Parameters\n ----------\n data: data_instance, input data\n replace_method: str, the strategy of imputer, like min, max, mean or designated and so on. Default None\n replace_value: str, if replace_method is designated, you should assign the replace_value which will be used to replace the value in imputer_value_list\n output_format: str, the output data format. The output data can be 'str', 'int', 'float'. 
Default origin, the original format as input data\n\n        Returns\n        ----------\n        fit_data:data_instance, data after imputer\n        cols_transform_value: list, the replace value in each column\n        \"\"\"\n        if output_format not in self.support_output_format:\n            raise ValueError(\"Unsupported output_format:{}\".format(output_format))\n\n        output_format = self.support_output_format[output_format]\n\n        if isinstance(replace_method, str):\n            replace_method = replace_method.lower()\n            if replace_method not in self.support_replace_method:\n                raise ValueError(\"Unknown replace method in Imputer\")\n\n            process_data, cols_transform_value = self.__replace(data, replace_method, replace_value, output_format)\n            return process_data, cols_transform_value\n        elif replace_method is None:\n            replace_value = '0'\n            process_data, replace_value = self.__replace(data, replace_method, replace_value, output_format)\n            return process_data, replace_value\n        else:\n            raise ValueError(\"parameter replace_method should be str or None only\")\n\n    def transform(self, data, replace_method=None, transform_value=None, output_format=consts.ORIGIN):\n        \"\"\"\n        Transform input data using imputer with fit results\n        Parameters\n        ----------\n        data: data_instance, input data\n        replace_method: str, the strategy of imputer, like min, max, mean or designated and so on. Default None\n        output_format: str, the output data format. The output data can be 'str', 'int', 'float'. Default origin, the original format as input data\n\n        Returns\n        ----------\n        transform_data:data_instance, data after transform\n        \"\"\"\n        if output_format not in self.support_output_format:\n            raise ValueError(\"Unsupported output_format:{}\".format(output_format))\n\n        output_format = self.support_output_format[output_format]\n\n        # Currently every replace_method maps to \"col\"; replace_area is kept temporarily\n        LOGGER.debug(\"replace_method:{}\".format(replace_method))\n        # replace_area = self.support_replace_area[replace_method]\n        replace_area = \"col\"\n        process_data = self.__transform_replace(data, transform_value, replace_area, output_format)\n\n        return process_data\n","sub_path":"federatedml/feature/imputer.py","file_name":"imputer.py","file_ext":"py","file_size_in_byte":15726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"618119433","text":"\"\"\"\n * Copyright 2020, Departamento de sistemas y Computación\n * Universidad de Los Andes\n *\n *\n * Desarrolado para el curso ISIS1225 - Estructuras de Datos y Algoritmos\n *\n *\n * This program is free software: you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program.  
If not, see <http://www.gnu.org/licenses/>.\n \"\"\"\nimport config\nimport csv\nfrom DISClib.ADT import list as lt\nfrom DISClib.ADT import map as mp\nfrom DISClib.DataStructures import mapentry as me\nfrom time import process_time \nassert config\n\n\"\"\"\nIn this file we define the ADTs that we are going to use;\nthat is, it contains the models with the data in memory\n\n\"\"\"\n\n# -----------------------------------------------------\n# API of the movie catalog ADT\n# -----------------------------------------------------\ndef newCatalog():\n    \"\"\" Initializes the movie catalog\n    Creates an empty list to store all the movies\n    Indexes (Maps) are created for the following criteria:\n    Returns the initialized catalog.\n    \"\"\"\n    catalog = {'original_title': None,\n               'id': None,\n               'date': None,\n               'average_count': None,\n               'vote_count': None,\n               'language': None}\n\n    catalog['original_title'] = lt.newList('SINGLE_LINKED', compareMoviesIds)\n\n    catalog['id'] = mp.newMap(200,\n                              maptype='PROBING',\n                              loadfactor=0.4,\n                              comparefunction=compareMapMovieIds)\n    catalog['date'] = mp.newMap(200,\n                                maptype='PROBING',\n                                loadfactor=0.4,\n                                comparefunction=compareMapYear)\n    catalog['average_count'] = mp.newMap(1000,\n                                         maptype='CHAINING',\n                                         loadfactor=0.7,\n                                         comparefunction=compareMapAverage)\n    catalog['vote_count'] = mp.newMap(1000,\n                                      maptype='CHAINING',\n                                      loadfactor=0.7,\n                                      comparefunction=compareMapVote)\n    catalog['language'] = mp.newMap(500,\n                                    maptype='CHAINING',\n                                    loadfactor=0.7,\n                                    comparefunction=compareMapLanguage)\n\n    return catalog\n\n# ==============================\n# Query functions\n# ==============================\n\n\n# ==============================\n# Comparison functions\n# ==============================\ndef compareMoviesIds(id1, id2):\n    \"\"\"\n    Compares the names of the movies\n    \"\"\"\n    if (id1[0] < id2[0]):\n        return 0\n    elif id1[0] > id2[0]:\n        return 1\n    else:\n        return -1\n\n\ndef compareMapMovieIds(id, entry):\n    \"\"\"\n    Compares two movie ids; id is an identifier\n    and entry a key-value pair\n    \"\"\"\n    identry = me.getKey(entry)\n    if (int(id) == int(identry)):\n        return 0\n    elif (int(id) > int(identry)):\n        return 1\n    else:\n        return -1\n\ndef compareMapYear(id, entry):\n    \"\"\"\n    Compares two movie dates; id is an identifier\n    and entry a key-value pair\n    \"\"\"\n    yearentry = me.getKey(entry)\n    if (int(id) == int(yearentry)):\n        return 0\n    elif (int(id) > int(yearentry)):\n        return 1\n    else:\n        return -1\ndef compareMapAverage(id, entry):\n    \"\"\"\n    Compares two average ratings; id is an identifier\n    and entry a key-value pair\n    \"\"\"\n    aventry = me.getKey(entry)\n    if (float(id) == float(aventry)):\n        return 0\n    elif (float(id) > float(aventry)):\n        return 1\n    else:\n        return -1\ndef compareMapVote(id, entry):\n    \"\"\"\n    Compares two vote counts; id is an identifier\n    and entry a key-value pair\n    \"\"\"\n    identry = me.getKey(entry)\n    if (float(id) == float(identry)):\n        return 0\n    elif (float(id) > float(identry)):\n        return 1\n    else:\n        return -1\ndef compareMapLanguage(id, entry):\n    \"\"\"\n    Compares two languages; id is an identifier\n    and entry a key-value pair\n    \"\"\"\n    identry = me.getKey(entry)\n    if (id[0] == identry[0]):\n        return 0\n    elif (id[0] > identry[0]):\n        return 1\n    else:\n        return -1\n","sub_path":"App/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"104258290","text":"import os\n\n\n\n\ndef define_env(env):\n    
\"\"\"\n This is the hook for the functions (new form)\n \"\"\"\n\n env.variables.cwd = os.getcwd()\n\n # use dot notation for adding\n env.variables.baz = env.variables.fix_url('foo')\n\n @env.macro\n def include_file(filename, start_line=0, end_line=None):\n \"\"\"\n Include a file, optionally indicating start_line and end_line\n (start counting from 0)\n The path is relative to the top directory of the documentation\n project.\n \"\"\"\n full_filename = os.path.join(env.project_dir, filename)\n with open(full_filename, 'r') as f:\n lines = f.readlines()\n line_range = lines[start_line:end_line]\n return '\\n'.join(line_range)\n \n\n @env.macro\n def doc_env():\n \"Document the environment\"\n return {name:getattr(env, name) for name in dir(env) if not name.startswith('_')}\n\n \n env.variables.special_docs_dir = env.variables.config['docs_dir']","sub_path":"test/module/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"312825950","text":"from django.http.response import Http404\nfrom .email import send_verification_email\nfrom django.shortcuts import render,redirect, get_object_or_404\nfrom .forms import *\nfrom django.http import HttpResponse,HttpResponseRedirect,Http404,JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Group\n\nfrom django.urls import reverse\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom .decorators import unauthenticated_user,allowed_users,admin_only\n\nfrom .models import JobSeeker,Employer \nfrom .models import User\n\n# Create your views here.\n\ndef index(request):\n return render(request,'index.html')\n\n@unauthenticated_user\ndef register(request):\n return render(request,'registration/register.html')\n\n@unauthenticated_user\ndef registerJobseeker(request):\n registered=False\n if request.method=='POST':\n job_seeker_form=UserSignUpForm(request.POST)\n if job_seeker_form.is_valid():\n user=job_seeker_form.save()\n user.refresh_from_db()\n user.email = job_seeker_form.cleaned_data.get('email')\n user.is_jobseeker = True\n group, created = Group.objects.get_or_create(name='jobseeker')\n group = Group.objects.get(name = 'jobseeker')\n user.groups.add(group)\n user.save()\n registered=True\n return redirect('login')\n else:\n job_seeker_form=UserSignUpForm()\n return render(request,'registration/registerJobseeker.html',{'job_seeker_form':job_seeker_form,'registered':registered})\n\n@unauthenticated_user\ndef registerEmployer(request):\n registered=False\n if request.method=='POST':\n employer_form=UserSignUpForm(request.POST)\n if employer_form.is_valid():\n user=employer_form.save()\n user.refresh_from_db()\n user.email = employer_form.cleaned_data.get('email')\n user.is_employer = True\n group, created = Group.objects.get_or_create(name='employer')\n group = Group.objects.get(name = 'employer')\n user.groups.add(group)\n user.save()\n registered=True\n return redirect('login')\n else:\n employer_form=UserSignUpForm()\n \n return render(request,'registration/registerEmployer.html',{'employer_form':employer_form,'registered':registered})\n\n@unauthenticated_user \ndef login(request):\n if request.method == 'POST':\n form = AuthenticationForm(request=request, 
data=request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n email = form.cleaned_data.get('email')\n phone = form.cleaned_data.get('phone')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username,email=email,phone=phone, password=password)\n if user is not None:\n auth_login(request, user)\n messages.info(request, f\"You are now logged in as {username}\")\n return redirect('index')\n else:\n messages.error(request, \"Invalid username or password.\")\n else:\n messages.error(request, \"Invalid username or password.\")\n form = AuthenticationForm()\n return render(request = request,template_name = \"registration/login.html\",context={\"form\":form})\n\n@login_required\ndef dashboard(request):\n current = request.user\n if current.is_employer:\n return redirect('employer_profile/')\n elif current.is_admin:\n return redirect('admin_dashboard')\n else: \n return redirect('jobseekerDash/')\n return render(request,'dashboard.html')\n\n@login_required\n@allowed_users(allowed_roles=['admin','jobseeker'])\ndef jobseeker_profile(request):\n current_user = request.user\n documents = FileUpload.objects.filter(jobseeker_id = current_user.id).all()\n \n return render(request,'jobseekers/profile.html',{\"documents\":documents,\"current_user\":current_user})\n\n@login_required\n@allowed_users(allowed_roles=['admin','jobseeker'])\ndef update_jobseeker_profile(request):\n if request.method == 'POST':\n user_form = UpdateUserProfile(request.POST,request.FILES,instance=request.user)\n jobseeker_form = UpdateJobseekerProfile(request.POST,instance=request.user.profile)\n if user_form.is_valid() and jobseeker_form.is_valid():\n user_form.save()\n jobseeker_form.save()\n messages.success(request,'Your Profile account has been updated successfully')\n return redirect('jobseeker_profile')\n else:\n user_form = UpdateUserProfile(instance=request.user)\n jobseeker_form = UpdateJobseekerProfile(instance=request.user.profile) \n params = {\n 'user_form':user_form,\n 'jobseeker_form':jobseeker_form\n }\n return render(request,'jobseekers/update.html',params)\n\n@login_required\n@allowed_users(allowed_roles=['admin','jobseeker'])\ndef jobseekerDash(request):\n return render(request,'jobseekers/jobseeker_dashboard.html')\n\n@login_required\n@allowed_users(allowed_roles=['admin','jobseeker'])\ndef upload_file(request):\n if request.method == 'POST':\n upload_form = UploadFileForm(request.POST, request.FILES)\n if upload_form.is_valid():\n upload = upload_form.save(commit=False)\n upload.jobseeker = request.user.profile\n upload.save()\n return redirect('jobseeker_profile')\n else:\n upload_form = UploadFileForm()\n return render(request, 'jobseekers/upload_file.html', {'upload_form': upload_form})\n\n# employers and misc\n@login_required\n@allowed_users(allowed_roles=['admin','employer'])\ndef employerDash(request):\n job_seekers = User.objects.filter(verified = True,is_jobseeker = True).all()\n employer=Employer.objects.all()\n context={\n \"job_seekers\":job_seekers,\n \"employer\":employer\n }\n return render(request,'employers/employer_dashboard.html',context)\n\n@login_required\n@allowed_users(allowed_roles=['admin','employer'])\ndef employerProfile(request,id):\n employer=Employer.objects.get(id=id)\n context={\n \"employer\":employer,\n }\n return render(request,'employers/employer_profile.html',context)\n \n# test\n@login_required\n@allowed_users(allowed_roles=['admin','employer'])\ndef employerProfile(request):\n employer=request.user\n context={\n 
\"employer\":employer,\n }\n return render(request,'employers/employer_profile.html',context)\n \n\n# update employers\n@login_required\n@allowed_users(allowed_roles=['admin','employer'])\ndef update_employer(request):\n if request.method == 'POST':\n u_form = UpdateUserProfile(request.POST,request.FILES,instance=request.user)\n p_form = UpdateEmployerProfile(request.POST,instance=request.user.employer)\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()\n p_form.save()\n messages.success(request,'Your Profile account has been updated successfully')\n return redirect('employer_profile')\n else:\n u_form = UpdateUserProfile(instance=request.user)\n p_form = UpdateEmployerProfile(instance=request.user.employer) \n context = {\n 'u_form':u_form,\n 'p_form':p_form\n }\n return render(request,'employers/update_employer.html',context)\n\n \n# specific jobseeker\n@login_required\n@allowed_users(allowed_roles=['admin','employer'])\ndef single_jobseeker(request,jobseeker_id):\n try:\n jobseeker =get_object_or_404(JobSeeker, pk = jobseeker_id)\n documents = FileUpload.objects.filter(jobseeker_id = jobseeker_id)\n portfolios=Portfolio.objects.filter(jobseeker_id = jobseeker_id)\n\n except ObjectDoesNotExist:\n raise Http404()\n\n return render(request,'employers/single_jobseeker.html',{'documents':documents, 'jobseeker':jobseeker,\"portfolios\":portfolios})\n\n\n# jobseeker update portfolio\ndef add_portfolios(request):\n if request.method == 'POST':\n port_form=AddPortfolio(request.POST,instance=request.user)\n if port_form.is_valid():\n port_form.save()\n messages.success(request,'Your Portfolio has been added')\n return redirect('jobseeker_profile')\n else:\n port_form = AddPortfolio(instance=request.user)\n context = {\n 'port_form': port_form,\n }\n return render(request,\"jobseekers/portfolio.html\",context)\n\n# admin\n\n@login_required\n@admin_only\ndef adminDash(request):\n all_jobseekers = User.objects.filter(is_jobseeker=True).all()\n verified_jobseekers = User.objects.filter(verified=True,is_jobseeker = True).all()\n unverified_jobseekers = User.objects.filter(verified = False,is_jobseeker = True).all()\n return render(request,'admin/admin_dashboard.html',{'verified_jobseekers':verified_jobseekers,'unverified_jobseekers':unverified_jobseekers,'all_jobseekers':all_jobseekers})\n\n# ADMIN VIEWS\n# JobSeeker views\n@allowed_users(allowed_roles=['admin'])\n@login_required\ndef all_jobseekers(request):\n all_jobseekers = User.objects.filter(is_jobseeker=True).all()\n return render(request,'admin/jobseekers/all_jobseekers.html',{'all_jobseekers':all_jobseekers})\n\n@login_required\n@allowed_users(allowed_roles=['admin'])\ndef verified_jobseekers(request):\n verified_jobseekers = User.objects.filter(verified=True,is_jobseeker = True).all()\n return render(request,'admin/jobseekers/verified_jobseekers.html',{'verified_jobseekers':verified_jobseekers})\n\n@login_required\n@allowed_users(allowed_roles=['admin'])\ndef unverified_jobseekers(request):\n unverified_jobseekers = User.objects.filter(verified = False,is_jobseeker = True).all()\n return render(request,'admin/jobseekers/unverified_jobseekers.html',{'unverified_jobseekers':unverified_jobseekers})\n\n@login_required\n@allowed_users(allowed_roles=['admin'])\ndef verify_jobseeker(request, jobseeker_id):\n user = User.objects.get(pk=jobseeker_id)\n name = user.username\n email = user.email\n if request.method == 'POST':\n verify_jobseeker_form = AdminVerifyUserForm(request.POST,request.FILES, instance=user)\n if 
verify_jobseeker_form.is_valid():\n verify_jobseeker_form.save()\n send_verification_email(name, email)\n data = {'success': 'Verification sent'}\n messages.success(request, f'jobseeker updated!')\n return redirect('admin_dashboard')\n else:\n verify_jobseeker_form = AdminVerifyUserForm(instance=user)\n\n return render(request, 'admin/jobseekers/verify_jobseeker.html', {\"verify_jobseeker_form\":verify_jobseeker_form})\n\n@login_required\n@allowed_users(allowed_roles=['admin'])\ndef delete_jobseeker(request,jobseeker_id):\n jobseeker = User.objects.get(pk=jobseeker_id)\n if jobseeker:\n jobseeker.delete_user()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n#Get single jobseeker\n@login_required\n@allowed_users(allowed_roles=['admin'])\ndef jobseeker_details(request,jobseeker_id):\n try:\n jobseeker =get_object_or_404(JobSeeker, pk = jobseeker_id)\n\n except ObjectDoesNotExist:\n raise Http404()\n\n return render(request,'admin/jobseekers/jobseeker_details.html',{'jobseeker':jobseeker})\n\n\n #Admin Employer views\n@login_required\n@allowed_users(allowed_roles=['admin'])\ndef all_employers(request):\n all_employers = User.objects.filter(is_employer=True).all()\n return render(request,'admin/employers/all_employers.html',{'all_employers':all_employers})\n\n@login_required\n@allowed_users(allowed_roles=['admin'])\ndef verified_employers(request):\n verified_employers = User.objects.filter(verified = True,is_employer = True).all()\n return render(request,'admin/employers/verified_employers.html',{'verified_employers':verified_employers})\n\n@login_required\n@allowed_users(allowed_roles=['admin'])\ndef unverified_employers(request):\n unverified_employers = User.objects.filter(verified = False,is_employer=True).all()\n return render(request,'admin/employers/unverified_employers.html',{'unverified_employers':unverified_employers})\n\n@login_required\n@allowed_users(allowed_roles=['admin'])\ndef verify_employer(request, employer_id):\n employer = Employer.objects.get(pk=employer_id)\n if request.method == 'POST':\n update_employer_form = AdminVerifyUserForm(request.POST,request.FILES, instance=employer)\n if update_employer_form.is_valid():\n update_employer_form.save()\n messages.success(request, f'employer updated!')\n return redirect('admin_dashboard')\n else:\n update_employer_form = AdminVerifyUserForm(instance=employer)\n\n return render(request, 'admin/employers/update_employer.html', {\"update_employer_form\":update_employer_form})\n\n@login_required\n@allowed_users(allowed_roles=['admin'])\ndef delete_employer(request,employer_id):\n employer = User.objects.get(pk=employer_id)\n if employer:\n employer.delete_user()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n#Get single employer\n@login_required\n@allowed_users(allowed_roles=['admin'])\ndef employer_details(request,employer_id):\n try:\n employer =get_object_or_404(Employer, pk = employer_id)\n \n \n except ObjectDoesNotExist:\n raise Http404()\n\n return render(request,'admin/employers/employers_details.html',{'employer':employer})","sub_path":"Job_Seeking_App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"381162635","text":"import datetime\nimport json\nimport base64\nimport httplib2\n\nfrom oauth2client.client import OAuth2Credentials\n\nfrom handlers.handler_base import JBoxHandler\n\nfrom jbox_util import unique_sessname, CloudHelper\nfrom jbox_crypto import 
signstr\nfrom handlers.auth import AuthHandler\nfrom db.user_v2 import JBoxUserV2\nfrom db.invites import JBoxInvite\nfrom jbox_container import JBoxContainer\n\n\nclass MainHandler(JBoxHandler):\n def get(self):\n jbox_cookie = AuthHandler.get_session_cookie(self)\n\n if self.config(\"invite_only\"):\n if self.get_argument(\"invite\", False):\n self.set_cookie(\"is_invite\", \"yes\")\n self.redirect('/hostlaunchipnb/')\n return\n\n if None == jbox_cookie:\n which_msg = int(self.get_argument(\"_msg\", JBoxUserV2.ACTIVATION_NONE))\n if self.get_argument(\"_msg\", \"\") != \"\":\n self.clear_cookie(\"is_invite\")\n if which_msg == JBoxUserV2.ACTIVATION_GRANTED:\n msg = \"Your account has already been approved\"\n elif which_msg == JBoxUserV2.ACTIVATION_REQUESTED:\n msg = \"You have already registered for an invite\"\n else:\n msg = \"Thank you for your interest! We will get back to you with an invitation soon.\"\n state = self.state(success=msg)\n else:\n state = self.state()\n self.rendertpl(\"index.tpl\", cfg=self.config(), state=state)\n else:\n user_id = jbox_cookie['u']\n sessname = unique_sessname(user_id)\n\n if self.config(\"gauth\"):\n try:\n jbuser = JBoxUserV2(user_id)\n except:\n # stale cookie. we don't have the user in our database anymore\n self.log_info(\"stale cookie. we don't have the user in our database anymore. user: \" + user_id)\n self.redirect('/hostlaunchipnb/')\n return\n\n if self.config(\"invite_only\"):\n code, status = jbuser.get_activation_state()\n if status != JBoxUserV2.ACTIVATION_GRANTED:\n invite_code = self.get_argument(\"invite_code\", False)\n if invite_code is not False:\n try:\n invite = JBoxInvite(invite_code)\n except:\n invite = None\n\n if (invite is not None) and invite.is_invited(user_id):\n jbuser.set_activation_state(invite_code, JBoxUserV2.ACTIVATION_GRANTED)\n invite.increment_count()\n invite.save()\n jbuser.save()\n self.redirect('/hostlaunchipnb/')\n return\n else:\n error_msg = 'You entered an invalid invitation code. Try again or request a new invitation.'\n else:\n error_msg = 'Enter an invitation code to proceed.'\n\n self.rendertpl(\"index.tpl\", cfg=self.config(), state=self.state(\n error=error_msg,\n ask_invite_code=True, user_id=user_id))\n return\n\n creds = jbuser.get_gtok()\n if creds is not None:\n try:\n creds_json = json.loads(base64.b64decode(creds))\n creds_json = self.renew_creds(creds_json)\n authtok = creds_json['access_token']\n except:\n self.log_info(\"stale stored creds. will renew on next use. 
user: \" + user_id)\n creds = None\n authtok = None\n else:\n authtok = None\n else:\n creds = None\n authtok = None\n\n self.chk_and_launch_docker(sessname, creds, authtok, user_id)\n\n def clear_container_cookies(self):\n for name in [\"sessname\", \"hostshell\", \"hostupload\", \"hostipnb\", \"sign\"]:\n self.clear_cookie(name)\n\n def set_container_cookies(self, cookies):\n max_session_time = self.config('expire')\n if max_session_time == 0:\n max_session_time = AuthHandler.AUTH_VALID_SECS\n expires = datetime.datetime.utcnow() + datetime.timedelta(seconds=max_session_time)\n\n for n, v in cookies.iteritems():\n self.set_cookie(n, str(v), expires=expires)\n\n def set_lb_tracker_cookie(self):\n self.set_cookie('lb', signstr(CloudHelper.instance_id(), self.config('sesskey')), expires_days=30)\n\n def chk_and_launch_docker(self, sessname, creds, authtok, user_id):\n cont = JBoxContainer.get_by_name(sessname)\n nhops = int(self.get_argument('h', 0))\n self.log_debug(\"got hop \" + repr(nhops) + \" for session \" + repr(sessname))\n self.log_debug(\"have existing container for \" + repr(sessname) + \": \" + repr(None != cont))\n if cont is not None:\n self.log_debug(\"container running: \" + str(cont.is_running()))\n\n if ((None == cont) or (not cont.is_running())) and (not CloudHelper.should_accept_session()):\n if None != cont:\n cont.backup()\n cont.delete()\n self.clear_container_cookies()\n self.set_header('Connection', 'close')\n self.request.connection.no_keep_alive = True\n if nhops > self.config('numhopmax', 0):\n self.rendertpl(\"index.tpl\", cfg=self.config(), state=self.state(\n error=\"Maximum number of JuliaBox instances active. Please try after sometime.\", success=''))\n else:\n self.redirect('/?h=' + str(nhops + 1))\n else:\n cont = JBoxContainer.launch_by_name(sessname, user_id, True)\n (shellport, uplport, ipnbport) = cont.get_host_ports()\n sign = signstr(sessname + str(shellport) + str(uplport) + str(ipnbport), self.config(\"sesskey\"))\n\n self.set_container_cookies({\n \"sessname\": sessname,\n \"hostshell\": shellport,\n \"hostupload\": uplport,\n \"hostipnb\": ipnbport,\n \"sign\": sign\n })\n self.set_lb_tracker_cookie()\n self.rendertpl(\"ipnbsess.tpl\", sessname=sessname, cfg=self.config(), creds=creds, authtok=authtok,\n user_id=user_id)\n\n @staticmethod\n def renew_creds(creds):\n creds = OAuth2Credentials.from_json(json.dumps(creds))\n http = httplib2.Http(disable_ssl_certificate_validation=True) # pass cacerts otherwise\n creds.refresh(http)\n creds = json.loads(creds.to_json())\n return creds\n\n @staticmethod\n def state(**kwargs):\n s = dict(error=\"\", success=\"\", info=\"\", ask_invite_code=False, user_id=\"\")\n s.update(**kwargs)\n return s\n\n","sub_path":"host/tornado/src/handlers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"66175698","text":"#! 
/usr/bin/env python3\n\nfrom nutils import mesh, plot, cli, log, library, function, debug\nimport numpy\n\n\n@log.title\ndef makeplots( domain, geom, stress ):\n\n points, colors = domain.elem_eval( [ geom, stress[0,1] ], ischeme='bezier3', separate=True )\n with plot.PyPlot( 'stress', ndigits=0 ) as plt:\n plt.mesh( points, colors, tight=False )\n plt.colorbar()\n\n\ndef main(\n nelems: 'number of elements' = 12,\n lmbda: 'first lamé constant' = 1.,\n mu: 'second lamé constant' = 1.,\n degree: 'polynomial degree' = 2,\n withplots: 'create plots' = True,\n solvetol: 'solver tolerance' = 1e-10,\n ):\n\n # construct mesh\n verts = numpy.linspace( 0, 1, nelems+1 )\n domain, geom = mesh.rectilinear( [verts,verts] )\n\n # prepare basis\n dbasis = domain.basis( 'spline', degree=degree ).vector( 2 )\n\n # construct matrix\n stress = library.Hooke( lmbda=lmbda, mu=mu )\n elasticity = function.outer( dbasis.grad(geom), stress(dbasis.symgrad(geom)) ).sum([2,3])\n matrix = domain.integrate( elasticity, geometry=geom, ischeme='gauss2' )\n\n # construct dirichlet boundary constraints\n cons = domain.boundary['left'].project( 0, geometry=geom, onto=dbasis, ischeme='gauss2' ) \\\n | domain.boundary['right'].project( .5, geometry=geom, onto=dbasis.dotnorm(geom), ischeme='gauss2' )\n\n # solve system\n lhs = matrix.solve( constrain=cons, tol=solvetol, symmetric=True, precon='diag' )\n\n # construct solution function\n disp = dbasis.dot( lhs )\n\n # plot solution\n if withplots:\n makeplots( domain, geom+disp, stress(disp.symgrad(geom)) )\n\n return lhs, cons\n\n\ndef unittest():\n\n retvals = main( nelems=4, degree=1, withplots=False, solvetol=0 )\n assert debug.checkdata( retvals, '''\n eNqlkEsKwzAMRK8Tg1ysnz/H6aLb3H9Zx7LaOhQKKVjMSG/AQgibAEqAbUs3+HzInB+xQxQxZVn6yUmZ\n hgrrUG649JNz0anYTFNae+OabP5LT+vmKn2sQKXUQ/souo8OKyc8VIjUQ+6jw5pLGyGh9gpNHx3WkthC\n zO+Q+eiwX/W05X7fL9fFw/zz5bcKEJ7x0YpY''' )\n\n retvals = main( nelems=4, degree=2, withplots=False, solvetol=0 )\n assert debug.checkdata( retvals, '''\n eNq1ksEOwyAIhl+nTXQRENDH2aHXvv9xFqRJ1+ywLEtqvr/wi0gLaakJ6pqWpTzS29NA+5Y5cSlqpE4X\n znj4oGPd8qjXjvdBZb4w4tNHQMUJziJyYcSnr5LSJDpZroy4+0Z/5RveJ8C92M1QBe2mDKOypHyKyOSw\n aod2UKhWG4oqmOEUkclhbQLoW6TYaQRuOEVkclgbixUTISMCiW8JEZkc1ibkjdVmROR5SojI5LCO3+I+\n k/25/3X9/tX+3eGntab1Bd2X0og=''' )\n\n\nif __name__ == '__main__':\n cli.choose( main, unittest )\n","sub_path":"examples/elasticity.py","file_name":"elasticity.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"291138953","text":"#!/usr/bin/env python\n\n\ndef addcomplex(x,y):\n xr = x[0]\n xi = x[1]\n yr = y[0]\n yi = y[1]\n soma_r = xr + yr\n soma_i = xi + yi\n soma = (soma_r , soma_i)\n return soma\n\ndef multiplycomplex(x,y):\n xr = x[0]\n xi = x[1]\n yr = y[0]\n yi = y[1]\n # complex product: (a+bi)*(c+di) = (ac - bd) + (ad + bc)i\n mult_r = xr * yr - xi * yi\n mult_i = xr * yi + xi * yr\n mult = (mult_r, mult_i)\n return mult\n\ndef printcomplex(x):\n print(str(x[0]) + \", \" + str(x[1]) + \"i\")\n\n\n\ndef main():\n c1 = (5, 3)\n c2 = (-2, 7)\n\n # Test add\n c3 = addcomplex(c1, c2)\n printcomplex(c3)\n\n # test multiply\n printcomplex(multiplycomplex(c1, c2))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"part3/ex2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"212282156","text":"import json\nimport time\nimport hmac\nimport hashlib\nimport requests\n\nfrom django.conf import
settings\n\n\n# def send_slack_message(text, channel=\"#robot-dojo\"):\n# '''needs to be deprecated, you gotta fix the jokes first'''\n# endpoint = 'https://slack.com/api/chat.postMessage'\n# headers = {\n# 'Content-Type': 'application/json; charset=utf-8',\n# 'Authorization': 'Bearer {}'.format(settings.SLACK_AUTH_TOKEN)\n# }\n# payload = {\n# 'text': text,\n# 'channel': channel\n# }\n# r = requests.post(endpoint, data=json.dumps(payload), headers=headers)\n# if r.status_code == 200:\n# return True\n# return False\n\ndef send_slack_message(blocks, channel=\"#robot-dojo\"):\n endpoint = 'https://slack.com/api/chat.postMessage'\n headers = {\n 'Content-Type': 'application/json; charset=utf-8',\n 'Authorization': 'Bearer {}'.format(settings.SLACK_AUTH_TOKEN)\n }\n payload = {\n # 'text': text,\n 'blocks': blocks,\n 'channel': channel\n }\n print(payload)\n r = requests.post(endpoint, data=json.dumps(payload), headers=headers)\n print(r.content)\n if r.status_code == 200:\n return True\n return False\n\ndef build_slack_mrkdwn_block(text):\n return {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": text\n }\n }\n\n\ndef verify_slack_request(request):\n if settings.REQUIRE_AUTHED_SLACK_REQUESTS:\n try:\n timestamp = request.headers['X-Slack-Request-Timestamp']\n print(timestamp)\n slack_signature = request.headers['X-Slack-Signature']\n except:\n return False\n\n if abs(time.time() - int(timestamp)) > 60 * 5:\n # The request timestamp is more than five minutes from local time. It could be a replay attack, so let's ignore it.\n return False\n\n request_body = request.body.decode('utf-8')\n sig_basestring = 'v0:' + timestamp + ':' + request_body\n\n hash = hmac.new(settings.SLACK_SIGNING_SECRET.encode('utf-8'), sig_basestring.encode('utf-8'), hashlib.sha256).hexdigest()\n my_signature = 'v0=' + hash\n\n if my_signature == slack_signature:\n # hooray, the request came from Slack!\n return True\n return False\n else:\n return True\n","sub_path":"candidates/utils/slack.py","file_name":"slack.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"222145229","text":"import csv\n\ndef convert_to_dict(filename):\n \"\"\"\n Convert a CSV file to a list of Python dictionaries.\n \"\"\"\n # open a CSV file - note - must have column headings in top row\n datafile = open(filename, newline='')\n\n # create list of OrderedDicts as of Python 3.6\n my_reader = csv.DictReader(datafile)\n\n # write it all out to a new list\n list_of_dicts = []\n for row in my_reader:\n # we convert each row to a string and add a newline\n list_of_dicts.append( dict(row) )\n\n # close original csv file\n datafile.close()\n # return the list\n return list_of_dicts\n","sub_path":"assignment10/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"178648829","text":"# compute calibration parameters\n\nimport os, datetime\nfrom pylab import *\nfrom lab import *\nfrom scipy.optimize import curve_fit\nfrom mpl_toolkits.mplot3d import Axes3D\n\n########## parameters ###########\n\nexclude = list(range(1,15)) # files to exclude from data\noutFile = open('anal2-out.txt', 'a')\n\n########### read data ###########\n\nprint('reading data...')\n\nifile = 1\npoints = [[], []]\nerrors = [[], []]\nwhile True:\n\tif not ifile in exclude:\n\t\tfileNames = [\"data/data-%03d-%s.txt\" % (ifile, s) for s in 
['acc', 'mag']]\n\t\tif any([not os.path.isfile(fileName) for fileName in fileNames]):\n\t\t\tbreak\n\t\tfor i in range(len(fileNames)):\n\t\t\tdata = loadtxt(fileNames[i], unpack=True, dtype='int16')\n\t\t\tpoints[i].append([data[j].mean() for j in range(3)])\n\t\t\terrors[i].append([data[j].std(ddof=1)/sqrt(len(data[j])) for j in range(3)])\n\tifile += 1\n\npoints = asarray(points)\nerrors = asarray(errors)\n\nacc_points = transpose(points[0])\nacc_errors = transpose(errors[0])\n\nmag_points = transpose(points[1])\nmag_errors = transpose(errors[1])\n\n####################### plot data ########################\n\nfig = figure(2)\nax = fig.add_subplot(121, projection='3d')\nax.set_title('Accelerometer')\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_zlabel('z')\nax.scatter(*acc_points)\nax = fig.add_subplot(122, projection='3d')\nax.set_title('Magnetometer')\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_zlabel('z')\nax.scatter(*mag_points)\nshow()\n\n############### accelerometer calibration ###############\n\nprint('accelerometer calibration...')\n\n# calibrated values along axes, given measured values\ndef cal_accxyz(axyz, cx, cy, cz, gx, gy, gz, alpha, beta, gamma):\n\tax = (axyz[0] - cx)/gx\n\tay = ((axyz[1] - cy)/gy - ax * sin(alpha)) / cos(alpha)\n\taz = ((axyz[2] - cz)/gz - sin(beta) * (ay*sin(gamma) + ax*cos(gamma))) / cos(beta)\n\treturn ax, ay, az\n\n# helper function to compute errors, output not normalized!\ndef cal_daccxyz(caxyz, cx, cy, cz, gx, gy, gz, alpha, beta, gamma):\n\tax, ay, az = caxyz\n\tdax = (ax - ay*tan(alpha) - az*tan(beta)*cos(gamma)) / gx\n\tday = (ay - az*tan(beta)*sin(gamma)) / (gy*cos(alpha))\n\tdaz = az / (gz*cos(beta))\n\treturn dax, day, daz\n\n# calibrated module of acceleration\ndef cal_acc(axyz, cx, cy, cz, gx, gy, gz, alpha, beta, gamma):\n\tax, ay, az = cal_accxyz(axyz, cx, cy, cz, gx, gy, gz, alpha, beta, gamma)\n\treturn sqrt(ax**2 + ay**2 + az**2)\n\n# error of calibrated module of acceleration, given errors on measures\ndef cal_dacc(axyz, daxyz, cx, cy, cz, gx, gy, gz, alpha, beta, gamma):\n\tax, ay, az = cal_accxyz(axyz, cx, cy, cz, gx, gy, gz, alpha, beta, gamma)\n\tdax, day, daz = cal_daccxyz((ax, ay, az), cx, cy, cz, gx, gy, gz, alpha, beta, gamma)\n\ta = sqrt(ax**2 + ay**2 + az**2)\n\tda = (dax*daxyz[0])**2 + (day*daxyz[1])**2 + (daz*daxyz[2])**2\n\treturn sqrt(da / a)\n\n# normalized residuals\ndef cal_accnres(axyz, daxyz, cx, cy, cz, gx, gy, gz, alpha, beta, gamma):\n\tax, ay, az = cal_accxyz(axyz, cx, cy, cz, gx, gy, gz, alpha, beta, gamma)\n\tdax, day, daz = cal_daccxyz((ax, ay, az), cx, cy, cz, gx, gy, gz, alpha, beta, gamma)\n\ta = sqrt(ax**2 + ay**2 + az**2)\n\tda = (dax*daxyz[0])**2 + (day*daxyz[1])**2 + (daz*daxyz[2])**2\n\treturn (a - 1) / sqrt(da / a)\n\ndef fit_accel(axyz, daxyz, center=(0,0,0), grav=(1,1,1), angles=(0,0,0), **kw):\n\tcycles = 1\n\ty = ones(len(axyz[0]))\n\tp0 = concatenate((center, grav, angles))\n\tpar, cov = curve_fit(cal_acc, axyz, y, p0=p0, **kw)\n\tsigma = sqrt(diag(cov))\n\terror = sigma # to pass loop condition\n\tp0 = par\n\twhile any(error > sigma / 1000):\n\t\tsigmayeff = cal_dacc(axyz, daxyz, *p0)\n\t\tpar, cov = curve_fit(cal_acc, axyz, y, p0=p0, sigma=sigmayeff, **kw)\n\t\tsigma = sqrt(diag(cov))\n\t\terror = abs(par - p0)\n\t\tp0 = par\n\t\tcycles += 1\n\tnres = cal_accnres(axyz, daxyz, *p0)\n\tprint(\"fit cycles = %d\" % cycles)\n\treturn par, cov, nres\n\nacc_par, cov, acc_nres = fit_accel(acc_points, acc_errors, grav=[250]*3, absolute_sigma=False)\nacc_chi2 = sum(acc_nres ** 2)\nacc_ndof = 
len(acc_points[0]) - len(acc_par)\nacc_sigma = sqrt(diag(cov))\n\n################# magnetometer calibration ##################\n\nprint('magnetometer calibration...')\n\nmag_par, cov, mag_nres = fit_accel(mag_points, mag_errors, center=(-500,0,500), grav=[600]*3, absolute_sigma=False, maxfev=20000)\nmag_chi2 = sum(mag_nres ** 2)\nmag_ndof = len(mag_points[0]) - len(mag_par)\nmag_sigma = sqrt(diag(cov))\n\n################# write results #################\n\nstring = ''\n\nstring += 'Accelerometer:\\n'\nfor i in range(len(acc_par)):\n\tstring += \"%s = %s\\n\" % (['0x', '0y', '0z', 'gx', 'gy', 'gz', 'A', 'B', 'G'][i], util_format(acc_par[i], acc_sigma[i]))\nstring += \"chi2/ndof = %.3g/%d = %.3g\" % (acc_chi2, acc_ndof, acc_chi2 / acc_ndof)\n\nstring += '\\n\\n'\n\nstring += 'Magnetometer:\\n'\nfor i in range(len(mag_par)):\n\tstring += \"%s = %s\\n\" % (['0x', '0y', '0z', 'bx', 'by', 'bz', 'A', 'B', 'G'][i], util_format(mag_par[i], mag_sigma[i]))\nstring += \"chi2/ndof = %.3g/%d = %.3g\" % (mag_chi2, mag_ndof, mag_chi2 / mag_ndof)\n\nprint('\\n' + string)\noutFile.write(\"# written by anal2.py on %s\\n\" % str(datetime.datetime.now()) + \"# fit on %d points\\n\\n\" % len(acc_points[0]) + string + '\\n\\n')\n\n####################### draw figures ########################\n\nfigure(1)\nclf()\nsubplot(121)\ntitle('acc')\nplot(1 + arange(len(acc_nres)), acc_nres, 'o')\nsubplot(122)\ntitle('mag')\nplot(1 + arange(len(mag_nres)), mag_nres, 'o')\ngrid()\nshow()\n\n##################### close things ######################\n\noutFile.close()\n","sub_path":"Posizione/Calibrazione/Accel-mag/anal2.py","file_name":"anal2.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"354810723","text":"\"\"\"15. 주민등록번호를 입력하면 남자인지 여자인지 알려주는 프로그램을 작성하시오. 
\r\n(리스트 split 과 슬라이싱 활용) \r\n\r\n예시\r\n<입력>\r\n주민등록번호 : 941130-3002222\r\n\r\n<출력>\r\n남자\r\n\"\"\"\r\n주민번호 = input(\"주민등록번호: \")\r\n주민번호 = 주민번호.split(\"-\")[1]\r\nif 주민번호[0] == \"1\" or 주민번호[0] == \"3\":\r\n print(\"남자\")\r\nelse:\r\n print(\"여자\")","sub_path":"quiz/pre_python_15.py","file_name":"pre_python_15.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"106899003","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('imager_images', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='album',\n name='cover',\n field=models.ForeignKey(related_name='cover_for', blank=True, to='imager_images.Photos', null=True),\n ),\n migrations.AlterField(\n model_name='album',\n name='photos',\n field=models.ManyToManyField(related_name='albums', to='imager_images.Photos', blank=True),\n ),\n ]\n","sub_path":"imagersite/imager_images/migrations/0002_auto_20150730_2342.py","file_name":"0002_auto_20150730_2342.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"142613813","text":"from typing import List \nclass Solution:\n def minimumTotal(self, triangle: List[List[int]]) -> int:\n n = len(triangle)\n dp = triangle[-1]\n\n for i in range(n - 2, -1, -1):\n for j in range(i + 1):\n dp[j] = triangle[i][j] + min(dp[j], dp[j + 1])\n \n return dp[0]\n\nif __name__ == \"__main__\":\n print(Solution().minimumTotal([\n [2],\n [3, 4],\n [6, 5, 7],\n [4, 1, 8, 3]\n ])) # 11\n \n ","sub_path":"triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"291623909","text":"# Copyright (C) 2011 Lukas Lalinsky\n# Distributed under the MIT license, see the LICENSE file for details.\n\nimport logging\nfrom contextlib import closing\nfrom sqlalchemy import sql\nfrom acoustid import tables as schema, const, chromaprint\nfrom acoustid.indexclient import IndexClientError\n\nlogger = logging.getLogger(__name__)\n\n\nFINGERPRINT_VERSION = 1\nPARTS = ((1, 20), (21, 100))\nPART_SEARCH_SQL = \"\"\"\nSELECT f.id, f.track_id, t.gid AS track_gid, score FROM (\n SELECT id, track_id, acoustid_compare2(fingerprint, query, %(max_offset)s) AS score\n FROM fingerprint, (SELECT %(fp)s::int4[] AS query) q\n WHERE\n length BETWEEN %(length)s - %(max_length_diff)s AND %(length)s + %(max_length_diff)s AND\n subarray(acoustid_extract_query(query), %(part_start)s, %(part_length)s) && acoustid_extract_query(fingerprint)\n) f JOIN track t ON f.track_id=t.id WHERE f.score > %(min_score)s ORDER BY f.score DESC, f.id\n\"\"\"\n\n\ndef decode_fingerprint(fingerprint_string):\n \"\"\"Decode a compressed and base64-encoded fingerprint\"\"\"\n fingerprint, version = chromaprint.decode_fingerprint(fingerprint_string)\n if version == FINGERPRINT_VERSION:\n return fingerprint\n\n\ndef lookup_fingerprint(conn, fp, length, good_enough_score, min_score, fast=False, max_offset=0):\n \"\"\"Search for a fingerprint in the database\"\"\"\n matched = []\n best_score = 0.0\n for part_start, part_length in PARTS:\n params = dict(fp=fp, length=length, part_start=part_start,\n part_length=part_length, max_length_diff=const.FINGERPRINT_MAX_LENGTH_DIFF,\n min_score=min_score, max_offset=max_offset)\n 
with closing(conn.execute(PART_SEARCH_SQL, params)) as result:\n for row in result:\n matched.append(row)\n if row['score'] >= best_score:\n best_score = row['score']\n if best_score > good_enough_score:\n break\n return matched\n\n\nclass FingerprintSearcher(object):\n\n def __init__(self, db, idx=None, fast=True):\n self.db = db\n self.idx = idx\n self.min_score = const.TRACK_GROUP_MERGE_THRESHOLD\n self.max_length_diff = const.FINGERPRINT_MAX_LENGTH_DIFF\n self.max_offset = const.TRACK_MAX_OFFSET\n self.fast = fast\n\n def _create_search_query(self, fp, length, condition):\n # construct the subquery\n f_columns = [\n schema.fingerprint.c.id,\n schema.fingerprint.c.track_id,\n sql.func.acoustid_compare2(schema.fingerprint.c.fingerprint, fp,\n self.max_offset).label('score'),\n ]\n f_where = sql.and_(\n condition,\n schema.fingerprint.c.length.between(length - self.max_length_diff,\n length + self.max_length_diff))\n f = sql.select(f_columns, f_where).alias('f')\n # construct the main query\n columns = [f.c.id, f.c.track_id, schema.track.c.gid.label('track_gid'), f.c.score]\n src = f.join(schema.track, schema.track.c.id == f.c.track_id)\n return sql.select(columns, f.c.score > self.min_score, src,\n order_by=[f.c.score.desc(), f.c.id])\n\n def _search_index(self, fp, length):\n # index search\n fp_query = self.db.execute(sql.select([sql.func.acoustid_extract_query(fp)])).scalar()\n if not fp_query:\n return []\n with closing(self.idx.connect()) as idx:\n results = idx.search(fp_query)\n if not results:\n return []\n min_score = results[0].score * 0.1 # at least 10% of the top score\n candidate_ids = [r.id for r in results if r.score > min_score]\n if not candidate_ids:\n return []\n # construct the query\n condition = schema.fingerprint.c.id.in_(candidate_ids)\n query = self._create_search_query(fp, length, condition)\n # database scoring\n matches = self.db.execute(query).fetchall()\n return matches\n\n def _search_database(self, fp, length, min_fp_id):\n # construct the query\n condition = sql.func.acoustid_extract_query(schema.fingerprint.c.fingerprint).op('&&')(sql.func.acoustid_extract_query(fp))\n if min_fp_id:\n condition = sql.and_(condition, schema.fingerprint.c.id > min_fp_id)\n query = self._create_search_query(fp, length, condition)\n # database scoring\n matches = self.db.execute(query).fetchall()\n return matches\n\n def _get_min_indexed_fp_id(self):\n if self.idx is None:\n return 0\n with closing(self.idx.connect()) as idx:\n return int(idx.get_attribute('max_document_id') or '0')\n\n def search(self, fp, length):\n min_fp_id = 0 if self.idx is None or self.fast else self._get_min_indexed_fp_id()\n matches = None\n if self.idx is not None:\n try:\n matches = self._search_index(fp, length)\n except IndexClientError:\n logger.exception(\"Index search error\")\n matches = None\n if not self.fast and not matches:\n matches = self._search_database(fp, length, min_fp_id)\n return matches or []\n\n\ndef insert_fingerprint(conn, data, submission_id=None, source_id=None):\n \"\"\"\n Insert a new fingerprint into the database\n \"\"\"\n with conn.begin():\n insert_stmt = schema.fingerprint.insert().values({\n 'fingerprint': data['fingerprint'],\n 'length': data['length'],\n 'bitrate': data.get('bitrate'),\n 'format_id': data.get('format_id'),\n 'track_id': data['track_id'],\n 'submission_count': 1,\n })\n id = conn.execute(insert_stmt).inserted_primary_key[0]\n if submission_id and source_id:\n insert_stmt = schema.fingerprint_source.insert().values({\n 'fingerprint_id': id,\n 
'submission_id': submission_id,\n 'source_id': source_id,\n })\n conn.execute(insert_stmt)\n logger.debug(\"Inserted fingerprint %r\", id)\n return id\n\n\ndef inc_fingerprint_submission_count(conn, id, submission_id=None, source_id=None):\n update_stmt = schema.fingerprint.update().where(schema.fingerprint.c.id == id)\n conn.execute(update_stmt.values(submission_count=sql.text('submission_count+1')))\n if submission_id and source_id:\n insert_stmt = schema.fingerprint_source.insert().values({\n 'fingerprint_id': id,\n 'submission_id': submission_id,\n 'source_id': source_id,\n })\n conn.execute(insert_stmt)\n return True\n\n\ndef update_fingerprint_index(db, index, limit=1000):\n with closing(index.connect()) as index:\n max_id = int(index.get_attribute('max_document_id') or '0')\n last_id = max_id\n query = sql.select([\n schema.fingerprint.c.id,\n sql.func.acoustid_extract_query(schema.fingerprint.c.fingerprint),\n ]).where(schema.fingerprint.c.id > max_id).\\\n order_by(schema.fingerprint.c.id).limit(limit)\n in_transaction = False\n for id, fingerprint in db.execute(query):\n if not in_transaction:\n index.begin()\n in_transaction = True\n logger.debug(\"Adding fingerprint %s to index %s\", id, index)\n index.insert(id, fingerprint)\n last_id = id\n if in_transaction:\n index.commit()\n logger.info(\"Updated index %s up to fingerprint %s\", index, last_id)\n","sub_path":"acoustid/data/fingerprint.py","file_name":"fingerprint.py","file_ext":"py","file_size_in_byte":7587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"9042753","text":"import numpy as np\nfrom numpy.linalg import norm\nimport pygame\n\nfrom helpers import polar_angle, polar_to_cartesian, norm2, random_unit, basis\n\n\nclass Cloud:\n def __init__(self, position, velocity, number, lifetime, size, gravity_scale=1.0, image_path='',\n start_color=(255, 255, 255), end_color=(255, 255, 255), base_velocity=(0, 0), shading=0.0, shine=0.0,\n stretch=0.0):\n self.position = np.repeat(np.array(position, dtype=float)[np.newaxis], number, axis=0)\n self.velocity = 0.5 * np.repeat(np.array(base_velocity, dtype=float)[np.newaxis], number, axis=0)\n self.acceleration = np.zeros_like(self.position)\n self.lifetime = lifetime\n self.size = size\n self.gravity_scale = gravity_scale\n self.image_path = image_path\n self.start_color = np.array(start_color, dtype=int)\n self.end_color = np.array(end_color, dtype=int)\n self.shading = shading\n self.shine = shine\n self.stretch = stretch\n\n self.time = 0.0\n\n self.add_particles(velocity, number)\n self.active = True\n\n def add_particles(self, velocity, number):\n if np.any(velocity):\n angle = polar_angle(velocity)\n else:\n angle = None\n\n v_norm = norm(velocity)\n for i in range(number):\n if angle is None:\n v = 0.5 * random_unit()\n else:\n theta = np.random.normal(angle, 0.25)\n r = np.abs(np.random.normal(v_norm, v_norm))\n v = polar_to_cartesian(r, theta)\n self.velocity[i, :] += v\n\n def update(self, gravity, time_step):\n self.time += time_step\n if self.time >= self.lifetime:\n self.active = False\n return\n\n delta_pos = self.velocity * time_step + 0.5 * self.acceleration * time_step**2\n self.position += delta_pos\n acc_old = self.acceleration.copy()\n self.acceleration[:] = self.gravity_scale * gravity\n\n self.velocity += 0.5 * (acc_old + self.acceleration) * time_step\n\n def draw(self, screen, camera, image_handler):\n if not self.active:\n return\n\n size = int(camera.zoom * (1 - self.time / self.lifetime) * 
self.size)\n if size == 0:\n return\n\n if self.image_path:\n scale = size / 100\n\n img = image_handler.images[self.image_path]\n image = pygame.transform.rotozoom(img, 0.0, scale)\n\n for p in self.position:\n rect = image.get_rect()\n rect.center = camera.world_to_screen(p)\n\n screen.blit(image, rect)\n else:\n color = self.start_color + self.time / self.lifetime * (self.end_color - self.start_color)\n size = [(1 + self.stretch * norm2(self.velocity)) * size, size]\n for i, p in enumerate(self.position):\n surface = pygame.Surface(size)\n surface.set_colorkey(pygame.Color('black'))\n\n if self.shading:\n pygame.draw.ellipse(surface, (1 - self.shading) * color, [0, 0] + size)\n pygame.draw.ellipse(surface, color, [size[0] // 4, 0, 0.8 * size[0], 0.8 * size[1]])\n else:\n pygame.draw.ellipse(surface, color, [0, 0] + size)\n\n if self.shine:\n pygame.draw.circle(surface, color + (255 - color) * self.shine, [int(0.8 * size[0]), size[1] // 2],\n size[1] // 5)\n\n surface = pygame.transform.rotate(surface, np.degrees(polar_angle(self.velocity[i, :])))\n pos = camera.world_to_screen(p)\n screen.blit(surface, [pos[0] - 0.5 * size[0], pos[1] - 0.5 * size[1]])\n\n\nclass BloodSplatter(Cloud):\n def __init__(self, position, direction, number=10):\n super().__init__(position, direction, number, 10.0, 0.5, start_color=(255, 0, 0), end_color=(255, 0, 0),\n shading=0.17, shine=1.0, stretch=0.5)\n\n\nclass MuzzleFlash(Cloud):\n def __init__(self, position, velocity, base_velocity=(0, 0)):\n super().__init__(position, velocity, 3, 10.0, 0.8, base_velocity=base_velocity, gravity_scale=0.0,\n start_color=(255, 255, 200), end_color=(255, 215, 0))\n\n\nclass Explosion(Cloud):\n def __init__(self, position):\n super().__init__(position, np.zeros(2), 10, 8.0, 5.0, gravity_scale=0.0, start_color=(255, 255, 255),\n end_color=(50, 50, 50), shading=0.2)\n\n\nclass Sparks(Cloud):\n def __init__(self, position, velocity):\n super().__init__(position, velocity, 3, 5.0, 0.4)\n\n\nclass Dust(Cloud):\n def __init__(self, position, velocity, number):\n super().__init__(position, 0.2 * velocity, number, 5.0, 0.7, gravity_scale=0.5, start_color=(200, 200, 200),\n end_color=(200, 200, 200), shading=0.2)\n","sub_path":"particle.py","file_name":"particle.py","file_ext":"py","file_size_in_byte":4932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"132204302","text":"GRADE_CHOICES = (\r\n (\"선택안함\", \"선택안함\"),\r\n (\"1학년\", \"1학년\"),\r\n (\"2학년\", \"2학년\"),\r\n (\"3학년\", \"3학년\"),\r\n (\"4학년\", \"4학년\"),\r\n (\"졸업생\", \"졸업생\"),\r\n)\r\n\r\nLEVEL_CHOICES = (\r\n (\"3\", \"Lv3_미인증사용자\"),\r\n (\"2\", \"Lv2_인증사용자\"),\r\n (\"1\", \"Lv1_관리자\"),\r\n (\"0\", \"Lv0_개발자\"),\r\n)\r\n\r\nCIRCLES_CHOICES = (\r\n (\"미가입\", \"미가입\"),\r\n (\"NUXPIA\", \"NUXPIA\"),\r\n (\"NET\", \"NET\"),\r\n (\"DOT-GABI\", \"DOT-GABI\"),\r\n (\"IMAGINE\", \"IMAGINE\"),\r\n (\"P&N\", \"P&N\"),\r\n (\"MEGA-BRAIN\", \"MEGA-BRAIN\"),\r\n)\r\n\r\nDEPARTMENT_CHOICES = (\r\n (\"선택안함\", \"선택안함\"),\r\n (\"외부인\", \"학부생이 아님\"),\r\n (\"컴퓨터공학부\", \"컴퓨터공학부\"),\r\n (\"드론IOT시뮬레이션학부\", \"드론IOT시뮬레이션학부\"),\r\n (\"의과대학\", \"의과대학\"),\r\n (\"문리과대학\", \"문리과대학\"),\r\n (\"사회과학대학\", \"사회과학대학\"),\r\n (\"공과대학\", \"공과대학\"),\r\n (\"보건의료융합대학\", \"보건의료융합대학\"),\r\n (\"BNIT융합대학\", \"BNIT융합대학\"),\r\n (\"약학대학\", \"약학대학\"),\r\n)\r\n\r\nCATEGORY_CHOICES = (\r\n (\"자유\", \"자유\"),\r\n (\"질문\", \"질문\"),\r\n (\"정보\", 
\"정보\"),\r\n)","sub_path":"reborn_web/users/choice.py","file_name":"choice.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"605062330","text":"from itertools import permutations\ndef per(s):\n a=[]\n perm=permutations(s)\n for pe in list(perm):\n d=''.join(pe)\n a.append(d)\n l=set(a)\n for k in l:\n print(k)\ns=input()\nd=s[::-1]\nif(s==d):\n print(s)\nelse:\n per(s)\n","sub_path":"14h.py","file_name":"14h.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"315525754","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 5 15:35:22 2019\n\n@author: swilson5\n\"\"\"\nimport pytest\nfrom descriptive import Calculator\n\nprint('starting tests')\ndef test_mean():\n data = [2, 10, 15, 20, 45, 55, 80]\n instance = Calculator(data)\n actual_mean = instance.mean\n expected_mean = 32.42857\n message = \"Expected value: {0}, Actual value: {1}\".format(expected_mean,\n actual_mean)\n assert instance.mean == pytest.approx(expected_mean), message\n\n\ndef test_std():\n data = [2, 10, 15, 20, 45, 55, 80]\n instance = Calculator(data)\n actual_std = instance.stand_dev\n expected_std = 28.33641\n message = \"Expected STD: {0}, Actual value: {1}\".format(expected_std,\n actual_std)\n assert instance.stand_dev == pytest.approx(28.33641), message\n\ndef test_order():\n data = [2, 20, 45, 15, 10, 55, 80]\n instance = Calculator(data)\n expected_median =20\n actual_median = instance.median\n message = \"Expected value: {0}, Actual value: {1}, did you order the \".format(expected_median, actual_median)\n assert instance.median == 20, message\n\ndef test_add_data():\n data = [2, 20, 45, 15, 10, 55, 80]\n instance = Calculator(data)\n instance.add_data([60])\n actual_mean = instance.mean\n expected_mean = 35.875\n message = \"called add_data() and expected mean: {0}, but got value: {1}\".format(expected_mean,\n actual_mean)\n assert instance.mean == pytest.approx(expected_mean), message\n\ndef test_remove_data():\n data = [2, 20, 45, 15, 10, 55, 80]\n instance = Calculator(data)\n instance.remove_data([80])\n actual_mean = instance.mean\n expected_mean = 24.5\n message = \"called remove_data() and expected mean: {0}, but got value: {1}\".format(expected_mean,\n actual_mean)\n assert instance.mean == pytest.approx(expected_mean), message\n","sub_path":"Phase_3/Calculator_class/test_calculator.py","file_name":"test_calculator.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"20768179","text":"from .core_pages import Abstract_Page\n\nclass GraphCrawler(object):\n \"\"\"docstring for GraphCrawler\"\"\"\n def __init__(self, roots, parents_to_children=True):\n super(GraphCrawler, self).__init__()\n self.roots = roots\n self.parents_to_children = parents_to_children\n\n def get_next(self, node) :\n raise NotImplemented(\"Must be implemented in child\")\n\n def get_node_uid(self, node) :\n raise NotImplemented(\"Must be implemented in child\")\n \n def get_node_label(self, node) :\n raise NotImplemented(\"Must be implemented in child\")\n\n def get_node_parameters():\n return {}\n\n def get_edge_parameters():\n return {}\n \n def get_node_attributes():\n return {}\n \n def get_edge_attributes():\n return {}\n\nclass Abstract_Graph(Abstract_Page):\n \"\"\"docstring for DAG\"\"\"\n def __init__(self, 
notebook, name):\n super(Abstract_Graph, self).__init__(notebook, name)\n self._init()\n\n def _init(self) :\n self.caption = \"\"\n self.nodes, self.edges = {}, {}\n self.node_labels = set()\n\n def set_caption(self, caption) :\n self.caption = caption\n\n def force_set(self, nodes, edges) :\n self._init()\n # register the labels of the supplied nodes before storing them\n for n in nodes :\n self.node_labels.add(n)\n\n self.nodes = nodes\n self.edges = edges\n\n def parse(self, fct, *args, **kwargs) :\n self.nodes, self.edges = fct(*args, **kwargs)\n \n def resolve_node_name(self, base_name, autoinc) :\n if not autoinc :\n return base_name\n\n name = base_name\n i = 1\n while name in self.node_labels :\n name = base_name + \"_%d\" % i\n i += 1\n\n return name\n\n def crawl(\n self,\n crawler,\n autoincrement_names=True,\n reset=False\n ) :\n\n def _derive(root, nodes, edges, node_labels) :\n root_name = self.resolve_node_name(crawler.get_node_label(root), autoincrement_names)\n node_labels.add(root_name)\n root_uid = crawler.get_node_uid(root)\n if root_uid :\n nodes[root_uid] = {\n \"label\": root_name,\n \"attributes\": {\n \"label\": root_name\n }\n }\n\n nodes[root_uid][\"attributes\"].update(crawler.get_node_attributes(root))\n nodes[root_uid][\"label\"] = nodes[root_uid][\"label\"] + \"(%s)\" % ( len(nodes[root_uid][\"attributes\"]) -1 )\n\n nodes[root_uid].update(crawler.get_node_parameters(root))\n\n for d in crawler.get_next(root) :\n if d is not root :\n child_uid = crawler.get_node_uid(d)\n if child_uid :\n if crawler.parents_to_children :\n con = (root_uid, child_uid)\n else :\n con = (child_uid, root_uid)\n \n edges[con] = {}\n edges[con][\"attributes\"] = crawler.get_edge_attributes(con[0], con[1])\n \n edges[con].update(crawler.get_edge_parameters(con[0], con[1]))\n\n _derive(d, nodes, edges, node_labels)\n\n if reset :\n self._init()\n \n for root in crawler.roots :\n _derive(root, self.nodes, self.edges, self.node_labels)\n\nclass DagreGraph(Abstract_Graph) :\n \"\"\"\"\"\"\n\n def __init__(self, notebook, name):\n super(DagreGraph, self).__init__(notebook, name)\n self.graph_attributes = {}\n\n def set_attributes(self, dct) :\n self.graph_attributes = dct\n\n def reset_css(self) :\n self.css_rules = {}\n self.css_rules[\"text\"] = (\n \"font-weight: 300\",\n 'font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif',\n 'font-size: 14px'\n )\n\n self.css_rules[\".node rect\"] = (\n \"stroke: #999\",\n 'fill: #fff',\n 'stroke-width: 1.5px'\n )\n\n self.css_rules[\".edgePath path\"] = (\n \"stroke: #333\",\n 'stroke-width: 1.5px'\n )\n\n def get_html(self) :\n def _pseudo_jsonify(dct) :\n attrs = [ ]\n for k, v in dct.items() :\n if type(v) is dict :\n vv = _pseudo_jsonify(v)\n attrs.append(\"'%s': {%s}\" % (k, vv))\n else :\n attrs.append(\"'%s': '%s'\" % (k, v))\n\n return ','.join(attrs)\n\n def _set_nodes() :\n res = []\n for node_id, params in self.nodes.items() :\n attrs = _pseudo_jsonify(params)\n res.append( \"g.setNode('{node_id}', {{ {attributes} }} );\".format(node_id = node_id, attributes = attrs ))\n \n return '\\n'.join(res)\n\n def _set_edges() :\n res = []\n for n1, n2 in self.edges :\n res.append( \"g.setEdge('%s', '%s')\" % (n1, n2) )\n return '\\n'.join(res)\n\n graph_attributes = \"{%s}\" % _pseudo_jsonify(self.graph_attributes)\n \n template = \"\"\"
\n<!DOCTYPE html>\n<html>\n<head>\n<script src=\"{libs}/d3.min.js\"></script>\n<script src=\"{libs}/dagre-d3.min.js\"></script>\n</head>\n<body>\n<h2>{caption}</h2>\n<svg id=\"graph\" width=\"960\" height=\"600\"><g/></svg>\n<div id=\"graph-attributes\"><b>Graph attributes</b></div>\n<div id=\"node-attributes\"><b>Node attributes</b></div>\n<script>\nvar g = new dagreD3.graphlib.Graph().setGraph({graph_attributes});\n{nodes}\n{edges}\nvar render = new dagreD3.render();\nrender(d3.select(\"svg g\"), g);\n</script>\n</body>\n</html>
\n\n\n \n \"\"\".format(nodes = _set_nodes(), edges= _set_edges(), graph_attributes=graph_attributes, caption=self.caption, libs=self.notebook.lib_folder)\n\n return template\n\n","sub_path":"Spyrkle/pages/graph_pages.py","file_name":"graph_pages.py","file_ext":"py","file_size_in_byte":7878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"451189358","text":"from Jumpscale import j\n\nJSBASE = j.baseclasses.builder\n\nbuilder_method = j.baseclasses.builder_method\n\n\nclass BuilderCapnp(JSBASE):\n __jslocation__ = \"j.builders.libs.capnp\"\n\n @builder_method()\n def build(self):\n \"\"\"\n install capnp\n\n kosmos 'j.builders.libs.capnp.build(reset=True)'\n kosmos 'j.builders.libs.capnp.build()'\n \"\"\"\n\n # j.builders.buildenv.install()\n if self.tools.platform_is_ubuntu:\n j.builders.system.package.ensure(\"g++\")\n\n # build tools\n self.system.package.mdupdate()\n self.system.package.install([\"autoconf\", \"automake\", \"libtool\"])\n\n # build from source\n install_cmd = \"\"\"\n cd {DIR_BUILD}\n rm -rf capnproto\n git clone --depth 1 https://github.com/sandstorm-io/capnproto.git\n cd capnproto/c++\n autoreconf -i\n ./configure\n make -j6 check\n make install\n \"\"\"\n self._execute(install_cmd, timeout=1000)\n\n @builder_method()\n def install(self):\n \"\"\"\n install capnp\n\n kosmos 'j.builders.libs.capnp.install()'\n \"\"\"\n if self.tools.platform_is_ubuntu:\n j.builders.system.package.ensure(\"g++\")\n # j.builders.runtimes.python3.pip_package_install(['cython', 'setuptools', 'pycapnp'])\n bins = [\"capnp\", \"capnp-afl-testcase\", \"capnpc-c++\", \"capnp-test\", \"capnpc-capnp\", \"capnp-evolution-test\"]\n\n libs = [\n \"libkj-http.so\",\n \"libkj-async.so\",\n \"libcapnpc.so\",\n \"libkj-tls.so\",\n \"libcapnp.so\",\n \"libkj-test.so\",\n \"libcapnp-json.so\",\n \"libcapnp-rpc.so\",\n \"libkj.so\",\n \"libkj-async-0.8-dev.so\",\n \"libkj-test-0.8-dev.so\",\n \"libcapnpc-0.8-dev.so\",\n \"libcapnp-json-0.8-dev.so\",\n \"libcapnp-0.8-dev.so\",\n \"libkj-0.8-dev.so\",\n \"libkj-http-0.8-dev.so\",\n \"libkj-tls-0.8-dev.so\",\n \"libcapnp-rpc-0.8-dev.so\",\n ]\n # copy bins\n bins_src_path = \"{DIR_BUILD}/capnproto/c++/.libs/\"\n for bin in bins:\n self._copy(bins_src_path + bin, \"{DIR_BIN}\")\n\n # copy libs\n for lib in libs:\n self._copy(bins_src_path + lib, \"{DIR_BASE}/lib\")\n\n @builder_method()\n def clean(self):\n code_dir = self._joinpaths(self.DIR_BUILD, \"capnproto\")\n self._remove(code_dir)\n\n @builder_method()\n def reset(self):\n super().reset()\n self.clean()\n\n @builder_method()\n def sandbox(self, zhub_client=None, flist_create=True, merge_base_flist=\"\"):\n bin_dest = self._joinpaths(self.DIR_SANDBOX, \"sandbox\", \"bin\")\n lib_dest = self._joinpaths(self.DIR_SANDBOX, \"sandbox\", \"lib\")\n\n bins = [\"capnp\", \"capnp-afl-testcase\", \"capnpc-c++\", \"capnp-test\", \"capnpc-capnp\", \"capnp-evolution-test\"]\n\n libs = [\n \"libkj-http.so\",\n \"libkj-async.so\",\n \"libcapnpc.so\",\n \"libkj-tls.so\",\n \"libcapnp.so\",\n \"libkj-test.so\",\n \"libcapnp-json.so\",\n \"libcapnp-rpc.so\",\n \"libkj.so\",\n \"libkj-async-0.8-dev.so\",\n \"libkj-test-0.8-dev.so\",\n \"libcapnpc-0.8-dev.so\",\n \"libcapnp-json-0.8-dev.so\",\n \"libcapnp-0.8-dev.so\",\n \"libkj-0.8-dev.so\",\n \"libkj-http-0.8-dev.so\",\n \"libkj-tls-0.8-dev.so\",\n \"libcapnp-rpc-0.8-dev.so\",\n ]\n # copy bins\n for bin in bins:\n self._dir_ensure(bin_dest)\n bin_src = self._joinpaths(\"{DIR_BASE}/bin/\", bin)\n 
self._copy(bin_src, bin_dest)\n\n # copy libs\n for lib in libs:\n self._dir_ensure(lib_dest)\n lib_src = self._joinpaths(\"{DIR_BASE}/lib/\", lib)\n self._copy(lib_src, lib_dest)\n\n @builder_method()\n def test(self):\n \"\"\"\n kosmos 'j.builders.libs.capnp.test()'\n \"\"\"\n return_code, _, _ = self._execute(\"capnp-test\")\n assert return_code == 0\n print(\"TEST OK\")\n","sub_path":"JumpscaleBuilders/libs/BuilderCapnp.py","file_name":"BuilderCapnp.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"359232318","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 31 17:02:09 2019\n\n@author: Education\n\"\"\"\nst_list = []\nwhile True:\n St_name = input(\"> \")\n if St_name == \"\":\n break\n if len(st_list) <= 25:\n st_list.append(St_name + \"\\n\")\n else:\n break\ntry:\n file = open(\"Student1.txt\",\"wt\")\n file.writelines(st_list)\nexcept Exception as e:\n print(\"Exception:\",e)\nelse:\n file = open(\"Student1.txt\",\"rt\")\n print(file.read())\nfinally:\n file.close()","sub_path":"absentee.py","file_name":"absentee.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"469080874","text":"\"\"\"\nImport Lambeth Council\n\"\"\"\nfrom django.contrib.gis.geos import Point\n\nfrom data_collection.management.commands import BaseJasonImporter\n\nclass Command(BaseJasonImporter):\n \"\"\"\n Imports the Polling station/district data from Lambeth Council\n \"\"\"\n council_id = 'E09000022'\n districts_name = 'LambethPollingDistricts.json'\n stations_name = 'LambethPollingStations_0.csv'\n\n def district_record_to_dict(self, record):\n properties = record['properties']\n return dict(\n council=self.council,\n internal_council_id=properties['OBJECTID'],\n name=properties['WARD'],\n )\n\n def station_record_to_dict(self, record):\n location = Point(int(record.easting), int(record.northing), srid=self.srid)\n return dict(\n council=self.council,\n internal_council_id=record.district_code,\n postcode=record.postcode,\n address=\"\\n\".join([record.venue, record.address]),\n location=location\n )\n","sub_path":"polling_stations/apps/data_collection/management/commands/import_lambeth.py","file_name":"import_lambeth.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"577670184","text":"#!/usr/local/bin/python\nimport sys\nsys.azeplist_cvs_string='$Id: azeplist.py 35 2012-11-16 08:35:41Z $'\n#\n#\n# -- history --\n#\n# ...
=> 1.6\n# x1 mark INACTIVE MA's, show version in title\n# x2 restart on \n# => 1.7\n# x3 -Button nach unten versetzt (platzsparend)\n# => 1.8\n# x3 = delete permissions for MA with status=0 (i.e those marked already +++)\n# => 1.9\n# 1.9x4 scrolled frame, using environ(qqq)\n# => 1.10\n#\n#\n#\nfrom Tkinter import *\nfrom calendar import *\nimport os, time\nimport string\nimport Pmw\nimport azeutil\t\t# utilities\nfrom tkMessageBox import showinfo\n\n\nif sys.platform == 'win32':\n\tfrom odbc import *\n\tdsn_aze = 'irm-aze3'\nelse:\n\tfrom mxODBC import *\n\tdsn_aze = 'aze3'\n#\n# commandline overwrites dsn ...\n#\ntry: dsn_aze=sys.argv[1]\nexcept: pass\n#\n#\n#\nclass p_list:\n\tdef __init__(self, root=None, geom='', parent=None):\t\t\t# p_list.__init__\n\t\tself.root = root\n\t\tself.parent = parent\n\t\tif (not self.root):\n\t\t\tself.root = Tk()\n\t\tself.root.resizable(0, 0)\n\t\tself.root.geometry(geom)\n\t\tself.root.protocol('WM_DELETE_WINDOW', self.quit_cmd)\n\t\tself.root.protocol('WM_SAVE_YOURSELF', self.quit_cmd)\n\t\tself.root.iconname('iRM AZE')\n\t\t#\n\t\tself.root.bind('', self.quit_cmd)\n\t\tself.root.bind('', self.quit_cmd)\n\t\t#\n\t\tself.root.bind('', self.help_cmd)\t\t\n\t\tself.root.bind('', self.restart_cmd)\n\t\tself.root.bind('', self.delretired_cmd)\n\t\t#\n##\t\troot.focus_set()\t# synonym to focus_set() ?\n\t\troot.focus()\t\t# .. works if started from pytimex.py ; does not work if started from commandline\n\t\troot.focus_force()\t# .. works if started from commandline \n\t\t#\n\t\tself.logname = azeutil.get_logname()\n\t\tself.hostname = azeutil.get_hostname()\n\t\t#\n\t\tfld = string.split(sys.azeplist_cvs_string)\n\t\tself.cvsversion = fld[2]\n\t\tself.root.title('iRM AZE Privilegien [%s] (%s@%s)' % (self.cvsversion, self.logname, self.hostname) )\n\t\t#\n\t\t#\n\t\t#\n\t\t# azeplist has been getting larger, too large for some screen(setting)s =>\n\t\t# put into scrolled frame:\n\t\t# (cloned from vnk.py..)\n\n\t\t#### HullHeight from environment:\n\t\t###try:\tHH = int()\n\t\t###except: HH = 0\n\t\tif os.environ.has_key('AZE_VNKH'):\n\t\t\t#\n###\t\t\tHW = 766 # sollte (gerade)gross genug sein, dass KEIN HOR. scrollbar noetig ist qqq\n###\t\t\tHW = 1200\n###\t\t\tself.scrollframe = Pmw.ScrolledFrame(self.root, borderframe=2, usehullsize=1, hull_width=HW, hull_height=HH)\n###+\t\tself.scrollframe = Pmw.ScrolledFrame(self.root, borderframe=2, usehullsize=1, hull_height=HH)\n\t\t\tself.scrollframe = Pmw.ScrolledFrame(self.root, borderframe=2 ) # autosize .. 
OK\n\t\t\tself.scrollframe.pack(ipadx=5, pady=0, ipady=5, anchor=W, fill=X, expand=YES)\n\t\t\tself.frame = self.scrollframe.interior()\n\t\telse:\n\t\t\tself.frame = Frame(self.root)\n\t\t#-\n\t\t\n\t\tself.frame.grid(padx=20, pady=10)\n\t\t#\n\t\tself.lst_frm = Frame(self.frame)\n\t\tself.lst_frm.grid(row=2, column=1, sticky='ew')\n\t\t#\n\t\tself.nav_frm = Frame(self.frame)\n\t\tself.nav_frm.grid(row=0, column=1, sticky='w')\n\t\t#\n\t\t# maid\n\t\t#\n\t\tif sys.platform == 'win32':\n\t\t\tself.dbcnx = odbc('%s/%s/%s' % (dsn_aze, self.logname, 'ThePassword'))\n\t\telse:\n\t\t\tself.dbcnx = Connect(dsn_aze, self.logname, 'ThePassword', 0)\n\t\tself.dbcsr = self.dbcnx.cursor()\n\t\tself.dbcsr.execute(\n\t\t\t\"SELECT ID from MITARBEITER where LOGNAME='%s'\" % self.logname)\n\t\tself.maid = self.dbcsr.fetchall()[0][0]\n\t\t#\n\t\t# navigation panel\n\t\t#\n\t\tself.quit_b = Button(self.lst_frm, text='Quit ', command=self.quit_cmd)\n\t\tself.quit_b.grid(column=0, row=0)\n\t\t#\n\t\tallow_other = azeutil.get_aze_permission(self.dbcsr, self.maid, 'VIEW_OTHER')\n\t\t#\n\t\t# user list and directory\n\t\t#\n\t\tself.user_list = []\n\t\tself.user_dict = {}\n\t\tif allow_other:\n\t\t\tqstr = \"\"\"\n\t\t\t\tSELECT distinct ID, KNAME,VORNAME,ZUNAME, STATUS from MITARBEITER, AZE_PRIVILEGES\n\t\t\t\twhere ID=MAID order by ZUNAME\"\"\"\n\t\telse:\n\t\t\tqstr = \"SELECT ID, KNAME,VORNAME,ZUNAME, STATUS from MITARBEITER where ID=%d\"%(self.maid)\n\t\t#\n\t\tself.dbcsr.execute( qstr )\n\t\trr = self.dbcsr.fetchall()\n\t\tself.max_name_length = 0\n\t\tself.retired = []\n\t\tfor r in rr:\n\t\t\tmaid = r[0]\n\t\t\tself.user_list.append(maid)\n\t\t\tname = '%s (%s %s)' % (r[1],r[2],r[3])\n\t\t\ttry:\n\t\t\t\tif int(r[4])==0:\n\t\t\t\t\tname = '+++ ' + name\n\t\t\t\t\tself.retired.append(maid)\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tself.user_dict[r[0]] = name\n\t\t\tif self.max_name_length < len(name):\n\t\t\t\tself.max_name_length = len(name)\n\t\tself.max_name_length = self.max_name_length + 2\n\t\t#\n\t\t# permission list\n\t\t#\n\t\tqstr = \"SELECT distinct PERMISSION from AZE_PRIVILEGES order by PERMISSION\"\n\t\tself.dbcsr.execute( qstr )\n\t\trr = self.dbcsr.fetchall()\n\t\tself.permission_list = []\n\t\tfor r in rr:\n\t\t\tself.permission_list.append(r[0])\n\t\t#\n\t\t# permission dict\n\t\t#\n\t\tqstr = \"SELECT distinct MAID,PERMISSION from AZE_PRIVILEGES\"\n\t\tself.dbcsr.execute( qstr )\n\t\trr = self.dbcsr.fetchall()\n\t\tself.permission_dict = {}\n\t\tfor r in rr:\n\t\t\tself.permission_dict[string.join((str(r[0]),r[1]),'_')] = 0\n\t\t#\n\t\t# draw list\n\t\t#\n\t\tself.llist = []\n\t\t###label = Label(self.lst_frm, text='',width=self.max_name_length+3)\n\t\t###label.grid(column=0, row=0, sticky='ew')\n\t\t###self.llist.append(label)\n\t\tcol = 1\n\t\t# draw title line, permissions:\n\t\tfor p in self.permission_list:\n\t\t\tpp = string.split(p,'_')\n\t\t\tp2 = pp[0]\n\t\t\tlen2 = len(p2)\n\t\t\tfor x in pp[1:]:\n\t\t\t\tp2 = p2 + '_'\n\t\t\t\tlen2 = max(len(p2),len2)\n\t\t\t\tp2 = p2 + '\\n' + x\n\t\t\tlabel = Label(self.lst_frm, text=p2, width=len2+5)\n\t\t\tlabel.grid(column=col, row=0, sticky='ew')\n#\t\t\tlabel.config(relief=RAISED, border=1)\n\t\t\tself.llist.append(label)\n\t\t\tcol = col+1\n\t\tr = 1\n\t\t# draw one line per user:\n\t\tfor u in self.user_list:\n\t\t\t# col0: name\n\t\t\tlabel = Label(self.lst_frm, text=self.user_dict[u]+' ', anchor='e', border=1)\n\t\t\tlabel.grid(column=0, row=r, sticky='ew')\n#\t\t\tlabel.config(relief=RAISED, 
border=1)\n\t\t\tself.llist.append(label)\n\t\t\tcol=1\n\t\t\t# col1.. permissions\n\t\t\tfor p in self.permission_list:\n\t\t\t\ttxt=''\n\t\t\t\tif self.permission_dict.has_key(string.join((str(u),p),'_')):\n\t\t\t\t\ttxt = 'X'\n\t\t\t\tlabel = Label(self.lst_frm, text=txt,relief=RAISED, border=1)\n\t\t\t\tlabel.grid(column=col, row=r, sticky='ew')\n\t\t\t\tself.llist.append(label)\n\t\t\t\tcol=col+1\n\t\t\tr = r+1\n\n\tdef delretired_cmd(self, event=None):\t\t# delete permissions for MA with status=0, marked '+++'\n\t\t#\n\t\tfor maid in self.retired:\n\t\t\tdstr = \"delete from AZE_PRIVILEGES where maid=%s\" % maid\n\t\t\t###print dstr\n\t\t\tself.dbcsr.execute(dstr)\n\t\t#--end for\n\t\tself.restart_cmd() \t# restart to display changes\n\n\t\t\t\n\tdef restart_cmd(self, event=None):\n\t\ttry:\tself.parent.plist_again = 1\n\t\texcept: pass\n\t\tself.quit_cmd()\n\n\tdef quit_cmd(self, event=None):\n\t\tself.root.destroy()\n\t\t\n\tdef info(self, msg, title=' AZE - VNK '):\n\t\tshowinfo(title=title, message=msg, parent=self.root)\n\n\tdef help_cmd(self, event=None):\n\t\t#\n\t\t#\n\t\tmsg = \"\"\"\n\n\t restart\n\t\n\t delete permissions for MA with status=0\n\n\t\n\n\"\"\" \n\n\t\tself.info(title=' AZE - Privilegien %s ' % self.cvsversion, msg=msg)\n\t\t\t\n\t#--end of help_cmd\n\t\t\n\nif __name__ == '__main__':\n\troot = Tk()\n\te1 = p_list(root, geom='+10+10')\n\troot.mainloop()\n","sub_path":"NOTES/AZE/pytimex/azeplist.py","file_name":"azeplist.py","file_ext":"py","file_size_in_byte":6928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"449097590","text":"## Read input as specified in the question.\n## Print output as specified in the question.\nnum=int(input())\ni=0\nwhile num>i:\n spaces=1\n while spaces<=i:\n print(\" \",end=\"\")\n spaces=spaces+1\n j=1\n while num-i >=j:\n print(j+i,end=\"\")\n j=j+1\n i=i+1\n print()\nwhile i>1:\n spaces=1\n while spaces<=i-2:\n print(\" \",end=\"\")\n spaces=spaces+1\n j=num\n k=1\n while j>=i-1:\n print(i+k-2,end=\"\")\n j=j-1\n k=k+1\n\n i=i-1\n print()\n","sub_path":"More On Loops/Print Number Pyramid.py","file_name":"Print Number Pyramid.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"222783057","text":"def checkio(data):\n # The middle index of the list\n i = int(len(data) / 2)\n\n # Sorting the list using built-in method\n data.sort()\n\n # If the list has an even number of elements\n if len(data) % 2 == 0:\n return (data[i] + data[i - 1]) / 2\n\n else:\n return data[i]\n","sub_path":"Median.py","file_name":"Median.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"251350780","text":"import numpy as np\nimport random\nimport cv2\n#import pandas as pd\nimport matplotlib.pyplot as plt\n\ndx=[-3,-3,-2,-1,0,1,2,3,3,3,2,1,0,-1,-2,-3]\ndy=[0,1,2,3,3,3,2,1,0,-1,-2,-3,-3,-3,-2,-1]\n'''\ndef training_set(images_names, threshold = 10, N = 12):\n IP=[]\n for index in range(0, len(images_names)) :\n\t\timage = cv2.imread(images_names[index], 0)\n\t\tP = interest_points(image, threshold, N)\n\t\tIP = IP + P\n\t\n df = pd.DataFrame(P)\n df.to_csv('RF.csv', index=False,header=False)\n return subset(interest_point)\n'''\ndef subset(interest_point):\n\tP,P_d,P_s,P_b=[],[],[],[]\n\t# shuffle x\n\tshuffle = random.randrange(16)\n\tfor index in range(0, len(interest_point)):\n\t\tif 
interest_point[index][shuffle] == 'd':\n\t\t\tP_d.append(interest_point[index])\n\t\tif interest_point[index][shuffle] == 's':\n\t\t\tP_s.append(interest_point[index])\n\t\tif interest_point[index][shuffle] == 'b':\n\t\t\tP_b.append(interest_point[index])\n\n\tP.extend((P_d,P_s,P_b))\n\n\treturn P\n\n'''\n\tFAST: estract the interest points with Features from Accelerated Segment Test\n'''\ndef interest_points(image, threshold = 10, N = 12, supression = False):\n\tkeyPoints=[]\n\tskip = 1\n\tif supression :\n\t\tskip = 2\n\tfor row in range(3,image.shape[0]-3,skip) :\n\t\tfor col in range(3, image.shape[1]-3,skip) :\n\t\t\tflag = is_interest_point(image, row, col, threshold, N)\n\t\t\tif flag:\n\t\t\t\tkeyPoints.append(cv2.KeyPoint(col,row, 5))\n\n\tif supression:\n\t\tsupressionPoints = []\n\t\tsupressionPoints = non_maximal_suppression(image,keyPoints,threshold)\n\t\tprint ('=====> {0:2d} Interest Point with non-Maximal Supression'.format(len(supressionPoints)))\n\t\treturn supressionPoints\n\telse :\n\t\tprint ('=====> {0:2d} Interest Point without non-Maximal Supression'.format(len(keyPoints)))\n\t\treturn keyPoints\n\n'''\n\tNon-Maximal Suppression over interest points\n'''\ndef non_maximal_suppression(image, keyPoints, threshold) :\n\tcont=0\n\tkeyPointsNMX=[]\n\tscore=np.zeros(image.shape)\n\n\tfor i in range(0,len(keyPoints)):\n\t\tscoreDark, scoreBrig = 0, 0\n\t\trow = int(keyPoints[i].pt[1])\n\t\tcol = int(keyPoints[i].pt[0])\n\t\tintensity = image[row,col]\n\t\tfor index in range(0, len(dx)):\n\t\t\tnew_row = row + dx[index]\n\t\t\tnew_col = col + dy[index]\n\t\t\tif (new_row>=0 and new_row=0 and new_col=0 and new_row=0 and new_colmaximoScore : \n\t\t\t\tmaximoScore = score[new_row,new_col]\n\t\t\t\tbreak\n\n\t\tif maximoScore == score[row,col] :\n\t\t\tkeyPointsNMX.append(cv2.KeyPoint(col,row,5))\n\n\treturn keyPointsNMX\n\n'''\n\tDetect if a pixel is or is not a interest point\n'''\ndef is_interest_point(image,row, col, threshold, N) :\n\tcountDark,countBrig=0,0\n\n\tfor index in range(0,16,4) :\n\t\tintensity = image[row,col]\n\t\tnew_intensity=image[row+dx[index],col+dy[index]]\t\t\n\t\tif new_intensity <= intensity-threshold :\n\t\t\tcountDark += 1\n\t\tif intensity + threshold <= new_intensity:\n\t\t\tcountBrig += 1\n # fast analysis with four point\n\tif countBrig < N/4 and countDark < N/4:\n\t\treturn False\n\n\tcountDark,countBrig,countInit=0,0,0\n\tflagFirst = True\n\ttypePixel= state_pixel(image[row,col], image[row+dx[0],col+dy[0]], threshold)\n\tflagIP = False\t\n\tfor index in range(0, len(dx)):\n\t\tintensity = image[row,col]\n\t\tnew_intensity=image[row+dx[index],col+dy[index]]\n\t\tstate = state_pixel(intensity, new_intensity, threshold)\n\t\tif flagFirst and state == typePixel:\n\t\t\tcountInit = countInit + 1\n\t\telse:\n\t\t\tflagFirst = False\n\n\t\tif state == 'd':\n\t\t\tcountDark = countDark + 1\n\t\t\tcountBrig = 0\n\t\telse :\n\t\t\tif state == 'b':\n\t\t\t\tcountBrig = countBrig + 1\n\t\t\t\tcountDark = 0\n\t\t\telse:\t\n\t\t\t\tcountBrig = 0\n\t\t\t\tcountDark = 0\n\t\tif countBrig >= N or countDark >=N :\n\t\t\treturn True\n\n\t\tif (countDark==1 or countBrig==1) and (state!=typePixel) and (16-index=N:\n\t\t\treturn True\n\t\telse :\n\t\t\tif typePixel == 'b' and countInit+countBrig>=N:\n\t\t\t\treturn True\n\treturn False\n\n'''\n\tState of a pixel(darker, similar, brighter)\n'''\ndef state_pixel(intensity, new_intensity, threshold):\n if new_intensity <= intensity-threshold :\n return 'd' # darker\n else :\n if intensity - threshold < new_intensity 
and new_intensity < intensity + threshold :\n return 's' # similar\n else :\n if intensity + threshold <= new_intensity :\n return 'b' # brighter\n return 's'\n\nif __name__ == \"__main__\":\n images_names = ['../input/fumar.jpg']\n image = cv2.imread(images_names[0], 0)\n keyPoints=interest_points(image, threshold = 30, N = 8, supression=True)\n plt.imshow(cv2.drawKeypoints(image, keyPoints, color=(0,255,0), flags=0))\n plt.show()\n","sub_path":"HW2/src/fast.py","file_name":"fast.py","file_ext":"py","file_size_in_byte":5197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"14060150","text":"import tensorflow as tf\nimport numpy as np\n\nfrom models.autoencoder_models import stacked_denoising_autoencoder\nfrom utils import datasets, utilities\n\n# #################### #\n# Flags definition #\n# #################### #\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n# Global configuration\nflags.DEFINE_string('dataset', 'mnist', 'Which dataset to use. [\"mnist\", \"cifar10\", \"custom\"]')\nflags.DEFINE_string('train_dataset', '', 'Path to train set .npy file.')\nflags.DEFINE_string('train_labels', '', 'Path to train labels .npy file.')\nflags.DEFINE_string('valid_dataset', '', 'Path to valid set .npy file.')\nflags.DEFINE_string('valid_labels', '', 'Path to valid labels .npy file.')\nflags.DEFINE_string('test_dataset', '', 'Path to test set .npy file.')\nflags.DEFINE_string('test_labels', '', 'Path to test labels .npy file.')\nflags.DEFINE_string('cifar_dir', '', 'Path to the cifar 10 dataset directory.')\nflags.DEFINE_boolean('do_pretrain', True, 'Whether or not doing unsupervised pretraining.')\nflags.DEFINE_string('save_predictions', '', 'Path to a .npy file to save predictions of the model.')\nflags.DEFINE_string('save_layers_output', '', 'Path to a .npy file to save output from all the layers of the model.')\nflags.DEFINE_boolean('restore_previous_model', False, 'If true, restore previous model corresponding to model name.')\nflags.DEFINE_integer('seed', -1, 'Seed for the random generators (>= 0). Useful for testing hyperparameters.')\nflags.DEFINE_string('model_name', 'sdae', 'Name for the model.')\n\n# Supervised fine tuning parameters\nflags.DEFINE_string('finetune_loss_func', 'cross_entropy', 'Last Layer Loss function.[\"cross_entropy\", \"mean_squared\"]')\nflags.DEFINE_integer('finetune_num_epochs', 30, 'Number of epochs for the fine-tuning phase.')\nflags.DEFINE_float('finetune_learning_rate', 0.001, 'Learning rate for the fine-tuning phase.')\nflags.DEFINE_string('finetune_act_func', 'relu', 'Activation function for the fine-tuning phase.'\n '[\"sigmoid, \"tanh\", \"relu\"]')\nflags.DEFINE_float('dropout', 1, 'Dropout parameter.')\nflags.DEFINE_string('finetune_opt', 'gradient_descent', '[\"gradient_descent\", \"ada_grad\", \"momentum\", \"adam\"]')\nflags.DEFINE_integer('finetune_batch_size', 20, 'Size of each mini-batch for the fine-tuning phase.')\nflags.DEFINE_integer('verbose', 0, 'Level of verbosity. 0 - silent, 1 - print accuracy.')\nflags.DEFINE_string('main_dir', 'sdae/', 'Directory to store data relative to the algorithm.')\nflags.DEFINE_string('corr_type', 'none', 'Type of input corruption. 
[\"none\", \"masking\", \"salt_and_pepper\"]')\nflags.DEFINE_float('corr_frac', 0.0, 'Fraction of the input to corrupt.')\nflags.DEFINE_float('momentum', 0.5, 'Momentum parameter.')\n# Autoencoder layers specific parameters\nflags.DEFINE_string('layers', '256,', 'Comma-separated values for the layers in the sdae.')\nflags.DEFINE_float('l2reg', 5e-4, 'Regularization parameter for the autoencoders. If 0, no regularization.')\nflags.DEFINE_string('enc_act_func', 'sigmoid,', 'Activation function for the encoder. [\"sigmoid\", \"tanh\"]')\nflags.DEFINE_string('dec_act_func', 'none,', 'Activation function for the decoder. [\"sigmoid\", \"tanh\", \"none\"]')\nflags.DEFINE_string('loss_func', 'mean_squared,', 'Loss function. [\"mean_squared\" or \"cross_entropy\"]')\nflags.DEFINE_string('opt', 'gradient_descent,', '[\"gradient_descent\", \"ada_grad\", \"momentum\", \"adam\"]')\nflags.DEFINE_string('learning_rate', '0.01,', 'Initial learning rate.')\nflags.DEFINE_string('num_epochs', '10,', 'Number of epochs.')\nflags.DEFINE_string('batch_size', '10,', 'Size of each mini-batch.')\n\n# Conversion of Autoencoder layers parameters from string to their specific type\nlayers = [int(_) for _ in FLAGS.layers.split(',') if _]\nenc_act_func = [_ for _ in FLAGS.enc_act_func.split(',') if _]\ndec_act_func = [_ for _ in FLAGS.dec_act_func.split(',') if _]\nopt = [_ for _ in FLAGS.opt.split(',') if _]\nloss_func = [_ for _ in FLAGS.loss_func.split(',') if _]\nlearning_rate = [float(_) for _ in FLAGS.learning_rate.split(',') if _]\nnum_epochs = [int(_) for _ in FLAGS.num_epochs.split(',') if _]\nbatch_size = [int(_) for _ in FLAGS.batch_size.split(',') if _]\n\n# Parameters normalization: if a parameter is not specified, it must be made of the same length of the others\ndae_params = {'layers': layers, 'enc_act_func': enc_act_func,\n 'dec_act_func': dec_act_func, 'loss_func': loss_func, 'learning_rate': learning_rate,\n 'opt': opt, 'num_epochs': num_epochs, 'batch_size': batch_size}\n\nfor p in dae_params:\n if len(dae_params[p]) != len(layers):\n # The current parameter is not specified by the user, should default it for all the layers\n dae_params[p] = [dae_params[p][0] for _ in layers]\n\n# Parameters validation\nassert 0. 
<= FLAGS.corr_frac <= 1.\nassert FLAGS.corr_type in ['masking', 'salt_and_pepper', 'none']\nassert FLAGS.dataset in ['mnist', 'cifar10', 'custom']\nassert len(layers) > 0\nassert all([af in ['sigmoid', 'tanh'] for af in enc_act_func])\nassert all([af in ['sigmoid', 'tanh', 'none'] for af in dec_act_func])\nassert all([lf in ['cross_entropy', 'mean_squared'] for lf in loss_func])\nassert FLAGS.finetune_opt in ['gradient_descent', 'ada_grad', 'momentum', 'adam']\n\nif __name__ == '__main__':\n\n utilities.random_seed_np_tf(FLAGS.seed)\n\n if FLAGS.dataset == 'mnist':\n\n # ################# #\n # MNIST Dataset #\n # ################# #\n\n trX, trY, vlX, vlY, teX, teY = datasets.load_mnist_dataset(mode='supervised')\n\n elif FLAGS.dataset == 'cifar10':\n\n # ################### #\n # Cifar10 Dataset #\n # ################### #\n\n trX, trY, teX, teY = datasets.load_cifar10_dataset(FLAGS.cifar_dir, mode='supervised')\n # Validation set is the first half of the test set\n vlX = teX[:5000]\n vlY = teY[:5000]\n\n elif FLAGS.dataset == 'custom':\n\n # ################## #\n # Custom Dataset #\n # ################## #\n\n def load_from_np(dataset_path):\n if dataset_path != '':\n return np.load(dataset_path)\n else:\n return None\n\n trX, trY = load_from_np(FLAGS.train_dataset), load_from_np(FLAGS.train_labels)\n vlX, vlY = load_from_np(FLAGS.valid_dataset), load_from_np(FLAGS.valid_labels)\n teX, teY = load_from_np(FLAGS.test_dataset), load_from_np(FLAGS.test_labels)\n\n else:\n trX = None\n trY = None\n vlX = None\n vlY = None\n teX = None\n teY = None\n\n # Create the object\n sdae = None\n\n sdae = stacked_denoising_autoencoder.StackedDenoisingAutoencoder(\n do_pretrain=FLAGS.do_pretrain, model_name=FLAGS.model_name,\n layers=dae_params['layers'], finetune_loss_func=FLAGS.finetune_loss_func,\n finetune_learning_rate=FLAGS.finetune_learning_rate, finetune_num_epochs=FLAGS.finetune_num_epochs,\n finetune_opt=FLAGS.finetune_opt, finetune_batch_size=FLAGS.finetune_batch_size, dropout=FLAGS.dropout,\n enc_act_func=dae_params['enc_act_func'], dec_act_func=dae_params['dec_act_func'],\n corr_type=FLAGS.corr_type, corr_frac=FLAGS.corr_frac, autoencoders_l2reg=FLAGS.l2reg,\n dataset=FLAGS.dataset, loss_func=dae_params['loss_func'], main_dir=FLAGS.main_dir, opt=dae_params['opt'],\n learning_rate=dae_params['learning_rate'], momentum=FLAGS.momentum, verbose=FLAGS.verbose,\n num_epochs=dae_params['num_epochs'], batch_size=dae_params['batch_size'],\n finetune_act_func=FLAGS.finetune_act_func)\n\n # Fit the model (unsupervised pretraining)\n if FLAGS.do_pretrain:\n encoded_X, encoded_vX = sdae.pretrain(trX, vlX)\n\n # Supervised finetuning\n sdae.build_model(trX.shape[1], trY.shape[1])\n\n sdae.fit(trX, trY, vlX, vlY, restore_previous_model=FLAGS.restore_previous_model)\n\n # Compute the accuracy of the model\n print('Test set accuracy: {}'.format(sdae.compute_accuracy(teX, teY)))\n\n # Save the predictions of the model\n if FLAGS.save_predictions:\n print('Saving the predictions for the test set...')\n np.save(FLAGS.save_predictions, sdae.predict(teX))\n\n # Save output from each layer of the model\n if FLAGS.save_layers_output:\n print('Saving the output of each layer for the test set')\n out = sdae.get_layers_output(teX)\n for i, o in enumerate(out):\n np.save(FLAGS.save_layers_output + '-layer-' + str(i + 1), 
o)\n\n\n\n","sub_path":"GH_buggy_examples/DLT_20d1b59/command_line/run_stacked_autoencoder_supervised.py","file_name":"run_stacked_autoencoder_supervised.py","file_ext":"py","file_size_in_byte":8463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"485622403","text":"import os\r\nimport csv\r\n\r\n# Path to collect data from the Resources folder\r\nbudget = os.path.join('..', 'Resources', 'budget_data.csv')\r\n\r\n# Read in the CSV file\r\nwith open(budget, newline=\"\") as csvfile:\r\n csvreader = csv.reader(csvfile, delimiter=',')\r\n header = next(csvreader)\r\n count = 0\r\n total = 0\r\n max = 0\r\n min = 0\r\n profit = 0\r\n\r\n # Loop through the data\r\n for row in csvreader:\r\n #average block\r\n count += 1\r\n total += int(row[1])\r\n\r\n #max min block\r\n if int(row[1]) > max:\r\n max = int(row[1])\r\n if int(row[1]) < min:\r\n min = int(row[1])\r\n totalchange = max - min\r\n\r\n #Greatest increase pseudo block\r\n # Create a variable\r\n # == profit\r\n # iterate through the array finding the difference between n+1 and n.\r\n # == within the for loop going row by row. row[1] is the column but\r\n # how do we get the next value?\r\n # if this value is greater than the original value of the variable\r\n #if difference > Profit\r\n # then set the variable to this new value\r\n # == profit = difference\r\n #Apply logit to get decrease, flip inequality\r\n\r\n\r\n print(\"Financial Analysis\")\r\n print(\"----------------------------\")\r\n print(f\"Total Months: {str(count)}\")\r\n print(f\"Total: ${str(total)}\")\r\n print(f\"Average Change: ${str(totalchange/count)}\")\r\n print(f\"Greatest Increase in Profits: \")\r\n print(f\"Greatest Decrease in Profits: \")\r\n\r\noutput_path = os.path.join(\"..\", \"Output\", \"PyBank.txt\")\r\n\r\n# Open the file using \"write\" mode. 
Specify the variable to hold the contents\r\nwith open(output_path, 'w', newline='') as csvfile:\r\n\r\n # Initialize csv.writer\r\n csvwriter = csv.writer(csvfile, delimiter=',')\r\n\r\n # Write\r\n csvwriter.writerow(['Financial Analysis'])\r\n csvwriter.writerow(['----------------------------'])\r\n csvwriter.writerow(['Total Months: ' + str(count)])\r\n csvwriter.writerow(['Total: $' + str(total)])\r\n csvwriter.writerow(['Average Change: $' + str(totalchange/count)])\r\n csvwriter.writerow(['Greatest Increase in Profits: '])\r\n csvwriter.writerow(['Greatest Decrease in Profits: '])\r\n","sub_path":"HW/pybank.py","file_name":"pybank.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"91253892","text":"\n\n#calss header\nclass _SACK():\n\tdef __init__(self,): \n\t\tself.name = \"SACK\"\n\t\tself.definitions = [u'to remove someone from a job, usually because they have done something wrong or badly, or sometimes as a way of saving the cost of employing them: ', u'in American football, to attack the quarterback in order to prevent him from throwing the ball: ', u'to attack a building or town, causing a lot of destruction and stealing many valuable things: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_sack.py","file_name":"_sack.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"593217163","text":"import numpy as np\nimport matplotlib.pylab as plt\nimport healpy as hp\nfrom lsst.sims.featureScheduler.modelObservatory import Model_observatory\nfrom lsst.sims.featureScheduler.schedulers import Core_scheduler\nfrom lsst.sims.featureScheduler.utils import standard_goals, calc_norm_factor\nimport lsst.sims.featureScheduler.basis_functions as bf\nfrom lsst.sims.featureScheduler.surveys import (Greedy_survey, Blob_survey)\nfrom lsst.sims.featureScheduler import sim_runner\nimport sys\nimport subprocess\nimport os\nimport argparse\nimport lsst.sims.featureScheduler.basis_functions as basis_functions\nfrom lsst.sims.featureScheduler.surveys import Deep_drilling_survey\n\n\n# I should maybe say, if I'm more than 4 sequences below the fractional limit, lift the force delay.\n# Basically combine the fraction_of_obs and Fore delay into a single basis function. 
let's start by testing \n# If just going to 1-day delay helps\n\n\ndef dd_bfs(RA, dec, survey_name, ha_limits, frac_total=0.0185):\n \"\"\"\n Convienence function to generate all the feasibility basis functions\n \"\"\"\n bfs = []\n bfs.append(basis_functions.Filter_loaded_basis_function(filternames=['r', 'g', 'i', 'z', 'y']))\n bfs.append(basis_functions.Not_twilight_basis_function(sun_alt_limit=-18))\n bfs.append(basis_functions.Time_to_twilight_basis_function(time_needed=62.))\n bfs.append(basis_functions.Force_delay_basis_function(days_delay=0.1, survey_name=survey_name))\n bfs.append(basis_functions.Hour_Angle_limit_basis_function(RA=RA, ha_limits=ha_limits))\n bfs.append(basis_functions.Fraction_of_obs_basis_function(frac_total=frac_total, survey_name=survey_name))\n\n return bfs\n\n\ndef dd_u_bfs(RA, dec, survey_name, ha_limits, frac_total=0.0015):\n \"\"\"Convienence function to generate all the feasibility basis functions for u-band DDFs\n \"\"\"\n bfs = []\n bfs.append(basis_functions.Filter_loaded_basis_function(filternames='u'))\n bfs.append(basis_functions.Not_twilight_basis_function(sun_alt_limit=-18))\n bfs.append(basis_functions.Time_to_twilight_basis_function(time_needed=6.))\n bfs.append(basis_functions.Hour_Angle_limit_basis_function(RA=RA, ha_limits=ha_limits))\n\n bfs.append(basis_functions.Force_delay_basis_function(days_delay=1., survey_name=survey_name))\n bfs.append(basis_functions.Moon_down_basis_function())\n bfs.append(basis_functions.Fraction_of_obs_basis_function(frac_total=frac_total, survey_name=survey_name))\n\n return bfs\n\n\ndef generate_dd_surveys(nside=None, nexp=2, detailers=None):\n \"\"\"Utility to return a list of standard deep drilling field surveys.\n\n XXX-Someone double check that I got the coordinates right!\n\n \"\"\"\n\n surveys = []\n\n # ELAIS S1\n RA = 9.45\n dec = -44.\n survey_name = 'DD:ELAISS1'\n ha_limits = ([0., 1.18], [21.82, 24.])\n bfs = dd_bfs(RA, dec, survey_name, ha_limits)\n surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='rgizy',\n nvis=[20, 10, 20, 26, 20],\n survey_name=survey_name, reward_value=100,\n nside=nside, nexp=nexp, detailers=detailers))\n\n survey_name = 'DD:u,ELAISS1'\n bfs = dd_u_bfs(RA, dec, survey_name, ha_limits)\n\n surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='u',\n nvis=[7], survey_name=survey_name, reward_value=100, nside=nside,\n nexp=nexp, detailers=detailers))\n\n # XMM-LSS\n survey_name = 'DD:XMM-LSS'\n RA = 35.708333\n dec = -4-45/60.\n ha_limits = ([0., 1.3], [21.7, 24.])\n bfs = dd_bfs(RA, dec, survey_name, ha_limits)\n\n surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='rgizy',\n nvis=[20, 10, 20, 26, 20], survey_name=survey_name, reward_value=100,\n nside=nside, nexp=nexp, detailers=detailers))\n survey_name = 'DD:u,XMM-LSS'\n bfs = dd_u_bfs(RA, dec, survey_name, ha_limits)\n\n surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='u',\n nvis=[7], survey_name=survey_name, reward_value=100, nside=nside,\n nexp=nexp, detailers=detailers))\n\n # Extended Chandra Deep Field South\n RA = 53.125\n dec = -28.-6/60.\n survey_name = 'DD:ECDFS'\n ha_limits = [[0.5, 3.0], [20., 22.5]]\n bfs = dd_bfs(RA, dec, survey_name, ha_limits)\n surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='rgizy',\n nvis=[20, 10, 20, 26, 20],\n survey_name=survey_name, reward_value=100, nside=nside,\n nexp=nexp, detailers=detailers))\n\n survey_name = 'DD:u,ECDFS'\n bfs = dd_u_bfs(RA, dec, survey_name, ha_limits)\n surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='u',\n 
nvis=[7], survey_name=survey_name, reward_value=100, nside=nside,\n nexp=nexp, detailers=detailers))\n # COSMOS\n RA = 150.1\n dec = 2.+10./60.+55/3600.\n survey_name = 'DD:COSMOS'\n ha_limits = ([0., 1.5], [21.5, 24.])\n bfs = dd_bfs(RA, dec, survey_name, ha_limits)\n surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='rgizy',\n nvis=[20, 10, 20, 26, 20],\n survey_name=survey_name, reward_value=100, nside=nside,\n nexp=nexp, detailers=detailers))\n survey_name = 'DD:u,COSMOS'\n bfs = dd_u_bfs(RA, dec, survey_name, ha_limits)\n surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='u',\n nvis=[7], survey_name=survey_name, reward_value=100, nside=nside,\n nexp=nexp, detailers=detailers))\n\n # Extra DD Field, just to get to 5. Still not closed on this one\n survey_name = 'DD:290'\n RA = 349.386443\n dec = -63.321004\n ha_limits = ([0., 0.5], [23.5, 24.])\n bfs = dd_bfs(RA, dec, survey_name, ha_limits)\n surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='rgizy',\n nvis=[20, 10, 20, 26, 20],\n survey_name=survey_name, reward_value=100, nside=nside,\n nexp=nexp, detailers=detailers))\n\n survey_name = 'DD:u,290'\n bfs = dd_u_bfs(RA, dec, survey_name, ha_limits)\n surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='u', nvis=[7],\n survey_name=survey_name, reward_value=100, nside=nside,\n nexp=nexp, detailers=detailers))\n\n return surveys\n\n\ndef gen_greedy_surveys(nside, nexp=1):\n \"\"\"\n Make a quick set of greedy surveys\n \"\"\"\n target_map = standard_goals(nside=nside)\n norm_factor = calc_norm_factor(target_map)\n # Let's remove the bluer filters since this should only be near twilight\n filters = ['r', 'i', 'z', 'y']\n surveys = []\n\n for filtername in filters:\n bfs = []\n bfs.append(bf.M5_diff_basis_function(filtername=filtername, nside=nside))\n bfs.append(bf.Target_map_basis_function(filtername=filtername,\n target_map=target_map[filtername],\n out_of_bounds_val=np.nan, nside=nside,\n norm_factor=norm_factor))\n bfs.append(bf.Slewtime_basis_function(filtername=filtername, nside=nside))\n bfs.append(bf.Strict_filter_basis_function(filtername=filtername))\n # Masks, give these 0 weight\n bfs.append(bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=60., max_alt=76.))\n bfs.append(bf.Moon_avoidance_basis_function(nside=nside, moon_distance=40.))\n\n bfs.append(bf.Filter_loaded_basis_function(filternames=filtername))\n\n weights = np.array([3.0, 0.3, 3., 3., 0., 0., 0.])\n surveys.append(Greedy_survey(bfs, weights, block_size=1, filtername=filtername,\n dither=True, nside=nside, ignore_obs='DD', nexp=nexp))\n\n return surveys\n\n\ndef generate_blobs(nside, mixed_pairs=False, nexp=1, no_pairs=False):\n target_map = standard_goals(nside=nside)\n norm_factor = calc_norm_factor(target_map)\n\n # List to hold all the surveys (for easy plotting later)\n surveys = []\n\n # Set up observations to be taken in blocks\n filter1s = ['u', 'g', 'r', 'i', 'z', 'y']\n if mixed_pairs:\n filter2s = [None, 'r', 'i', 'z', None, None]\n else:\n filter2s = [None, 'g', 'r', 'i', None, None]\n\n if no_pairs:\n filter2s = [None, None, None, None, None, None]\n\n # Ideal time between taking pairs\n pair_time = 22.\n times_needed = [pair_time, pair_time*2]\n for filtername, filtername2 in zip(filter1s, filter2s):\n bfs = []\n bfs.append(bf.M5_diff_basis_function(filtername=filtername, nside=nside))\n if filtername2 is not None:\n bfs.append(bf.M5_diff_basis_function(filtername=filtername2, nside=nside))\n 
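# NB: the bfs list assembled here must stay aligned with the weights array built\n        # further down -- basis functions and weights are matched by position.\n        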
bfs.append(bf.Target_map_basis_function(filtername=filtername,\n target_map=target_map[filtername],\n out_of_bounds_val=np.nan, nside=nside,\n norm_factor=norm_factor))\n if filtername2 is not None:\n bfs.append(bf.Target_map_basis_function(filtername=filtername2,\n target_map=target_map[filtername2],\n out_of_bounds_val=np.nan, nside=nside,\n norm_factor=norm_factor))\n bfs.append(bf.Slewtime_basis_function(filtername=filtername, nside=nside))\n bfs.append(bf.Strict_filter_basis_function(filtername=filtername))\n # Masks, give these 0 weight\n bfs.append(bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=60., max_alt=76.))\n bfs.append(bf.Moon_avoidance_basis_function(nside=nside, moon_distance=30.))\n filternames = [fn for fn in [filtername, filtername2] if fn is not None]\n bfs.append(bf.Filter_loaded_basis_function(filternames=filternames))\n if filtername2 is None:\n time_needed = times_needed[0]\n else:\n time_needed = times_needed[1]\n bfs.append(bf.Time_to_twilight_basis_function(time_needed=time_needed))\n bfs.append(bf.Not_twilight_basis_function())\n weights = np.array([3.0, 3.0, .3, .3, 3., 3., 0., 0., 0., 0., 0.])\n if filtername2 is None:\n # Need to scale weights up so filter balancing still works properly.\n weights = np.array([6.0, 0.6, 3., 3., 0., 0., 0., 0., 0.])\n if filtername2 is None:\n survey_name = 'blob, %s' % filtername\n else:\n survey_name = 'blob, %s%s' % (filtername, filtername2)\n surveys.append(Blob_survey(bfs, weights, filtername1=filtername, filtername2=filtername2,\n ideal_pair_time=pair_time, nside=nside,\n survey_note=survey_name, ignore_obs='DD', dither=True,\n nexp=nexp))\n\n return surveys\n\n\ndef run_sched(surveys, survey_length=365.25, nside=32, fileroot='baseline_', verbose=False,\n extra_info=None):\n years = np.round(survey_length/365.25)\n scheduler = Core_scheduler(surveys, nside=nside)\n n_visit_limit = None\n observatory = Model_observatory(nside=nside)\n observatory, scheduler, observations = sim_runner(observatory, scheduler,\n survey_length=survey_length,\n filename=fileroot+'%iyrs.db' % years,\n delete_past=True, n_visit_limit=n_visit_limit,\n verbose=verbose, extra_info=extra_info)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--nexp\", type=int, default=1, help=\"Number of exposures per visit\")\n parser.add_argument(\"--Pairs\", dest='pairs', action='store_true')\n parser.add_argument(\"--noPairs\", dest='pairs', action='store_false')\n parser.set_defaults(pairs=True)\n parser.add_argument(\"--mixedPairs\", dest='mixedPairs', action='store_true')\n parser.add_argument(\"--nomixedPairs\", dest='mixedPairs', action='store_false')\n parser.set_defaults(mixedPairs=True)\n parser.add_argument(\"--verbose\", dest='verbose', action='store_true')\n parser.set_defaults(verbose=False)\n parser.add_argument(\"--survey_length\", type=float, default=365.25*10)\n parser.add_argument(\"--outDir\", type=str, default=\"\")\n\n args = parser.parse_args()\n nexp = args.nexp\n Pairs = args.pairs\n mixedPairs = args.mixedPairs\n survey_length = args.survey_length # Days\n outDir = args.outDir\n verbose = args.verbose\n\n nside = 32\n\n extra_info = {}\n exec_command = ''\n for arg in sys.argv:\n exec_command += ' ' + arg\n extra_info['exec command'] = exec_command\n extra_info['git hash'] = subprocess.check_output(['git', 'rev-parse', 'HEAD'])\n extra_info['file executed'] = os.path.realpath(__file__)\n\n fileroot = 'ddfdebug_'\n\n if nexp > 1:\n fileroot += '%iexp' % nexp\n greedy = 
gen_greedy_surveys(nside, nexp=nexp)\n    ddfs = generate_dd_surveys(nside=nside, nexp=nexp)\n    blobs = generate_blobs(nside, nexp=nexp, mixed_pairs=True)\n    surveys = [ddfs, blobs, greedy]\n    run_sched(surveys, survey_length=survey_length, verbose=verbose,\n              fileroot=os.path.join(outDir, fileroot), extra_info=extra_info,\n              nside=nside)\n","sub_path":"scratch/ddf_debug/ddf_debug.py","file_name":"ddf_debug.py","file_ext":"py","file_size_in_byte":13976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"202980451","text":"import unittest\nfrom pyapprox.karhunen_loeve_expansion import *\n\n\nclass TestKLE(unittest.TestCase):\n\n    def setUp(self):\n        np.random.seed(1)\n\n    def test_multivariate_chain_rule(self):\n        r\"\"\"\n        Test computing gradient using multivariate chain rule.\n\n        .. math:: y(u_1,u_2)=u_1^2+2u_2, \\quad u_1(x_1,x_2)=x_1\\sin(x_2), \\quad u_2(x_1,x_2)=\\sin^2(x_2)\n\n        .. math:: \n\n           \\frac{\\partial y}{\\partial x_1} = \\frac{\\partial y}{\\partial u_1}\\frac{\\partial u_1}{\\partial x_1} + \\frac{\\partial y}{\\partial u_2}\\frac{\\partial u_2}{\\partial x_1} = (2u_1)(\\sin(x_2))+(2)(0)=2x_1\\sin^2(x_2)\n\n        .. math::\n\n           \\frac{\\partial y}{\\partial x_2} = \\frac{\\partial y}{\\partial u_1}\\frac{\\partial u_1}{\\partial x_2} + \\frac{\\partial y}{\\partial u_2}\\frac{\\partial u_2}{\\partial x_2} = (2u_1)(x_1\\cos(x_2))+(2)(2\\sin(x_2)\\cos(x_2))=(x_1^2+2)\\sin(2x_2)\n\n        \"\"\"\n        def yfun(u):\n            return u[0]**2+2.*u[1]\n\n        def ufun(x): return np.array(\n            [x[0]*np.sin(x[1]), np.sin(x[1])**2])\n\n        sample = np.random.normal(0., 1., (2))\n\n        exact_gradient = np.array(\n            [2.*sample[0]*np.sin(sample[1])**2,\n             (sample[0]**2+2.)*np.sin(2.*sample[1])])\n        \n        uvec = ufun(sample)\n        jac_yu = np.array([2*uvec[0], 2.])\n        jac_ux = np.array(\n            [np.array([np.sin(sample[1]), 0.]),\n             np.array([sample[0]*np.cos(sample[1]), np.sin(2.*sample[1])])]).T\n\n        gradient = multivariate_chain_rule(jac_yu, jac_ux)\n        assert np.allclose(exact_gradient, gradient, atol=1e-7)\n\n    def test_compute_kle_gradient_from_mesh_gradient(self):\n        nvars, sigma = 2, 3\n        length_scale = 1\n        mesh = np.linspace(0., 1., 11)[None, :]\n        kle_mean = mesh[0, :]+2\n\n        for use_log in [False, True]:\n            kle = MeshKLE(mesh, kle_mean, use_log)\n            kle.compute_basis(length_scale, sigma, nvars)\n\n            def scalar_function_of_field(field):\n                return np.dot(field[:, 0], field[:, 0])\n\n            sample = np.random.normal(0. 
,1., (nvars, 1))\n kle_vals = kle(sample)\n\n from pyapprox.optimization import approx_jacobian\n mesh_gradient = kle_vals.T*2\n assert np.allclose(\n mesh_gradient,\n approx_jacobian(scalar_function_of_field, kle_vals), atol=1e-7)\n\n gradient = compute_kle_gradient_from_mesh_gradient(\n mesh_gradient, kle.eig_vecs, kle.mean_field,\n kle.use_log, sample[:, 0])\n\n def scalar_function_of_sample(sample):\n field = kle(sample)\n return scalar_function_of_field(field)\n\n fd_gradient = approx_jacobian(scalar_function_of_sample, sample)\n # print((fd_gradient, gradient))\n assert np.allclose(fd_gradient, gradient)\n \n\nif __name__ == \"__main__\":\n kle_test_suite = unittest.TestLoader().loadTestsFromTestCase(\n TestKLE)\n unittest.TextTestRunner(verbosity=2).run(kle_test_suite)\n","sub_path":"pyapprox/tests/test_karhunen_loeve.py","file_name":"test_karhunen_loeve.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"152401454","text":"from time import process_time as TIME\n\n\ndef import_test_cases():\n f = open(\"54.txt\", \"r\")\n tc = f.readlines()\n for i in range(len(tc)):\n tc[i] = tc[i].rstrip(\"\\n\")\n return tc\n\n\ndef format_player(p):\n l = []\n for i in range(1, 6):\n l.append(p[3*(i-1):3*i].strip())\n return l\n\n\nalpha = {\n \"T\": 10,\n \"J\": 11,\n \"Q\": 12,\n \"K\": 13,\n \"A\": 14\n}\n\n\ndef number_n_suit(p):\n number = []; suit = []\n for c in p:\n\n if c[0].isnumeric():\n number.append(int(c[0]))\n else:\n number.append(alpha[c[0]])\n\n suit.append(c[1])\n return number, suit\n\n\ndef has_multi(p):\n number, suit = number_n_suit(p)\n # print(number, suit)\n pair = []\n three_oak = []\n four_oak = []\n\n if len(number) == len(set(number)):\n return pair, three_oak, four_oak\n\n for num in set(number):\n if number.count(num) == 2:\n pair.append(num)\n elif number.count(num) == 3:\n three_oak.append(num)\n elif number.count(num) == 4:\n four_oak.append(num)\n return pair, three_oak, four_oak\n\n\ndef is_fullhouse(p):\n if not has_multi(p):\n return False\n else:\n pair, three_oak, four_oak = has_multi(p)\n\n if len(pair) == 1 and len(three_oak) == 1:\n return three_oak\n\n return False\n\n\ndef is_straight(p):\n number, suit = number_n_suit(p)\n number.sort()\n\n for i in range(4):\n if number[i+1] - number[i] != 1:\n return False\n return number[-1] # max number of straight\n\n\ndef is_flush(p):\n number, suit = number_n_suit(p)\n if len(set(suit)) == 1:\n return suit[0]\n return False\n\n# is_flush([\"8C\", \"KC\", \"7C\", \"TC\", \"2C\"])\n\n\ndef is_royal_flush(p):\n number, suit = number_n_suit(p)\n number.sort()\n if number[0] == 10 and number[-1] == 14:\n return suit[0]\n\n\ndef find_max(p):\n number, suit = number_n_suit(p)\n return max(number)\n\n\ndef rank(p):\n flush = is_flush(p)\n straight = is_straight(p)\n if flush and straight:\n if is_royal_flush(p):\n return 10, is_royal_flush(p)\n else:\n return 9, straight\n\n pair, three_oak, four_oak = has_multi(p)\n if len(four_oak) == 1:\n return 8, four_oak[0]\n\n # check full house\n if len(pair) == 1 and len(three_oak) == 1:\n return 7, three_oak[0]\n\n if flush:\n return 6, flush\n if straight:\n return 5, straight\n\n if len(three_oak) == 1:\n return 4, three_oak[0]\n if len(pair) == 2:\n # print(pair)\n return 3, max(pair)\n if len(pair) == 1:\n return 2, pair[0]\n return 1, find_max(p)\n\n\n# player_1 = ['9C', 'KS', 'KC', '9H', 'AS']\n# player_2 = ['7D', '2S', '5D', '3S', 'AC']\n# print(rank(player_1), 
rank(player_2))\n\nclass Player:\n\n    def __init__(self, cards, rank=0, score=0):\n        self.cards = cards\n        self.rank = rank\n        self.score = score\n\n    def identify(self):\n        rk, sc = rank(self.cards)\n        self.rank = rk\n        self.score = sc\n\n\ntest_cases = import_test_cases()\n\nsame = 0; different = 0\nwin_player_1 = 0; win_player_2 = 0\nloop = 0; count_error = 0\n\nfor tc in test_cases:\n    # if loop > 5:\n    #     break\n    # else:\n    #     loop += 1\n\n    player_1 = Player(format_player(tc[:14]))\n    player_2 = Player(format_player(tc[15:]))\n    player_1.identify()\n    player_2.identify()\n    # print(player_1.rank, player_2.rank)\n    # player_1.identify(); player_2.identify()\n    # print(player_1.rank, player_2.rank)\n\n    if player_1.rank == player_2.rank:\n        same += 1\n\n        # if player_1.rank == 5:\n        #     print(player_1.cards, player_1.score)\n        #     print(player_2.cards, player_2.score)\n\n        print(player_1.score, player_2.score)\n        if player_1.score > player_2.score:\n            win_player_1 += 1\n        elif player_1.score < player_2.score:\n            win_player_2 += 1\n        else:\n            count_error += 1\n            print(player_1.cards, player_1.score)\n            print(player_2.cards, player_2.score)\n    else:\n        if player_1.rank > player_2.rank:\n            win_player_1 += 1\n        elif player_1.rank < player_2.rank:\n            win_player_2 += 1\n        else:\n            count_error += 1\n            print(player_1.cards, player_1.score)\n            print(player_2.cards, player_2.score)\n\nprint()\nprint(count_error)\nprint(same)\nprint(win_player_1, win_player_2)\nprint(\"EXECUTED IN\", TIME())\n\n# win_player_1 + 1 is the actual answer\n# Haven't fixed case same pairs\n# Only one case\n# same 5 pair\n# then check for single highest card\n","sub_path":"beforeSIIT/54.py","file_name":"54.py","file_ext":"py","file_size_in_byte":4599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"182110539","text":"# Class\nclass Student():\n    def __init__(self,name): # constructor\n        self.name = name # attribute\n\n    # compute the average of the scores\n    def calculate_Avg(self,data):\n        sum = 0\n\n        # add everything up\n        for num in data:\n            sum += num\n\n        # divide by the number of scores\n        avg = sum/len(data)\n        return avg\n\n    # pass/fail judgement\n    def judge(self,avg):\n        if(avg >= 60):\n            result = \"passed\"\n        else:\n            result = \"failed\"\n        return result\n\n# main\n# expected result: passed\na001 = Student(\"Sato\")\ndata = [70,65,50,90,30]\n\navg = a001.calculate_Avg(data)\nresult = a001.judge(avg)\n\nprint(avg)\nprint(a001.name+\" \"+result)\n\n# expected result: failed\na002 = Student(\"Tanaka\")\ndata = [70,65,50,10,30]\n\navg = a002.calculate_Avg(data)\nresult = a002.judge(avg)\n\nprint(avg)\nprint(a002.name+\" \"+result)\n","sub_path":"YouTube/TEST.py","file_name":"TEST.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"521661638","text":"'''\n2.\nWrite a function that prints what day of the week it is for a given year, month and day.\n\n\nTest code\n<input>\nprint(\"Year %d, month %d, day %d is a %s.\" % (myYear, myMonth, myDay, printDayOfTheWeek(myYear, myMonth, myDay)))\n\n<output>\nEnter the year : 2020\nEnter the month : 3\nEnter the day : 13\nYear 2020, month 3, day 13 is a Friday.\n\n'''\nimport datetime\n\nmyYear = int(input('Enter the year : '))\nmyMonth = int(input('Enter the month : '))\nmyDay = int(input('Enter the day : '))\n\ndef printDayOfTheWeek(y, m, d):\n    standard_day = datetime.datetime(year=2020, month=3, day=16)\n    target_day = datetime.datetime(year=y, month=m, day=d)\n\n    DayOfTheWeek = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n    diff = (target_day - standard_day).days\n\n    return DayOfTheWeek[diff % 7]\n
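\n# Quick check: printDayOfTheWeek(2020, 3, 16) gives 'Monday' (the reference date itself)\n# and printDayOfTheWeek(2020, 3, 13) gives 'Friday', matching the sample output above.\n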
\nprint(\"Year %d, month %d, day %d is a %s.\" % (myYear, myMonth, myDay, printDayOfTheWeek(myYear, myMonth, myDay)))\n\n\n\n","sub_path":"박석채/quiz2_02.py","file_name":"quiz2_02.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"365398576","text":"from django.shortcuts import render, get_object_or_404\nfrom .forms import StyledRegistrationForm, ProfileForm, StyledAuthenticationForm\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.forms import AuthenticationForm, UserChangeForm\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.decorators import login_required\nfrom .models import UserProfile, BlogPost\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\n\n\n# Create your views here.\n\n# Homepage with login form\ndef landing(request):\n\n    title = 'Power your community | Volward'\n\n    if request.method == \"POST\":\n\n    \tlogin_form = AuthenticationForm(data=request.POST)\n    \tif login_form.is_valid():\n    \t\tusername=request.POST['username']\n    \t\tpassword=request.POST['password']\n\n    \t\tuser = authenticate(username=username, password=password)\n    \t\tif user is not None and user.is_active:\n    \t\t\tlogin(request, user)\n\n    else:\n    \tlogin_form=AuthenticationForm()\n\n    return render(request,\n                  'app/home.html',\n                  {'title': title,'login_form':login_form})\n\n# Registration page\ndef signup(request):\n\n\ttitle = \"Sign Up | Volward\"\n\n\tif request.method == \"POST\":\n\t\tform = StyledRegistrationForm(data=request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn HttpResponseRedirect('/registration_complete/')\n\n\telse:\n\n\t\tform = StyledRegistrationForm()\n\n\treturn render(request,\n\t\t'app/signup.html',\n\t\t{'title':title, 'form':form})\n\n\n# Registration success\ndef registration_complete(request):\n\n\ttitle = \"Welcome | Volward\"\n\treturn render(request,\n\t\t'app/registration_complete.html',\n\t\t{'title':title,})\n\n# Login redirect page\ndef signin(request):\n\n    title = 'Power your community | Volward'\n\n    if request.method == \"POST\":\n\n    \tlogin_form = StyledAuthenticationForm(data=request.POST)\n    \tif login_form.is_valid():\n    \t\tusername=request.POST['username']\n    \t\tpassword=request.POST['password']\n\n    \t\tuser = authenticate(username=username, password=password)\n    \t\tif user is not None and user.is_active:\n    \t\t\tlogin(request, user)\n\n    else:\n    \tlogin_form=StyledAuthenticationForm()\n\n    return render(request,\n                  'app/signin.html',\n                  {'title': title,'login_form':login_form})\n\n@login_required\ndef user_logout(request):\n\n    logout(request)\n    # Take the user to exit page.\n    return HttpResponseRedirect('/bye')\n\ndef logout_success(request):\n\n\ttitle = 'See you soon | Volward'\n\n\treturn render(request,\n\t\t'app/logout.html',\n\t\t{'title':title})\n\ndef view_profile(request, slug):\n\t# Get the profile from the slug in url.\n\tprofile = get_object_or_404(UserProfile, slug=slug)\n\ttitle = profile.user.username\n\n\treturn render(request,\n\t\t'app/view_profile.html',\n\t\t{'title':title,\n\t\t'profile':profile})\n\n@login_required\ndef edit_profile(request):\n\n\tUserProfile.objects.get_or_create(user=request.user)\n\n\ttitle='Edit your profile | Volward'\n\tobj = get_object_or_404(UserProfile, user=request.user)\n\tprofile_form = ProfileForm(request.POST or None, request.FILES or None, instance=obj)\n\n\tif request.method==\"POST\":\n\t\tif profile_form.is_valid():\n\t\t\tprofile=profile_form.save(commit=False)\n\t\t\tprofile.user=request.user \n\t\t\tprofile.save()\n\n\n\treturn 
render(request,\n\t\t'app/edit_profile.html',\n\t\t{'title':title,\n\t\t'profile_form':profile_form,})\n\ndef list_post(request):\n\n\tposts = BlogPost.objects.all()[:10]\n\ttitle = ' | '.join(['Blog', 'Volward'])\n\n\treturn render(request,\n\t\t'app/list_blog_post.html',\n\t\t{'title':title,\n\t\t'posts':posts,})\n\n\ndef view_post(request, slug):\n\n\tpost = get_object_or_404(BlogPost, slug=slug)\n\ttitle = ' | '.join([post.title, 'Volward'])\n\n\treturn render(request,\n\t\t'app/view_blog_post.html',\n\t\t{'title':title,\n\t\t'post':post,})\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"70901031","text":"import numpy as np\nimport heapq\nimport bound\n\nclass KDTree:\n    \"\"\"\n    Construct a KDTree. It contains two kinds of nodes: internal nodes and leafnodes.\n    Leafnodes will not be split or sequenced. All points with a smaller value than a\n    node's split value appear in its left subtree and all points with a larger value\n    appear in its right subtree.\n\n    Parameters:\n        data: array with n rows (points) and m columns (dimensions).\n        leafsize: positive integer.\n\n    Methods:\n        build: build a kd-tree; the root and tree data are maintained.\n               The root is linked with its left/right children.\n               Each node has two values: maxes and mins,\n               which represent the max/min bound of the nodes it contains.\n               maxes/mins are the max/min values in all the dimensions.\n               Each node is linked with the data array by memorizing the indices of all the data in its bound.\n\n        query_dual_tree: return density estimates for all the nodes in another KDTree.\n            1) if the difference is tolerable, the average of upper and lower is the estimate\n               for all the nodes in q's bound.\n            2) if the query node and data node are both leafnodes, compute all the density contributions and sum them.\n            3) else compute the density bounds between the children of the query node and data node,\n               until the diff is tolerable or both trees reach leafnodes.\n\n    Usage: data = np.random.rand(100, 5)\n           tree = dual_tree.KDTree(data, leafsize=2)\n    \"\"\"\n\n    def __init__(self, data, leafsize):\n        \"\"\"\n        :param data: array with n rows and m columns.\n        :param leafsize: The size of the leaf.\n                         Leafnodes will not be split or sequenced.\n        :param maxes: upper bound array. The biggest value of each column/dimension owned by this tree.\n        :param mins: lower bound array. The smallest value of each column/dimension owned by this tree.\n        :param root: the root node of the tree, linked with its left and right child\n        \"\"\"\n        self.data = np.asarray(data)\n        self.n, self.m = np.shape(self.data)\n        self.leafsize = leafsize\n        self.maxes = np.amax(self.data, axis=0)\n        self.mins = np.amin(self.data, axis=0)\n        self.root = self.build(np.arange(self.n), self.maxes, self.mins)\n\n    # split value, dimensions and original data are not stored in the node.\n    class Node:\n        def __init__(self, leftchild, rightchild, idx, mins, maxes):\n            \"\"\"\n            :param leftchild: left child node\n            :param rightchild: right child node\n            :param idx: index of all the children nodes in the bound of current node.\n            :param mins: The smallest value of each column/dimension owned by this node, array.\n            :param maxes: The biggest value of each column/dimension owned by this node, array.\n            \"\"\"\n            self.leftchild = leftchild # left child\n            self.rightchild = rightchild # right child\n            self.idx = idx\n            self.mins = mins\n            self.maxes = maxes\n\n    # leafnode doesn't have children.\n    class LeafNode:\n        \"\"\"\n        :param idx: index of all the children nodes in the bound of current node.\n        :param mins: The smallest value of each column/dimension owned by this node, array.\n        :param maxes: The biggest value of each column/dimension owned by this node, array.\n        \"\"\"\n        def __init__(self, idx, mins, maxes):\n            self.idx = idx\n            self.mins = mins\n            self.maxes = maxes\n\n    def build(self, idx, maxes, mins):\n        \"\"\"\n        :param idx: position of node's value in the data array.\n        :param maxes: biggest value in each dimension (column) in an ndarray.\n        :param mins: smallest value in each dimension (column) in an ndarray.\n        :return: link tree root with other nodes and save some information in the nodes.\n\n        Build a tree:\n        1. check if the node is a leaf by using this condition: length(\"nodes in the bound\") <= required leafsize.\n        2. If upper bound == lower bound, there is one node left on this branch. It must be a leafnode.\n        3. Split the data: points which are smaller than the average of the max value and min value on one dimension\n           (the one with the biggest range) are included in the left-child node,\n           other points fall in the right-child node's bound.\n        4. If there is no point in the left/right node's bound, split the data again based on the median rather than the mean.\n        5. Repeat, until all the points are linked or stored in the leafnodes.\n\n        This KDTree may not be balanced.\n        \"\"\"\n
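        # Example: with maxes=[9, 4] and mins=[1, 2] the split dimension is 0 (range 8 vs 2)\n        # and the split value is (9 + 1) / 2 = 5: rows with data[:, 0] <= 5 go left, the rest right.\n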
        # boundary condition1: if the node is a leafnode\n        if len(idx) <= self.leafsize:\n            leafmins = np.amin(self.data[idx], axis=0)\n            leafmaxes = np.amax(self.data[idx], axis=0)\n            return KDTree.LeafNode(idx, leafmins, leafmaxes)\n        else:\n            data = self.data[idx]\n            # split on the dimension which has the largest range of value\n            d = np.argmax(maxes-mins)\n            maxval = maxes[d]\n            minval = mins[d]\n\n            # boundary condition2: if the upper bound == lower bound\n            if maxval == minval:\n                # a LeafNode needs its bounds as well, so compute them here\n                return KDTree.LeafNode(idx, np.amin(self.data[idx], axis=0), np.amax(self.data[idx], axis=0))\n\n            data = data[:, d]\n            split = (maxval + minval)/2\n\n            leftchild_idx = np.nonzero(data <= split)[0]\n            rightchild_idx = np.nonzero(data > split)[0]\n\n            # If the dividing method is not proper, points would be split based on median value\n            if len(leftchild_idx) == 0 or len(rightchild_idx) == 0:\n                split = np.median(data, axis=0)\n                leftchild_idx = np.nonzero(data <= split)[0]\n                rightchild_idx = np.nonzero(data > split)[0]\n\n            right_mins = np.copy(mins)\n            right_mins[d] = split\n            left_maxes = np.copy(maxes)\n            left_maxes[d] = split\n\n            # Recursion\n            node0 = KDTree.Node(self.build(idx[leftchild_idx], left_maxes, mins),\n                                self.build(idx[rightchild_idx], maxes, right_mins),\n                                idx, mins, maxes)\n            return node0\n\n    def query_dual_tree(self, other, h=1, eps=0.0001):\n        \"\"\"\n        :param other: KDTree which contains query data.\n        :param h: bandwidth.\n        :param eps: accepted error rate.\n        :return: an array, estimated density for each point in the query array.\n\n        Query_dual_tree: return density estimates for all the nodes in another KDTree.\n        1. Get the u, l (upper/lower bound of density y) from upper/lower bound of x (input data)\n        2. If the difference between u and l is small enough, let them be the density for all the nodes contained in the bound.\n        3. If it is not, calculate the u, l for the children of the current nodes.\n           Put the results in a priority queue, repeat 1, 2.\n        4. Repeat until we reach the leaf or there are no nodes in the queue.\n\n        \"\"\"\n
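        # Note: heapq keeps a min-heap, so the (priority, query node, data node) triples\n        # below are popped in increasing order of priority_index.\n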
        P = [] # Priority queue\n        lower = np.zeros(other.n)\n        upper = np.zeros(other.n)\n        Nq = other.n\n        heapq.heappush(P, (0, other.root, self.root))\n\n        while P:\n            temp, q, d = heapq.heappop(P)\n            l, u = bound.find_bound(q, d, h)\n            if (u-l) <= 2 * eps * np.min(lower[q.idx]) / Nq:\n                lower, upper = self.traverse_add(q, lower, upper, l, u)\n            elif isinstance(q, KDTree.LeafNode) and isinstance(d, KDTree.LeafNode):\n                prob = self.kdebase(q, d, other, h)\n                lower[q.idx] += prob\n                upper[q.idx] += prob\n            else:\n                for qchild in self.children(q):\n                    for dchild in self.children(d):\n                        priority_index = self.priority(qchild, l, u)\n                        heapq.heappush(P, (priority_index, qchild, dchild))\n        return (lower+upper)/2\n\n    def traverse_add(self, q, lower, upper, l, u):\n        \"\"\"\n        :param q: one query node in the query data tree\n        :param lower: maintain lower contribution of each point in the query data tree.\n        :param upper: maintain upper contribution of each point in the query data tree.\n        :param l: min value of density contribution\n        :param u: max value of density contribution\n        :return: two arrays.\n\n        This function will let l, u be the lower and upper contribution value for each node in q's box.\n        \"\"\"\n        lower[q.idx] = l\n        upper[q.idx] = u\n        return lower, upper\n\n    def kdebase(self, q, d, other, h):\n        \"\"\"\n        :param q: leafnode\n        :param d: leafnode\n        :param other: query tree\n        :param h: bandwidth\n        :return: density for a node in q's bound,\n                 contributed by all the nodes in d's bound.\n        \"\"\"\n        data_in_d = self.data[d.idx, :]\n        data_in_q = other.data[q.idx, :]\n\n        colnum_of_d = data_in_d.shape[1]\n        rownum_of_q = data_in_q.shape[0]\n        rownum_of_d = data_in_d.shape[0]\n        result = np.zeros(rownum_of_q)\n\n        H = np.diag((h ** 2) * np.ones(colnum_of_d))\n\n        for i in range(rownum_of_q):\n            for j in range(rownum_of_d):\n                temp = (np.linalg.inv(H)**(1/2)).dot(data_in_q[i, :] - data_in_d[j, :])\n                result[i] += np.linalg.det(H)**(-1/2) * bound.gaussian_density(temp, h) / self.n\n        return result\n\n    def children(self, q):\n        \"\"\"\n        :param q: input one node\n        :return: return its children. 
Leafnode has no children, then return itself.\n \"\"\"\n if isinstance(q, KDTree.LeafNode):\n return [q]\n else:\n return [q.leftchild, q.rightchild]\n\n def priority(self, q, l, u):\n \"\"\"\n :param q: tree node\n :param l: lower bound of the contribution of all the points in q's bound\n :param u: upper bound of the contribution of all the points in q's bound\n :return: an index used to sort the elements in the priority queue.\n \"\"\"\n node_num = len(q.idx)\n return node_num * (u-l) + 0.0001 * np.random.uniform(0, 1, size=1)[0]\n\n def traverse(self, current_node, result):\n \"\"\"\n traverse all the points contained in the current_node.\n :param current_node: where the traverse process will start.\n :param result: index array, in the order of pre-order traversal method.\n \"\"\"\n if current_node is None or isinstance(current_node, KDTree.LeafNode):\n return\n result.append(current_node.idx)\n self.traverse(current_node.rightchild, result)\n self.traverse(current_node.leftchild, result)\n\n\n","sub_path":"code_sample/paper-replication/fast-pair-search-dual_tree/dual_tree.py","file_name":"dual_tree.py","file_ext":"py","file_size_in_byte":10723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"74798724","text":"import torch\nimport numpy as np\n\nimport time, threading, collections\n\nfrom torch.multiprocessing import Queue, Process\nfrom timebudget import timebudget\n\nfrom utils.fastmem import FastMemory\n\nclass MemoryServer(Process):#threading.Thread):#\n def __init__(self, descs, memory, credit_assign, brain, n_step, good_reach):\n super().__init__()\n\n # must be shared!! double call should be ok ?!\n brain.share_memory()\n\n self.memory = memory\n\n self.descs = descs\n\n self.credit = credit_assign\n self.brain = brain\n self.n_step = n_step\n self.good_reach = good_reach\n\n self.channel = Queue()\n self.sampler = [ Queue() for _ in descs ]\n\n self.sleep = .1\n self.lock = threading.Lock()\n\n def run(self):\n self.thread_pool = []\n self.storage = collections.deque(maxlen=100)\n self.cmd = { \"push\" : self._push, \"switch\" : self._switch }\n\n worker = threading.Thread(target=self._worker)\n worker.start()\n\n while True: # single thread is fine\n data = self.channel.get()\n cmd, data = data\n self.cmd[cmd](*data)\n\n worker.join()\n\n def _switch(self, sleep):\n self.sleep = sleep\n\n def _push(self, ep, chunks, e_i, goods):\n max_allowed = len(ep) - self.n_step - 1\n allowed_mask = [ bool(sum(goods[i:i+self.good_reach, e_i])) for i in range(max_allowed)\n ] + [False] * (len(ep) - max_allowed)\n\n _, episode = self.credit( # it is double\n *[ep[:, sum(chunks[:i+1]):sum(chunks[:i+2])] for i in range(len(chunks[:-1]))],\n brain=self.brain,\n recalc=True)\n idx = np.arange(len(episode))[allowed_mask]\n\n with self.lock:\n self.storage.append(episode[idx].share_memory_())\n assert self.storage[-1].is_shared()\n\n for sampler in self.sampler:\n sampler.put(self.storage[-1])\n\n self.memory.push(episode, allowed_mask)\n return episode[idx]\n\n def _worker(self):\n while not len(self.memory):\n time.sleep(.1)\n\n def update(ind): # curry curry\n def _update(recalc, indices, allowed_mask, episode):\n return self._update(ind, recalc, indices, allowed_mask, episode)\n return _update\n\n while True:\n while not self.sleep:\n time.sleep(.1)\n time.sleep(self.sleep)\n with self.lock: # lock only one sample, should be fast enough\n for i, desc in enumerate(self.descs):\n episode_batch = self.memory.sample(update(i), desc.batch_size, 
desc.memory_size)\n\n def _update(self, ind, recalc, indices, allowed_mask, episode):\n if recalc:\n return self._push_to_fast(ind, recalc, indices, allowed_mask, episode)\n\n # though i dont like many threads with python environment...\n self.thread_pool.append(\n threading.Thread(target=self._push_to_fast, args=(# do async as most expensive work\n ind, recalc, indices, allowed_mask, episode, )))\n\n self.thread_pool[-1].start()\n self._clean_threads()\n return torch.ones(len(allowed_mask), sum(self.memory.chunks))\n\n def _push_to_fast(self, ind, recalc, indices, allowed_mask, episode):\n goals, states, memory, actions, probs, rewards, _, _, _, _, _ = episode\n\n _, episode = self.credit(goals, states, memory, actions, probs, rewards,\n self.brain, recalc=recalc, indices=indices)\n\n idx = np.arange(len(episode))[allowed_mask]\n\n self.storage.append(episode[idx].share_memory_())\n assert self.storage[-1].is_shared()\n assert not episode[idx].is_shared()\n assert not episode[idx[0]].is_shared()\n self.sampler[ind].put(self.storage[-1])\n\n return episode\n\n def _clean_threads(self):\n if len(self.thread_pool) < 20:\n return\n self.thread_pool[0].join()\n del self.thread_pool[0]\n\nfrom timebudget import timebudget\n\nclass MemoryBoost:\n def __init__(self, descs, memory, credit_assign, brain, n_step, good_reach):\n self.fast_m = [ FastMemory(\n desc, memory.chunks, memory.device) for desc in descs ]\n\n self.server = [\n MemoryServer(descs, memory, credit_assign, brain, n_step, good_reach\n ) for _ in range(7) ]\n for server in self.server:\n server.start()\n\n memory.device = 'cpu'\n\n self.storage = collections.deque(maxlen=100)\n self.total = 0\n\n def __len__(self):\n assert False\n\n def push(self, ep, chunks, e_i, goods):\n self.total = 0\n self.storage.append(ep.share_memory_())\n # assert self.storage[-1].is_shared()\n for server in self.server:\n server.channel.put((\"push\", (self.storage[-1], chunks, e_i, goods)))\n\n @timebudget\n def step(self, ind, desc):\n if not len(self.fast_m[0]) and self.server[0].sampler[0].empty():\n return\n\n for server in self.server:\n server.channel.put((\"switch\", (.01,)))\n\n for server in self.server:\n min_draw = 2 # should be brain description property\n while min_draw > 0 or not server.sampler[ind].empty():\n episode = server.sampler[ind].get()\n self.fast_m[ind].push(episode.clone())\n del episode\n min_draw -= 1\n\n for server in self.server:\n server.channel.put((\"switch\", (.07,)))\n\n def sample(self, ind, _desc):\n for server in self.server:\n server.channel.put((\"switch\", (0.,)))\n return self.fast_m[ind].sample()\n","sub_path":"utils/memserver.py","file_name":"memserver.py","file_ext":"py","file_size_in_byte":5760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"480382290","text":"import discord\n\nimport os\nimport logging\n\nimport music\nimport display\n\nlogging.basicConfig(level=logging.INFO)\n\nclass MessageHandler():\n\tpass\n\nclass PreludeMessageHandler(MessageHandler):\n def __init__(self, prelude, client, music):\n self.prelude = prelude\n self.client = client\n self.music = music\n self.actions = {\n 'album': self._album\n }\n self.display = display.Discord()\n\n async def process(self, message):\n if message.content.startswith(self.prelude):\n split = message.content.split(' ')\n action = split[0][1:]\n body = ' '.join(split[1:])\n if action.lower() in self.actions:\n f = self.actions[action.lower()]\n await f(body, message)\n return True\n return False\n\n\n 
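# Handle the /album command: look up the album via the Music backend and reply with an embed.\n    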
async def _album(self, body, message):\n        result = self.music.search_album(body)\n        if result is None:\n            await self.client.send_message(message.channel, '\"{}\" returned no results'.format(body))\n            return\n        msg = self.display.album(result)\n        await self.client.send_message(message.channel, embed=msg)\n\n\nclass BunnyBot():\n    def __init__(self, token, prelude = '/'):\n        self.token = token\n        self.prelude = prelude\n        client = discord.Client()\n        client.event(self._on_message())\n        self.client = client\n        self.music = music.Music()\n\n        self.handlers = [\n            PreludeMessageHandler('/', self.client, self.music)\n        ]\n\n    def run(self):\n        self.client.run(self.token)\n\n    def _on_message(self):\n        async def on_message(message):\n            for h in self.handlers:\n                if await h.process(message):\n                    return\n        return on_message\n\n\ndef main():\n    discord_key = os.environ.get('BUNNYBOT_DISCORD')\n    bunnybot = BunnyBot(discord_key)\n    bunnybot.run()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"bunnybot/bunnybot.py","file_name":"bunnybot.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"613201864","text":"from collections import defaultdict\nfrom typing import List\n\nclass Solution:\n    def findTargetSumWays(self, nums: List[int], target: int) -> int:\n        cache = defaultdict(int) # some target t -> ways to sum to t\n        cache[nums[0]] = cache[-nums[0]] = 2 if nums[0] == 0 else 1\n        \n        for num in nums[1:]:\n            next_cache = defaultdict(int)\n            for t in cache:\n                next_cache[t - num] += cache[t]\n                next_cache[t + num] += cache[t]\n            cache = next_cache\n\n        return cache[target]\n","sub_path":"python/target-sum.py","file_name":"target-sum.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"224130445","text":"#!/usr/bin/python3\n\nfrom typing import List\n\n\nclass Solution:\n    def setZeroes(self, matrix: List[List[int]]) -> None:\n        \"\"\"\n        Do not return anything, modify matrix in-place instead.\n        \"\"\"\n        zeros = list()\n\n        m = len(matrix)\n        n = len(matrix[0])\n\n        for i in range(m):\n            for j in range(n):\n                if not matrix[i][j]:\n                    zeros.append((i, j))\n\n        for i, j in zeros:\n            for col in range(n):\n                matrix[i][col] = 0\n\n            for row in range(m):\n                matrix[row][j] = 0","sub_path":"力扣/73. 
矩阵置零/矩阵置零.py","file_name":"矩阵置零.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"128665283","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process('FSVAL')\n\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Study.BabyMaker.puSummaryInfoMaker_cfi')\nprocess.load('Study.BabyMaker.babyMaker_cfi')\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(-1)\n)\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n 'file:FILELOCATION1',\n 'file:FILELOCATION2',\n 'file:FILELOCATION3',\n 'file:FILELOCATION4',\n 'file:FILELOCATION5'\n )\n) \n\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n outputCommands = cms.untracked.vstring(\n 'drop *',\n 'keep *_*Maker*_*_FSVAL*'\n ), \n fileName = cms.untracked.string('newMixing.root')\n)\n\nprocess.outpath = cms.EndPath(process.out)\nprocess.p = cms.Path(process.babyMaker * process.puSummaryInfoMaker)\n","sub_path":"Study/BabyMaker/python/newMixing.py","file_name":"newMixing.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"311113456","text":"#\r\n## Tom\r\n#\r\n## Unit 5 Assignment 1\r\n#\r\n#\r\n## Deck of Cards Class\r\n#\r\n\r\n## Requirements:\r\n# Class for deck of cards\r\n# data for class will be the cards themselves\r\n# methods will be:\r\n#\r\n# deal- method that returns the value of the card on top\r\n# once dealt the card cannot be dealt again until\r\n# the deck is shuffled\r\n#\r\n# shuffle- method to return to the deck all dealt cards\r\n# for a total of 52 (no Jokers) in random order\r\n#\r\n# fan- method to simply list the cards in the deck, from\r\n# top to bottom of the deck\r\n#\r\n# isOrdered- method that returns True if the deck is in\r\n# order and false if not in order\r\n# - Note!!! - the deck does not need to be full\r\n# to be in order\r\n#\r\n# Order- method that sorts the deck so 2 of clubs low,\r\n# ace of spades high, c,d,h,s for suits\r\n# - Note!!! - order only done on full deck\r\n#\r\n#\r\n## Let's Begin!!! 
##\r\n\r\n# get imports done, we will need these later\r\nimport random\r\n\r\nclass Card():\r\n\r\n    cardCount = 0\r\n\r\n    def __init__(self, face, suit):\r\n        self.face = face\r\n        self.suit = suit\r\n        Card.cardCount += 1\r\n\r\n    def getcardCount(self):\r\n        return Card.cardCount\r\n\r\n    def show(self):\r\n        print(\"{} of {}\".format(self.face, self.suit))\r\n\r\n    def getVal(self):\r\n        suit_vals = {\"Clubs\": 1,\r\n                     \"Diamonds\": 2,\r\n                     \"Hearts\": 3,\r\n                     \"Spades\": 4}\r\n        face_vals = {'2': 2,\r\n                     '3': 3,\r\n                     '4': 4,\r\n                     '5': 5,\r\n                     '6': 6,\r\n                     '7': 7,\r\n                     '8': 8,\r\n                     '9': 9,\r\n                     '10': 10,\r\n                     'Jack': 11,\r\n                     'Queen': 12,\r\n                     'King': 13,\r\n                     'Ace': 14}\r\n        # rank by face first and break ties by suit (c < d < h < s);\r\n        # multiplying face by suit gave colliding values (e.g. 4 of Clubs == 2 of Diamonds)\r\n        val = face_vals[self.face] * 4 + suit_vals[self.suit]\r\n        return val\r\n\r\n    #def equals(self, nxt_card):\r\n\r\nclass Deck():\r\n    \r\n    deckCount = 0\r\n\r\n    def __init__(self):\r\n        self.cards = []\r\n        self.initialize_deck()\r\n        Deck.deckCount += 1\r\n    \r\n    def initialize_deck(self):\r\n\r\n        suits = ['Clubs', 'Diamonds', 'Hearts', 'Spades']\r\n        faces = ['2', '3', '4', '5', '6', '7', '8', '9', '10',\r\n                 'Jack', 'Queen', 'King', 'Ace']\r\n\r\n        for suit in suits:\r\n            for face in faces:\r\n                self.cards.append(Card(face,suit))\r\n\r\n\r\n    def show(self):\r\n        # this method is to check myself, will delete later maybe\r\n        for card in self.cards:\r\n            card.show()\r\n\r\n\r\n    def shuffle(self):\r\n        # we can think of cards[-1] as the \"top\" card cuz the deck is face down\r\n\r\n        # this method will shuffle the deck\r\n        # first we iterate through our deck in reverse\r\n        # we generate a random value and swap the card in\r\n        # position i with the card in position rand\r\n        \r\n        for i in range(len(self.cards)-1,0,-1):\r\n            rand = random.randint(0,i)\r\n            self.cards[i], self.cards[rand] = self.cards[rand], self.cards[i]\r\n\r\n    def deal(self):\r\n        # we can think of cards[-1] as the \"top\" card cuz the deck is face down\r\n        # we use the list method \"pop(index)\" to return a list item\r\n        # and then remove it from that list\r\n        return self.cards.pop()\r\n\r\n    def fan(self):\r\n        for card in reversed(self.cards):\r\n            card.show()\r\n\r\n    def isOrdered(self):\r\n        # check every adjacent pair; the deck is ordered only if no pair is out of order\r\n        # (note: the deck does not need to be full to be in order)\r\n        for i in range(len(self.cards)-1):\r\n            if self.cards[i].getVal() > self.cards[i+1].getVal():\r\n                return False\r\n        return True\r\n\r\n    def Order(self):\r\n        # bubble sort: swap adjacent cards only when they are out of order\r\n        while not self.isOrdered():\r\n            for i in range(len(self.cards)-1):\r\n                if self.cards[i].getVal() > self.cards[i+1].getVal():\r\n                    self.cards[i], self.cards[i+1] = self.cards[i+1], self.cards[i]\r\n\r\n    def sortDeck(self):\r\n        # list.sort() sorts in place and returns None, so do not reassign self.cards\r\n        self.cards.sort(key=Card.getVal)\r\n\r\n    def arg_ord(self):\r\n        # Card objects are not directly comparable, so sort by their value\r\n        self.cards.sort(key=Card.getVal)\r\n    \r\n    \r\n    \r\n\r\n# main\r\n\r\nmyDeck = Deck()\r\nmyDeck.shuffle()\r\nprint(\"***********************\")\r\n'''\r\nmyCard = myDeck.deal()\r\nmyCard.show()\r\nmyCard.getVal()\r\n'''\r\nprint(\"***********************\")\r\nmyDeck.fan()\r\nprint(\"***********************\")\r\n\r\nprint(myDeck.isOrdered())\r\nmyDeck.arg_ord()\r\nprint(myDeck.isOrdered())\r\nmyDeck.fan()\r\n","sub_path":"DeckLib.py","file_name":"DeckLib.py","file_ext":"py","file_size_in_byte":5126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"34421390","text":"#return the largest from a 
list\nl1=[6,7,3,19,35,24,9,6]\nprint(max(l1))\n\n#return addition and subtration of number\ndef calc(a1,a2):\n return a1+a2, a1-a2\nresult=calc(25,13)\nprint(result) \n\n#sum all the numbers in a list\n\ndef sum(n):\n total = 0\n for x in n:\n total += x\n return total\nprint(sum((8,4,3,2,6))) \n\n#multiplication of numbers\n\ndef mul(number):\n total1 = 1\n for i in number:\n total1 *= i\n return total1\nprint(mul((2,8,4,5,1,6))) \n\n#swap the first and last element in list\ndef swap(li):\n size=len(li)\n t=li[0]\n li[0]=li[size-1]\n li[size-1]=t\n return li\nli=[15,98,23,45,11]\nprint(swap(li))\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"211406197","text":"import sys, math\n\ndef main():\n\twith open(sys.argv[1]) as f:\n\t\tfor line in f:\n\t\t\tmatrix = line.rstrip().split(\" \")\n\t\t\tside = int(math.sqrt(len(matrix)))\n\t\t\tresult = \"\"\n\t\t\tfor i in range(side):\n\t\t\t\tfor j in range(side - 1, -1, -1):\n\t\t\t\t\tresult += matrix[(j * side) + i] + \" \"\n\t\t\tprint(result)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"Codeeval/Matrix Rotation/P.py","file_name":"P.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"638260229","text":"'''\n* Copyright (C) 2019-2020 Intel Corporation.\n*\n* SPDX-License-Identifier: BSD-3-Clause\n'''\n\nimport sys\nimport os\nimport shutil\nimport subprocess\nimport tempfile\nfrom pathlib import Path\nimport requests\nfrom jsonschema import Draft7Validator, FormatChecker\nimport config as cfg\nfrom utils import create_convert_command\nfrom utils import create_download_command\nfrom utils import create_directory\nfrom utils import load_document\n\ndef find_model_root(model, output_dir):\n for root, directories, _ in os.walk(output_dir):\n if model in directories:\n return os.path.abspath(os.path.join(root, model))\n return None\n\ndef download_model_proc(target_model, model):\n if os.path.isdir(cfg.model_proc_root):\n for root, _, files in os.walk(cfg.model_proc_root):\n for filepath in files:\n if os.path.splitext(filepath)[0] == model:\n model_proc = os.path.join(root, filepath)\n else:\n url = cfg.base_gst_video_analytics_repo_url + '{0}.json'.format(model)\n gva_response = requests.get(url)\n temp_dir = tempfile.TemporaryDirectory()\n if gva_response.status_code == 200:\n with open('{0}/{1}.json'.format(temp_dir.name, model), 'wb') as out_file:\n out_file.write(gva_response.content)\n print(\"Downloaded {0} model-proc file from gst-video-analytics repo\".format(model))\n else:\n print(\"Warning, model-proc not found in gst-video-analytics repo.\")\n print(\"Creating empty json file for {0} to allow model to load in \"\\\n \"VA-Serving\".format(model))\n print(\"Do not specify model-proc in pipeline that utilizes this model\")\n Path('{0}/{1}.json'.format(temp_dir.name, model)).touch()\n model_proc = os.path.abspath('{0}/{1}.json'.format(temp_dir.name, model))\n shutil.move(model_proc, os.path.join(target_model, '{}.json'.format(model)))\n\ndef validate_schema(model_list):\n schema = {\n \"type\" : \"array\",\n \"items\" : {\n \"oneOf\": [\n {\n \"type\" : \"object\",\n \"properties\" : {\n \"model\" : {\"type\" : \"string\"},\n \"alias\" : {\"type\" : \"string\"},\n \"version\" : {\"type\" : [\"string\", \"integer\"]},\n \"precision\" : {\n \"type\" : \"array\",\n \"items\" : {\"enum\" : 
[\"FP32\", \"FP16\", \"INT8\",\n \"FP16-INT8\", \"FP32-INT8\",\n \"FP32-INT1\", \"FP16-INT1\", \"INT1\"]}\n }\n },\n \"required\" : [\"model\"],\n \"additionalProperties\": False\n },\n {\n \"type\" : \"string\"\n }\n ]\n }\n }\n try:\n validator = Draft7Validator(schema, format_checker=FormatChecker())\n validator.validate(model_list)\n except Exception as err:\n print(\"Yaml input schema validation error.\")\n print(err)\n sys.exit(1)\n\ndef download_and_convert_model(target_root, model, force):\n precisions = None\n if isinstance(model, dict):\n model_name = model.get('model')\n alias = model.get('alias', None)\n precisions = model.get('precision', None)\n if alias is not None:\n target_model = os.path.join(target_root, alias)\n else:\n target_model = os.path.join(target_root, model_name)\n model_version = model.get('version', None)\n if model_version is not None:\n target_model = os.path.join(target_model, str(model_version))\n else:\n target_model = os.path.join(target_model, \"1\")\n else:\n model_name = model\n target_model = os.path.join(os.path.join(target_root, model_name), \"1\")\n\n if (not force) and (os.path.isdir(target_model)):\n print(\"Model Directory {0} Exists - Skipping\".format(target_model))\n return\n\n with tempfile.TemporaryDirectory() as output_dir:\n command = create_download_command(model_name, output_dir, precisions)\n print(' '.join(command))\n result = subprocess.run(command, check=False)\n if result.returncode != 0:\n print(\"Error occured while downloading {0} model.\".format(model_name))\n print(\"Please remove from input yml file and try again.\")\n sys.exit(1)\n command = create_convert_command(model_name, output_dir, precisions)\n print(' '.join(command))\n subprocess.run(command, check=False)\n model_path = find_model_root(model_name, output_dir)\n if os.path.isdir(target_model):\n #print(\"Directory {0} already exists, overwriting it.\".format(target_model))\n shutil.rmtree(target_model)\n for filename in os.listdir(model_path):\n if os.path.isdir(os.path.join(model_path, filename)):\n shutil.move(os.path.join(model_path, filename),\n os.path.join(target_model, filename))\n download_model_proc(target_model, model_name)\n\ndef download(model_list_path, output_dir, force):\n model_list = load_document(model_list_path)\n if model_list is None:\n sys.exit(1)\n validate_schema(model_list)\n target_root = os.path.join(output_dir, \"models\")\n create_directory(target_root, False)\n for model in model_list:\n download_and_convert_model(target_root, model, force)\n","sub_path":"tools/model_downloader/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"582553711","text":"#!/usr/bin/env python\nimport argparse\nimport torch\n\nfrom brnolm.language_models import ffnn_models, vocab, language_model\nfrom brnolm.language_models.decoders import FullSoftmaxDecoder\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='PyTorch FFNN Language Model')\n parser.add_argument('--wordlist', type=str, required=True,\n help='word -> int map; Kaldi style \"words.txt\"')\n parser.add_argument('--unk', type=str, default=\"\",\n help='expected form of \"unk\" word. 
Most likely a <unk> or <UNK>')\n    parser.add_argument('--emsize', type=int, default=200,\n                        help='size of word embeddings')\n    parser.add_argument('--nhid', type=int, default=200,\n                        help='number of hidden units per layer')\n    parser.add_argument('--hist-len', type=int, default=2,\n                        help='number of input words. If n-grams are being modelled, then (n-1)')\n    parser.add_argument('--dropout', type=float, default=0.2,\n                        help='dropout applied to layers (0 = no dropout)')\n    parser.add_argument('--tied', action='store_true',\n                        help='tie the word embedding and softmax weights')\n    parser.add_argument('--seed', type=int, default=1111,\n                        help='random seed')\n    parser.add_argument('--save', type=str, required=True,\n                        help='path to save the final model')\n    args = parser.parse_args()\n\n    # Set the random seed manually for reproducibility.\n    torch.manual_seed(args.seed)\n\n    print(\"loading vocabulary...\")\n    with open(args.wordlist, 'r') as f:\n        vocabulary = vocab.vocab_from_kaldi_wordlist(f, args.unk)\n\n    print(\"building model...\")\n\n    model = ffnn_models.BengioModel(\n        len(vocabulary), args.emsize, args.hist_len,\n        args.nhid, args.dropout\n    )\n\n    decoder = FullSoftmaxDecoder(args.nhid, len(vocabulary))\n\n    lm = language_model.LanguageModel(model, decoder, vocabulary)\n    torch.save(lm, args.save)\n","sub_path":"scripts/model-building/build-shallow-nn.py","file_name":"build-shallow-nn.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"526868939","text":"def partition(arr, low, high):\n    pivot = low\n\n    for i in range(low + 1, high + 1):\n        if arr[i] <= arr[low]:\n            pivot += 1\n            # Yes these three lines could be rewritten as a more pythonic one\n            # liner but for educational reasons I think I'll code more\n            # verbosely\n            temp = arr[i]\n            arr[i] = arr[pivot]\n            arr[pivot] = temp\n    temp = arr[pivot]\n    arr[pivot] = arr[low]\n    arr[low] = temp\n    return pivot\n\n\ndef quickSortRecursive(arr, low, high):\n    if low >= high:\n        return arr\n    pivot = partition(arr, low, high)\n    quickSortRecursive(arr, low, pivot - 1)\n    quickSortRecursive(arr, pivot + 1, high)\n    return arr\n\n\ndef quickSort(arr):\n    low = 0\n    high = len(arr) - 1\n    return quickSortRecursive(arr, low, high)\n\n\narr = [5, 1, 4, 2, 8, 7, 6, 9, 3]\nans = quickSort(arr)\nprint(ans)\n","sub_path":"algorithms/sorting/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"551282357","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.impute import KNNImputer\n\nfile_path = os.path.join(\"cleanedbigfoodlist(numberonly).csv\")\ndirty_df = pd.read_csv(file_path)\nindexed_df = dirty_df.set_index(\"NDB_No\")\n# print(indexed_df.head())\nnum_df = indexed_df.drop(columns=[\"Shrt_Desc\", \"Weight_desc(Unit)\", \"GmWt_Desc2(Unit)\"])\n# num_df = num_df.astype(float)\n# print(num_df.head())\n\n\n# Define N_neighbors value\nimputer = KNNImputer(n_neighbors=5)\n\n# Impute/Fill Missing Values\ndf_filled = imputer.fit_transform(num_df)\n\n# print(df_filled)\n# print(type(df_filled))\n\ncolumns = [\"Water\", \"Energy\", \"Protein\", \"Lipid_Total\", \"Carbohydrate\", \"Fiber\", \"Sugar_Total\", \"Calcium\", \"Iron\", \"Magnesium\", \n           \"Phosphorus\", \"Potassium\", \"Sodium\", \"Zinc\", \"Copper\", \"Manganese\", \"Selenium\", \"Vitamin_C\", \"Thiamin\", \"Riboflavin\", \n           \"Niacin\", \"Panto_Acid\", \"Vitamin_B6\", \"Folate_Total\", 
\"Folic_Acid\", \"Food_Folate_mcg\", \"Folate_DFE_mcg\", \"Choline_Tot_mg\", \"Vitamin_B12\", \"Vit_A_IU\",\n \"Vitamin_A\", \"Retinol\", \"Alpha_Carot_mcg\", \"Beta_Carot_mcg\", \"Beta_Crypt_mcg\", \"Lycopene_mcg\", \"Lut+Zea_mcg\", \"Vitamin_E\", \"Vitamin_D\", \"Vit_D_IU\", \n \"Vitamin_K\", \"FA_Sat_g\", \"FA_Mono_g\", \"FA_Poly_g\", \"Cholestrol\", \"Weight_grams\", \"Weight_desc\", \"GmWt_2\", \"GmWt_Desc2\", \"Refuse_Pct\"]\n\ndf = pd.DataFrame(data=df_filled, columns=columns)\n\n# print(df.head())\n\ndf.to_csv(\"cleanedbigfooddata.csv\")\n","sub_path":"db/scripts/cleanupbigfoodlist/knnimputer.py","file_name":"knnimputer.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"12339326","text":"class SpellError:\n def __init__(self, start, end, msg, context='', raw=None, source_side=False, short='', filename='anonym', code='err'):\n self.start = start\n self.end = end\n self.message = msg\n self.short = short\n self.code = code\n self.filename = filename\n self._alt_file = 'anonym'\n self.source_side = source_side\n self.context = context\n self.raw = raw\n\n def __str__(self):\n return ''\n\n def toggle_pos_mode(self, pos_map):\n if self.source_side:\n self.start, self.end = \\\n pos_map.src_to_dest_range(self.filename, self.start, self.end)\n self.filename = self._alt_file\n else:\n self._alt_file = self.filename\n self.filename, self.start, self.end = \\\n pos_map.dest_to_src_range(self.start, self.end)\n\n self.source_side = not self.source_side\n return self.source_side\n","sub_path":"rplugin/python3/texspell/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"195014695","text":"from stack_array import * #Needed for Depth First Search\nfrom queue_array import * #Needed for Breadth First Search\n\nclass Vertex:\n '''Add additional helper methods if necessary.'''\n def __init__(self, key):\n '''Add other attributes as necessary'''\n self.color = None # color of the vertex object\n self.id = key # the name of the current vertex\n self.adjacent_to = [] # list of vertices that the current vertex is adj to\n self.visited = False\n \n # def __repr__(self):\n # return (str(self.id) + \" \" + str(self.adjacent_to))\n\nclass Graph:\n '''Add additional helper methods if necessary.'''\n def __init__(self, filename):\n '''reads in the specification of a graph and creates a graph using an adjacency list representation. \n You may assume the graph is not empty and is a correct specification. E.g. each edge is \n represented by a pair of vertices. 
Note that the graph is not directed so each edge specified \n in the input file should appear on the adjacency list of each vertex of the two vertices associated \n with the edge.'''\n\n #prepwork\n self.graph = {} #create dictionary for vertices\n self.visited = [] # list of vertices that are visited\n vertices = [] #list of vertices used to make the graph\n\n #read into the file and append to list of vertices\n file = open(filename,\"r\") \n Content = file.read()\n for line in Content.split():\n vertices.append(line)\n \n self.create_graph(vertices, self.graph)\n file.close()\n\n\n def create_graph(self, vertices, dict):\n #iterates every other index (first index of a pair)\n for i in range(0, len(vertices), 2):\n\n if vertices[i] not in dict: #if first vertex is not in the dictionary\n dict[vertices[i]] = Vertex(vertices[i]) #create a new first vertex object in the dictionary\n\n if vertices[i + 1] not in dict: #if the second vertex of the pair does not exist\n dict[vertices[i + 1]] = Vertex(vertices[i + 1]) #create a new second vertex object in the dictionary\n\n dict[vertices[i + 1]].adjacent_to.append(vertices[i]) #append first vertex to second vertex's adj list\n dict[vertices[i]].adjacent_to.append(vertices[i + 1]) #append second vertex to first vertex's adj list\n\n\n def add_vertex(self, key):\n '''Add vertex to graph, only if the vertex is not already in the graph.'''\n if key not in self.graph:\n self.graph[key] = Vertex(key)\n\n\n def get_vertex(self, key):\n '''Return the Vertex object associated with the id. If id is not in the graph, return None'''\n if key in self.graph:\n return self.graph[key]\n return None\n\n\n def add_edge(self, v1, v2):\n '''v1 and v2 are vertex id's. As this is an undirected graph, add an \n edge from v1 to v2 and an edge from v2 to v1. You can assume that\n v1 and v2 are already in the graph'''\n #first add v2 to v1's adj list\n self.get_vertex(v1).adjacent_to.append(v2)\n #first add v1 to v2's adj list\n self.get_vertex(v2).adjacent_to.append(v1)\n\n\n def get_vertices(self):\n '''Returns a list of id's representing the vertices in the graph, in ascending order'''\n #prepwork\n idList = []\n #for every key in the graph dictionary, append them into a list\n for id in self.graph.keys():\n idList.append(id)\n \n #sort list in order\n idList.sort()\n return idList\n\n\n def conn_components(self): \n '''Returns a list of lists. For example, if there are three connected components \n then you will return a list of three lists. 
Each sub list will contain the \n        vertices (in ascending order) in the connected component represented by that list.\n        The overall list will also be in ascending order based on the first item of each sublist.\n        This method MUST use Depth First Search logic!'''\n        #prepwork\n        finalList = []\n        self.visited = [] #reset visited vertices\n\n        vertexList = self.get_vertices()\n\n        for vertex in vertexList:\n            if vertex not in self.visited:\n                list = self.depth_first_search(vertex)\n                # print(list)\n                list.sort()\n                # print(list)\n                finalList.append(list)\n\n        return finalList\n\n\n    def depth_first_search(self, vertex):\n        '''vertex is a str'''\n        #prepwork\n        stack = Stack(len(self.graph)) #create stack\n        visitedList = [] #create a list for the vertices visited\n\n        currentVertex = vertex\n        self.visited.append(currentVertex)\n        visitedList.append(currentVertex)\n        stack.push(currentVertex) #push the current vertex onto stack\n\n        while not stack.is_empty():\n            currentVertex = stack.pop() #pop the stack\n            # print('popped')\n            currentAdjList = self.get_vertex(currentVertex).adjacent_to #set a variable to the current vertex's adjList\n\n            #for every vertex in the current vertex's adj list\n            for vertex in currentAdjList:\n                # print(vertex) #PRINT DEBUG\n                # print('stack',stack)\n                if vertex not in visitedList: #if the vertex is not already visited\n                    self.visited.append(vertex)\n                    visitedList.append(vertex)\n                    # print(visitedList) #PRINT DEBUG\n                    stack.push(vertex)\n                    # print('pushed')\n        \n        return visitedList\n    \n    \n    def reset_visit(self):\n        for vertex in self.get_vertices():\n            self.get_vertex(vertex).visited = False\n\n\n    def is_bipartite(self):\n        '''Returns True if the graph is bicolorable and False otherwise.\n        This method MUST use Breadth First Search logic!'''\n        #prepwork\n        self.reset_visit()\n        componentList = self.conn_components()\n        \n        for component in componentList:\n            vertex = component[0] #set vertex to the first vertex in the component list\n            boolean = self.breadth_first_search(vertex) #True if the component is bipartite, False otherwise\n\n            #if the component is not bipartite\n            if boolean == False: \n                return False\n\n        return True\n    \n\n\n    def breadth_first_search(self, vertex):\n        #prepwork\n        queue = Queue(len(self.graph)) #create a queue that is the size of the vertices in the graph\n        currentVertex = vertex\n        self.get_vertex(currentVertex).color = 'Black'\n        self.get_vertex(currentVertex).visited = True #mark the start vertex so it is never re-enqueued\n        queue.enqueue(currentVertex)\n\n        while not queue.is_empty():\n            #dequeue to get vertex\n            currentVertex = queue.dequeue()\n            #set a variable to the current vertex's adjList\n            currentAdjList = self.get_vertex(currentVertex).adjacent_to \n\n            #enqueue all of its children\n            for vertex in currentAdjList:\n                \n                if not self.get_vertex(vertex).visited: #the vertex has not been visited yet\n                    #set visited to True\n                    self.get_vertex(vertex).visited = True\n                    #enqueue the vertex into the queue\n                    queue.enqueue(vertex)\n\n                    #give the child the opposite color of its parent\n                    if self.get_vertex(currentVertex).color == 'Red':\n                        self.get_vertex(vertex).color = 'Black' #set current node color to black\n                    elif self.get_vertex(currentVertex).color == 'Black':\n                        self.get_vertex(vertex).color = 'Red' #set current node color to red\n\n                #if the child has the same color as its parent, the graph is not bipartite\n                elif self.get_vertex(vertex).color == self.get_vertex(currentVertex).color:\n                    return False\n        \n        return True\n\n\n\n\n\n","sub_path":"p5-jhtranx/graph copy.py","file_name":"graph 
copy.py","file_ext":"py","file_size_in_byte":8033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"587076756","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport re\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.shortcuts import HttpResponse, redirect, render, reverse\nfrom django.conf import settings\n\n\nclass RbacMiddleware(MiddlewareMixin):\n \"\"\"\n 用户权限信息校验\n \"\"\"\n\n def process_request(self, request):\n \"\"\"\n 当用户请求刚进入时候出发执行\n :param request:\n :return:\n \"\"\"\n\n \"\"\"\n 1. 获取当前用户请求的URL\n 2. 获取当前用户在session中保存的权限列表 ['/customer/list/','/customer/list/(?P\\\\d+)/']\n 3. 权限信息匹配\n \"\"\"\n current_url = request.path_info\n for valid_url in settings.VALID_URL_LIST:\n if re.match(valid_url, current_url):\n # 白名单中的URL无需权限验证即可访问\n return None\n\n permission_dict = request.session.get(settings.PERMISSION_SESSION_KEY)\n if not permission_dict:\n return redirect(reverse('main:login'))\n # return HttpResponse('未获取到用户权限信息,请登录!')\n\n url_record = [\n {'title': '首页', 'url': '/home/'}\n ]\n\n # 此处代码进行判断\n for url in settings.NO_PERMISSION_LIST:\n if re.match(url, request.path_info):\n # 需要登录,但无需权限校验\n request.current_selected_permission = 0\n request.breadcrumb = url_record\n\n return None\n\n flag = False\n\n for item in permission_dict.values():\n reg = \"^%s$\" % item['url']\n if re.match(reg, current_url):\n flag = True\n request.current_selected_permission = item['pid'] or item['id']\n if not item['pid']:\n url_record.extend([{'title': item['title'], 'url': item['url'], 'class': 'active'}])\n else:\n url_record.extend([\n {'title': item['p_title'], 'url': item['p_url']},\n {'title': item['title'], 'url': item['url'], 'class': 'active'},\n ])\n request.breadcrumb = url_record\n break\n\n if not flag:\n return render(request, '403.html')\n # return HttpResponse('无权访问')\n","sub_path":"cmdb_rbac/middlewares/rbac.py","file_name":"rbac.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"292256740","text":"import time\n\nimport FWCore.ParameterSet.Config as cms\nimport FWCore.ParameterSet.VarParsing as VarParsing\n\noptions = VarParsing.VarParsing()\noptions.register('runNumber',\n 100000, #default value\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.int,\n \"Run number; default gives latest IOV\")\noptions.register('messageLevel',\n 0, #default value\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.int,\n \"Message level; default to 0\")\noptions.register('globalTag',\n 'GR_P_V50', #default value\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string,\n \"GlobalTag\")\noptions.register('pfnPrefix',\n '', #default value\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string,\n \"PFN prefix in GlobalTag connection strings\")\noptions.register('pfnPostfix',\n '', #default value\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string,\n \"PFN postfix in GlobalTag connection strings\")\noptions.register('refresh',\n 0, #default value\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.int,\n \"Refresh type: default no refresh\")\noptions.register('eventsPerLumi',\n 100, #default value\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.int,\n \"number of events per 
lumi\")\noptions.register('numberOfLumis',\n 100, #default value\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.int,\n \"number of lumisections per run\")\noptions.register('numberOfRuns',\n 100, #default value\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.int,\n \"number of runs in the job\")\noptions.parseArguments()\n\nprocess = cms.Process(\"TEST\")\n\nprocess.MessageLogger = cms.Service( \"MessageLogger\",\n destinations = cms.untracked.vstring( 'detailedInfo' ),\n detailedInfo = cms.untracked.PSet( threshold = cms.untracked.string( 'INFO' ) ),\n )\n\n#process.add_( cms.Service( \"PrintEventSetupDataRetrieval\",\n# printProviders=cms.untracked.bool( True )\n# )\n# )\n\nCondDBSetup = cms.PSet( DBParameters = cms.PSet( authenticationPath = cms.untracked.string( '.' ),\n connectionRetrialPeriod = cms.untracked.int32( 10 ),\n idleConnectionCleanupPeriod = cms.untracked.int32( 10 ),\n messageLevel = cms.untracked.int32( 0 ),\n enablePoolAutomaticCleanUp = cms.untracked.bool( False ),\n enableConnectionSharing = cms.untracked.bool( True ),\n connectionRetrialTimeOut = cms.untracked.int32( 60 ),\n connectionTimeOut = cms.untracked.int32( 0 ),\n enableReadOnlySessionOnUpdateConnection = cms.untracked.bool( False )\n )\n )\n\nCondDBSetup.DBParameters.messageLevel = options.messageLevel\n\nrefreshAlways, refreshOpenIOVs, refreshEachRun, reconnectEachRun = False, False, False, False\nif options.refresh == 0:\n refreshAlways, refreshOpenIOVs, refreshEachRun, reconnectEachRun = False, False, False, False\nelif options.refresh == 1:\n refreshAlways = True\n refreshOpenIOVs, refreshEachRun, reconnectEachRun = False, False, False\nelif options.refresh == 2:\n refreshAlways = False\n refreshOpenIOVs = True\n refreshEachRun, reconnectEachRun = False, False\nelif options.refresh == 3:\n refreshAlways, refreshOpenIOVs = False, False\n refreshEachRun = True\n reconnectEachRun = False\nelif options.refresh == 4:\n refreshAlways, refreshOpenIOVs, refreshEachRun = False, False, False\n reconnectEachRun = True\n\nprocess.GlobalTag = cms.ESSource( \"PoolDBESSource\",\n CondDBSetup,\n connect = cms.string( 'frontier://FrontierProd/CMS_CONDITIONS' ),\n #connect = cms.string('sqlite_fip:CondCore/TagCollection/data/GlobalTag.db'), #For use during release integration\n globaltag = cms.string( '' ),\n RefreshAlways = cms.untracked.bool( refreshAlways ),\n RefreshOpenIOVs = cms.untracked.bool( refreshOpenIOVs ),\n RefreshEachRun=cms.untracked.bool( refreshEachRun ),\n ReconnectEachRun=cms.untracked.bool( reconnectEachRun ),\n DumpStat=cms.untracked.bool( True ),\n pfnPrefix=cms.untracked.string( '' ), \n pfnPostfix=cms.untracked.string( '' )\n )\n\nprocess.GlobalTag.globaltag = options.globalTag\n\nif options.pfnPrefix:\n process.GlobalTag.pfnPrefix = options.pfnPrefix\nif options.pfnPostfix:\n process.GlobalTag.pfnPostfix = options.pfnPostfix\n\n#TODO: add VarParsing support for adding custom conditions\n#process.GlobalTag.toGet = cms.VPSet()\n#process.GlobalTag.toGet.append(\n# cms.PSet(record = cms.string(\"BeamSpotObjectsRcd\"),\n# tag = cms.string(\"firstcollisions\"),\n# connect = cms.untracked.string(\"frontier://PromptProd/CMS_COND_31X_BEAMSPOT\")\n# )\n#)\n\nprocess.source = cms.Source( \"EmptySource\",\n firstRun = cms.untracked.uint32( options.runNumber ),\n firstTime = cms.untracked.uint64( ( long( time.time() ) - 24 * 3600 ) << 32 ), #24 hours ago in nanoseconds\n numberEventsInRun = cms.untracked.uint32( options.eventsPerLumi * 
options.numberOfLumis ), # options.numberOfLumis lumi sections per run\n                             numberEventsInLuminosityBlock = cms.untracked.uint32( options.eventsPerLumi )\n                             )\n\n\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32( options.eventsPerLumi * options.numberOfLumis * options.numberOfRuns ) ) #options.numberOfRuns runs per job\n\nprocess.get = cms.EDAnalyzer( \"EventSetupRecordDataGetter\",\n                              toGet = cms.VPSet(),\n                              verbose = cms.untracked.bool( True )\n                              )\n\nprocess.p = cms.Path( process.get )\n","sub_path":"CondCore/ESSources/test/python/loadall_from_gt_empty_source_cfg.py","file_name":"loadall_from_gt_empty_source_cfg.py","file_ext":"py","file_size_in_byte":7173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"578464214","text":"from typing import List\n\n\nclass Solution:\n    def findMin(self, nums: List[int]) -> int:\n        if len(nums) == 1:\n            return nums[0]\n        low = 0\n        high = len(nums) - 1\n        while high > low:\n            pivot = low + (high - low) // 2\n            if nums[pivot] < nums[high]:\n                high = pivot\n                # alternative: high = pivot - 1\n                # too aggressive to move the `high` index,\n                # it won't work for the test case of [3, 1, 3]\n            # Case 2):\n            elif nums[pivot] > nums[high]:\n                low = pivot + 1\n            # Case 3):\n            else:\n                high -= 1\n        # the 'low' and 'high' index converge to the inflection point.\n        return nums[low]\n","sub_path":"python/154.py","file_name":"154.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"150253618","text":"#-*- coding: utf-8 -*-\nfrom typing import List\n\n\"\"\"\n20190809\nCould only come up with an O(n**2) answer, so it timed out\n\"\"\"\n\n\"\"\"\ntechlead tip\nWhen a problem comes with conditions, ask yourself\nwhy those particular conditions were given;\nthink about whether a hint is hidden in them.\n\nFor example, here the point is that these are partitions,\nnot combinations, so the order of the elements never changes.\n\n\nThis problem tests your analytical skill.\n\nThe target is the total divided by 3 (call it x).\nSince the order never changes, a single pass must see the\nrunning sum reach x three times, finishing exactly at the\nend of the array.\n\nIf you can state that, you get the points.\n\"\"\"\n\nclass Solution_timeexceeded:\n    def canThreePartsEqualSum(self, A: List[int]) -> bool:\n        for i in range(len(A)-2):\n            for j in range(i+1, len(A)-1):\n                # print(A)\n                # print(list(range(len(A))))\n                # print(i, j)\n                # print(\"1st:%s\" % A[0:i+1])\n                # print(\"2nd:%s\" % A[i+1:j+1])\n                # print(\"3rd:%s\" % A[j+1:])\n                if sum(A[0:i+1]) == sum(A[i+1:j+1]) == sum(A[j+1:]):\n                    return True\n        return False\n\n\nclass Solution_timeexceeded_still:\n    def canThreePartsEqualSum(self, A: List[int]) -> bool:\n        total = sum(A)\n        for i in range(len(A)-2):\n            first = sum(A[0:i+1])\n            for j in range(i+1, len(A)-1):\n                second = sum(A[i+1:j+1])\n                third = total - (first + second)\n                if first == second == third:\n                    return True\n        return False\n\n\nclass Solution_time_exceeded_damn_stil_bad_at_this_point_I_should_have_noticed_n2_solution_no_good:\n    \"\"\"\n    adding backtracking\n    \"\"\"\n    def canThreePartsEqualSum(self, A: List[int]) -> bool:\n        total = sum(A)\n        avg = int(total /3)\n        first = 0\n        for i in range(len(A)-2):\n            first += A[i]\n            if first > avg: break\n            second = 0\n            for j in range(i+1, len(A)-1):\n                second += A[j]\n                if second > avg: break\n                third = total - (first + second)\n                # print(i, j)\n                # print(\"1st:%s\" % first)\n                # print(\"2nd:%s\" % second)\n                # print(\"3rd:%s\" % third)\n                if first == second == third:\n                    return True\n        return False\n\n\n\nclass Solution_1:\n    \"\"\"\n    I gave up and glanced at another solution\n    Runtime: 380 ms, faster than 21.08% of Python3 online submissions for Partition Array Into Three Parts With Equal Sum.\nMemory Usage: 20.4 MB, less than 6.25% of Python3 online submissions for Partition Array Into Three Parts With Equal 
Sum.\n \"\"\"\n def canThreePartsEqualSum(self, A: List[int]) -> bool:\n total = sum(A)\n if total % 3 != 0: return False\n avg = int(total /3)\n count = 0\n local = 0\n for i, a in enumerate(A):\n local += a\n if local == avg:\n #logging.debug(\"i:%s found local ans\" % i)\n count += 1\n local = 0\n return count == 3 and local == 0\n\n\n\nclass Solution_2:\n \"\"\"\n Runtime: 368 ms, faster than 47.53% of Python3 online submissions for Partition Array Into Three Parts With Equal Sum.\n Memory Usage: 20.6 MB, less than 6.25% of Python3 online submissions for Partition Array Into Three Parts With Equal Sum.\n \"\"\"\n def canThreePartsEqualSum(self, A: List[int]) -> bool:\n total = sum(A)\n if total % 3 != 0: return False\n avg = int(total /3)\n count = 0\n local = 0\n length = len(A)\n for i in range(length):\n local += A[i]\n if local == avg:\n count += 1\n local = 0\n return count == 3 and local == 0\n\n\n\nclass Solution:\n \"\"\"\n Runtime: 368 ms, faster than 47.53% of Python3 online submissions for Partition Array Into Three Parts With Equal Sum.\n Memory Usage: 20.6 MB, less than 6.25% of Python3 online submissions for Partition Array Into Three Parts With Equal Sum.\n \"\"\"\n def canThreePartsEqualSum(self, A: List[int]) -> bool:\n total = sum(A)\n if total % 3 != 0: return False\n avg = int(total /3)\n count = 1\n local = 0\n length = len(A)\n for i in range(length):\n local += A[i]\n if local == avg * count:\n count += 1\n return count == 4\n\n\"\"\"\n\n1435pm timeexceeded...\n\n\n\nInput: [0,2,1,-6,6,-7,9,1,2,0,1]\nOutput: true\nExplanation: 0 + 2 + 1 = -6 + 6 - 7 + 9 + 1 = 2 + 0 + 1\n\"\"\"\nsamples = [\n [0,3, 7, 11],\n#0,1 ^ ^\n#0,2 ^ ^\n#1,2 ^ ^\n [0,2,1,-6,6,-7,9,1,2,0,1], # true\n [0,2,1,-6,6,7,9,-1,2,0,1], # false\n [3,3,6,5,-2,2,5,1,-9,4], # true\n [12,-4,16,-5,9,-3,3,8,0], # true\n\n]\nfor sample in samples:\n print(Solution().canThreePartsEqualSum(sample))\n\n","sub_path":"lc/esy/20190809_esy_1013_partition_array_into_three_parts_with_equal_sum.py","file_name":"20190809_esy_1013_partition_array_into_three_parts_with_equal_sum.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"224724971","text":"from django.contrib.auth import login\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse, reverse_lazy\nfrom django.utils.encoding import force_text, force_bytes\nfrom django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode\nfrom django.views.generic import View, UpdateView, DeleteView \nfrom .forms import UserRegisterForm, AnnouncementForm, PostForm, ProfileForm\nfrom .models import Profile, Announcement, Comment, Reply, Post, Deposit, Wallet, Plans\nfrom .tokens import account_activation_token\nfrom requests import Request, Session\nfrom requests.exceptions import ConnectionError, Timeout, TooManyRedirects\nimport json\n\nfrom chat.models import Message\n\n\n\ndef index(request):\n url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'\n parameters = {\n 'start': '1',\n 'limit': '15',\n 'convert': 
'USD'\n }\n headers = {\n 'Accepts': 'application/json',\n 'X-CMC_PRO_API_KEY': '43be9fd4-af5b-4dbe-9379-6f749d993f8d',\n }\n\n session = Session()\n session.headers.update(headers)\n try:\n response = session.get(url, params=parameters)\n coins = response.json()['data']\n except ConnectionError as e:\n print(e)\n coins = \"No response\"\n\n ann = Announcement.objects.all\n posts = Post.objects.all\n plans = Plans.objects.all\n if request.user.is_authenticated:\n user = Profile.objects.get(user=request.user)\n capital = float(request.user.profile.balance)\n balance = float(request.user.profile.balance)\n pot = float(user.pot)\n com = float(request.user.profile.commission)\n withdraw = 0.8*balance\n plans = Plans.objects.all\n my_wallets = Wallet.objects.filter(owner=request.user) \n context = {'ann': ann, 'posts': posts, 'coins': coins, 'capital': capital, 'plans':plans,\n 'withdraw': withdraw, 'com':com}\n return render(request, 'cap/index.html', context) \n \n context = {'ann': ann, 'posts': posts, 'coins': coins, 'plans':plans }\n return render(request, 'cap/index.html', context)\n \nclass SignUpView(View):\n form_class = UserRegisterForm\n template_name = 'cap/signup.html'\n \n\n def get(self, request, *args, **kwargs):\n form = self.form_class()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = True\n user.save()\n\n return redirect('login')\n\n return render(request, self.template_name, {'form': form})\n\n\nclass ActivateAccount(View):\n\n def get(self, request, uidb64, token, *args, **kwargs):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.profile.email_confirmed = True\n user.save()\n login(request, user)\n messages.success(request, ('Your account have been confirmed.'))\n return redirect('home')\n else:\n messages.warning(request, ('The confirmation link was invalid, possibly because it has already been used.'))\n return redirect('home')\n\n\ndef announcement_form(request):\n if request.method == 'POST':\n form = AnnouncementForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('home'))\n else:\n form = AnnouncementForm()\n context = {'form': form}\n return render(request, 'cap/announcement_form.html', context)\n\n\n@login_required\ndef profile_form(request):\n if request.method == 'POST':\n profile_form = ProfileForm(request.POST, request.FILES, instance=request.user.profile)\n\n if profile_form.is_valid():\n profile_form.save()\n return HttpResponseRedirect(reverse('home'))\n\n else:\n profile_form = ProfileForm(instance=request.user.profile)\n\n context = {\n 'p_form': profile_form\n }\n return render(request, 'cap/profile_form.html', context)\n\n\ndef post_list(request):\n posts = Post.objects.filter(status=1).order_by('-created_on')\n paginator = Paginator(posts, 25)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n context = {'posts': posts, 'page_obj': page_obj}\n if request.user.is_authenticated:\n note_comments = Comment.objects.filter(post__creator=request.user, not_status='unseen').exclude(\n name=request.user)\n note_mentions = Comment.objects.filter(body__icontains=request.user, 
not_status='unseen').exclude(\n name=request.user)\n note_replies = Reply.objects.filter(comment__name=request.user, not_status='unseen')\n context = {'note_comments': note_comments, 'note_mentions': note_mentions, 'note_replies': note_replies,\n 'posts': posts, 'page_obj': page_obj}\n return render(request, 'cap/post_list.html', context)\n\n\ndef post_detail(request, slug):\n post = get_object_or_404(Post, slug=slug)\n post.visits = post.visits + 1\n post.save()\n post = get_object_or_404(Post, slug=slug)\n context = {'post': post, 'slug': slug, 'visits': post.visits,\n }\n \n return render(request, 'cap/post_detail.html', context)\n\n\n@login_required\ndef post_create(request):\n if request.method == 'POST':\n form = PostForm(request.POST, request.FILES)\n if form.is_valid():\n post = form.save(commit=False)\n post.creator = request.user\n post.save()\n return HttpResponseRedirect(reverse('home'))\n else:\n form = PostForm()\n context = {'form': form}\n if request.user.is_authenticated:\n note_comments = Comment.objects.filter(post__creator=request.user, not_status='unseen').exclude(\n name=request.user)\n note_mentions = Comment.objects.filter(body__icontains=request.user, not_status='unseen').exclude(\n name=request.user)\n note_replies = Reply.objects.filter(comment__name=request.user, not_status='unseen')\n context = {'note_comments': note_comments, 'note_mentions': note_mentions, 'note_replies': note_replies,\n 'form': form}\n return render(request, 'cap/post_form.html', context)\n\n\nclass PostUpdate(UpdateView, LoginRequiredMixin):\n template_name = 'cap/post_update.html'\n model = Post\n form_class = PostForm\n\n\nclass PostDelete(DeleteView, LoginRequiredMixin):\n model = Post\n success_url = reverse_lazy('home')\n\n\n@login_required\ndef settings(request):\n context = {}\n return render(request, 'cap/settings.html', context)\n\n\n@login_required\ndef trade(request):\n url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'\n parameters = {\n 'start': '1',\n 'limit': '12',\n 'convert': 'USD'\n }\n headers = {\n 'Accepts': 'application/json',\n 'X-CMC_PRO_API_KEY': '43be9fd4-af5b-4dbe-9379-6f749d993f8d',\n }\n\n session = Session()\n session.headers.update(headers)\n\n try:\n response = session.get(url, params=parameters)\n coins = response.json()['data']\n except ConnectionError as e:\n coins = \"No response\"\n\n context = {'coins': coins}\n return render(request, 'cap/trade.html', context)\n\n\n\ndef payment(request):\n context = {}\n return render(request, 'cap/payment.html', context)\n \n\n@login_required\ndef buy(request):\n cash = float(request.POST.get('cash'))\n price = request.POST.get('price')\n name = request.POST.get('name')\n sym = request.POST.get('sym')\n bal = request.user.profile.balance\n buy = float(price)/float(cash)\n context = {'name':name, 'buy':buy, 'cash':cash, 'price':price, 'sym':sym, 'bal':bal}\n return render(request, 'cap/buy.html', context)\n\n\n@login_required\ndef wallets(request):\n url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'\n parameters = {\n 'start': '1',\n 'limit': '12',\n 'convert': 'USD'\n }\n headers = {\n 'Accepts': 'application/json',\n 'X-CMC_PRO_API_KEY': '43be9fd4-af5b-4dbe-9379-6f749d993f8d',\n }\n\n session = Session()\n session.headers.update(headers)\n\n try:\n response = session.get(url, params=parameters)\n coins = response.json()['data']\n except ConnectionError as e:\n coins = \"No response\"\n \n cash = request.POST.get('cash')\n price = request.POST.get('price')\n name = 
request.POST.get('name')\n sym = request.POST.get('sym')\n bal = request.user.profile.balance\n user = Profile.objects.get(user=request.user)\n user.balance = float(user.balance) - float(cash)\n user.save()\n buy = request.POST.get('buy')\n wallets = Wallet.objects.filter(owner=request.user)\n try:\n wallet = Wallet.objects.get(owner=request.user, crypto=name)\n wallet.total = float(wallet.total) + float(buy)\n wallet.save()\n except Wallet.DoesNotExist:\n wallet = Wallet.objects.create(owner=request.user, crypto=name, total=buy, cost=cash)\n wallet.save()\n context = {'name':name, 'buy':buy, 'cash':cash, 'price':price, 'wallets':wallets, 'sym':sym, 'bal':bal,\n 'coins': coins\n }\n return render(request, 'cap/wallets.html', context)\n\n\n@login_required\ndef my_wallets(request):\n wallets = Wallet.objects.filter(owner=request.user)\n\n context = {'wallets':wallets}\n return render(request, 'cap/my_wallets.html', context)\n\n\ndef terms(request): \n return render(request, 'cap/includes/terms.html')\n\n\ndef faq(request): \n return render(request, 'cap/includes/faq.html')\n\n\n\n@login_required\ndef plan_detail(request): \n plan = request.POST.get('plan')\n cost = request.POST.get('cost')\n earn = request.POST.get('earn')\n user = Profile.objects.get(user=request.user)\n pot = float(user.pot)\n pot = pot + float(earn)\n user.balance = float(user.balance) - float(cost)\n user.pot = pot\n user.plan = plan\n user.save() \n context = {'plan':plan, 'cost':cost, 'earn':earn}\n return render(request, 'cap/includes/plan_detail.html', context)\n\n\n@login_required\ndef admin(request):\n profiles = Profile.objects.all().order_by('-joined')\n\n context = { 'profiles':profiles\n }\n return render(request, 'cap/admin.html', context)\n\n\nclass ProfileUpdate(UpdateView, LoginRequiredMixin):\n template_name = 'cap/profile_update.html'\n model = Profile\n form_class = ProfileForm\n success_url = reverse_lazy('admin')\n\n\nclass ProfileDelete(DeleteView, LoginRequiredMixin):\n model = Profile\n success_url = reverse_lazy('admin')\n\n\n\n\n\n","sub_path":"cap/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"116505507","text":"import unittest\n\nimport mock\nfrom orm.services.resource_distributor.rds.sot.git_sot.git_sot import GitSoT\nfrom orm.services.resource_distributor.rds.sot import sot_factory\n\n\nclass SoTFactoryTests(unittest.TestCase):\n def setUp(self):\n super(SoTFactoryTests, self).setUp()\n self.addCleanup(mock.patch.stopall)\n git_factory = mock.MagicMock()\n git_factory.get_git_impl = mock.MagicMock()\n\n def test_get_sot_no_sot_type(self):\n \"\"\"Check that a runtime error is raised if no git type\n is available from config\n \"\"\"\n sot_factory.sot_type = \"\"\n with self.assertRaises(RuntimeError):\n sot_factory.get_sot()\n\n# def test_get_sot_git_type(self):\n# \"\"\" Check that when 'git' type is provided the returned object\n# is instance of GiTSoT\n# \"\"\"\n# sot_factory.sot_type = \"git\"\n# obj = sot_factory.get_sot()\n# self.assertIsInstance(obj, GitSoT)\n\n def test_get_sot_git_sot_params(self):\n sot_factory.sot_type = \"git\"\n sot_factory.local_repository_path = \"2\"\n sot_factory.relative_path_format = \"3\"\n sot_factory.commit_message_format = \"4\"\n sot_factory.commit_user = \"5\"\n sot_factory.commit_email = \"6\"\n sot_factory.git_server_url = \"7\"\n sot_factory.git_type = \"gittle\"\n\n obj = sot_factory.get_sot()\n 
self.assertEqual(GitSoT.local_repository_path, \"2\", \"local_repository_path not match\")\n self.assertEqual(GitSoT.relative_path_format, \"3\", \"relative_path_format not match\")\n self.assertEqual(GitSoT.commit_message_format, \"4\", \"commit_message_format not match\")\n self.assertEqual(GitSoT.commit_user, \"5\", \"commit_user not match\")\n self.assertEqual(GitSoT.commit_email, \"6\", \"commit_email not match\")\n self.assertEqual(GitSoT.git_server_url, \"7\", \"git_server_url not match\")\n self.assertEqual(GitSoT.git_type, \"gittle\", \"git_type not match\")\n","sub_path":"orm/tests/unit/rds/sot/test_sot_factory.py","file_name":"test_sot_factory.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"339402857","text":"#encoding: utf-8\nimport unittest\nfrom alchemytools.context import managed\n\nfrom m2tool.db import Metadata, Session\nfrom m2tool.db.host import Host\nfrom m2tool.commands.host import add, remove, update\nfrom m2tool.db.server import Server\n\nclass HostCommandTest(unittest.TestCase):\n\n def setUp(self):\n Metadata.drop_all()\n Metadata.create_all()\n #Create a new server by test\n self.server = Server(id=1,name='localhost-full', port=80, chroot='/var/m2', bind_addr='0.0.0.0', pid_File='/run/pid.1',\n default_host='jj.com', access_log='/logs/access.log', error_log='/logs/error.log', use_ssl=True, uuid='12e4-abcd-5678-efgh')\n\n\n def test_create_new_host_add(self):\n with managed(Session) as session:\n self.assertEquals(0, len(session.query(Host).all()))\n\n session.add(self.server)\n session.commit()\n self.assertEquals(1, self.server.id)\n self.assertEquals(80, self.server.port)\n\n add(server_id=1, name=\"m2tool.br\" , matching=\"m2tool.br\", maintenance=False)\n hosts = session.query(Host).all()\n\n self.assertEquals(1, len(hosts))\n self.assertEquals(\"m2tool.br\", hosts[0].name)\n self.assertEquals(\"m2tool.br\", hosts[0].matching)\n self.assertFalse(hosts[0].maintenance)\n\n def test_server_exists_to_add_host(self):\n with managed(Session) as session:\n add(server_id=2, name=\"m2tool.br\" , matching=\"m2tool.br\", maintenance=False)\n\n self.assertEquals(0, len(session.query(Host).all()))\n\n def test_remove_host(self):\n with managed(Session) as session:\n session.add(self.server)\n session.commit()\n\n add(server_id=1, name=\"m2tool.br\" , matching=\"m2tool.br\", maintenance=False)\n\n self.assertEquals(1, session.query(Host).filter_by(id=1).count())\n\n remove(id=[1])\n\n self.assertEquals(0, session.query(Host).filter_by(id=1).count())\n\n def test_update_host(self):\n with managed(Session) as session:\n session.add(self.server)\n session.commit()\n\n add(server_id=1, name=\"m2tool.br\" , matching=\"m2tool.br\", maintenance=False)\n\n update(id=1, name=\"m2tool.com\", matching=\"m2tool.com\")\n\n with managed(Session) as session:\n host = session.query(Host).filter_by(id=1)[0]\n\n self.assertEquals(host.name, \"m2tool.com\")\n self.assertEquals(host.matching, \"m2tool.com\")\n self.assertFalse(host.maintenance)\n\n def test_host_not_exist(self):\n with managed(Session) as session:\n hosts = session.query(Host).all()\n self.assertEquals(0, len(hosts))\n update(id=1, name=\"m2tool.com\", matching=\"m2tool.com\")\n\n with managed(Session) as session:\n hosts = session.query(Host).all()\n self.assertEquals(0, len(hosts))\n\n\n\n def test_update_to_server_not_exist(self):\n with managed(Session) as session:\n session.add(self.server)\n session.commit()\n\n 
add(server_id=1, name=\"m2tool.br\" , matching=\"m2tool.br\", maintenance=False)\n update(id=1, server_id=2)\n\n with managed(Session) as session:\n host = session.query(Host).filter_by(id=1)[0]\n self.assertEquals(host.server_id, 1)\n\n\n def tearDown(self):\n Metadata.drop_all()\n Metadata.create_all()","sub_path":"test/host_test.py","file_name":"host_test.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"359889912","text":"import csv\r\nimport itertools\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\n\r\n\r\ncarddeck = []\r\n\r\npict = ['h', 's', 'd', 'c']\r\n\r\nfor i in range(2, 15):\r\n for j in pict:\r\n carddeck.append(str(i)+j)\r\n\r\n\r\n# print(carddeck)\r\n\r\n# separate the card into number and pitcure\r\nclass card(object):\r\n def num(self, name):\r\n self.name = name\r\n num = name[:-1]\r\n return (int(num))\r\n\r\n def pic(self, name):\r\n self.name = name\r\n pic = name[-1]\r\n return (pic)\r\n\r\n\r\n'''\t\t\r\nx = card()\r\nprint(x.num('13s'))\r\nprint(x.pic('11j'))\r\n'''\r\n\r\n\r\nclass handclass(card):\r\n def hand(self, h1, h2, h3, h4, h5):\r\n x = card()\r\n # sort the numbers\r\n tnl = [x.num(h1), x.num(h2), x.num(h3), x.num(h4), x.num(h5)]\r\n nl = sorted(tnl)\r\n # get the picture\r\n p = [x.pic(h1), x.pic(h2), x.pic(h3), x.pic(h4), x.pic(h5)]\r\n # rank the hand\r\n if nl[0]+4 == nl[1]+3 == nl[2]+2 == nl[3]+1 == nl[4] and p[0] == p[1] == p[2] == p[3] == p[4]:\r\n return (9)\r\n #print('Straight flush!!')\r\n\r\n elif nl[0]+12 == nl[1]+11 == nl[2]+10 == nl[3]+9 == nl[4] == 14 and p[0] == p[1] == p[2] == p[3] == p[4]:\r\n return (9)\r\n #print('Straight flush!!')\r\n \r\n elif nl[0] == nl[1] == nl[2] == nl[3] or nl[1] == nl[2] == nl[3] == nl[4]:\r\n return (8)\r\n #print('Four cards!!')\r\n\r\n elif nl[0] == nl[1] == nl[2] and nl[3] == nl[4] or nl[0] == nl[1] and nl[2] == nl[3] == nl[4]:\r\n return (7)\r\n #\tprint('Full house!!')\r\n\r\n elif p[0] == p[1] == p[2] == p[3] == p[4]:\r\n return (6)\r\n #\tprint('Flush!!')\r\n\r\n elif nl[0]+4 == nl[1]+3 == nl[2]+2 == nl[3]+1 == nl[4] or nl[0]+12 == nl[1]+11 == nl[2]+10 == nl[3]+9 == nl[4] == 14:\r\n return (5)\r\n # print('Straight!!')\r\n\r\n elif nl[0] == nl[1] == nl[2] or nl[1] == nl[2] == nl[3] or nl[2] == nl[3] == nl[4]:\r\n return (4)\r\n #print('Three cards!!')\r\n\r\n elif nl[0] == nl[1] and nl[2] == nl[3] or nl[0] == nl[1] and nl[3] == nl[4] or nl[1] == nl[2] and nl[3] == nl[4]:\r\n return (3)\r\n #print('Two pair!!')\r\n\r\n elif nl[0] == nl[1] or nl[1] == nl[2] or nl[2] == nl[3] or nl[3] == nl[4]:\r\n return (2)\r\n #print('One pair!!')\r\n\r\n else:\r\n return (1)\r\n #\tprint('High card!!')\r\n\r\n\r\nclass handpoint(handclass):\r\n def points(self, myhand):\r\n #handlist = ('high card!!','one pair!!','two pair!!','three cards!!','straight!!','flush!!','fullhouse!!','four cards!!','stright flush!!')\r\n\r\n if len(myhand) != len(set(myhand)):\r\n return 0\r\n \r\n else:\r\n ah = []\r\n # checking all 5 card conbination\r\n for v in itertools.combinations(myhand, 5):\r\n # returning the point and sort\r\n ah += str(handclass.hand(self, v[0], v[1], v[2], v[3], v[4]))\r\n ag = sorted(ah)\r\n # printing the highest point\r\n return ag[-1]\r\n \r\n\r\n\r\n\r\nwith open('pro.csv','a') as f:\r\n writer = csv.writer(f)\r\n columnlist = ['',]\r\n for c in itertools.combinations(carddeck,2):\r\n columnlist.append(c)\r\n\r\n writer.writerow(columnlist)\r\n\r\n for t in 
itertools.combinations(carddeck,5):\r\n body_list = [t]\r\n for c in itertools.combinations(carddeck,2): \r\n myhand = list(c) + list(t)\r\n point = handpoint()\r\n p = point.points(myhand)\r\n body_list.append(p)\r\n \r\n writer.writerow(body_list)\r\n \r\n\r\n","sub_path":"cardpro.py","file_name":"cardpro.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"562289196","text":"'''\n (c) Copyright 2022\n All rights reserved\n Programs written by Yasser Abduallah\n Department of Computer Science\n New Jersey Institute of Technology\n University Heights, Newark, NJ 07102, USA\n\n Permission to use, copy, modify, and distribute this\n software and its documentation for any purpose and without\n fee is hereby granted, provided that this copyright\n notice appears in all copies. Programmer(s) makes no\n representations about the suitability of this\n software for any purpose. It is provided \"as is\" without\n express or implied warranty.\n\n @author: Yasser Abduallah\n'''\n\nfrom __future__ import print_function\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \nimport tensorflow as tf\ntry: \n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\nexcept Exception as e:\n print('')\nimport numpy as np\nfrom datetime import datetime, timedelta\nimport argparse\nimport time\nfrom time import sleep\nimport math\nimport random\nfrom tensorflow import keras\nfrom tensorflow.keras import layers,models\nfrom tensorflow.keras.callbacks import EarlyStopping\nimport shutil\n\nfrom sklearn.utils import class_weight\nfrom SEP_utils import *\nfrom SEP_attention import *\n\nclass SEPModel:\n model = None\n model_name = None\n callbacks = None\n input = None\n input_shape = None\n loss = None\n adam_lr = None\n metrics = None\n \n def __init__(self,model_name='SEPModel',early_stopping_patience=3):\n self.model_name = model_name\n callbacks = [EarlyStopping(monitor='loss', patience=early_stopping_patience)]\n\n if tf.test.gpu_device_name() != '/device:GPU:0':\n print('WARNING: GPU device not found.')\n else:\n print('SUCCESS: Found GPU: {}'.format(tf.test.gpu_device_name()))\n physical_devices = tf.config.list_physical_devices('GPU')\n if len(physical_devices ) > 0:\n tf.config.experimental.set_memory_growth(physical_devices[0], enable=True)\n \n \n def build_base_model(self,\n input_shape):\n input = keras.Input(shape=input_shape)\n self.input = input \n self.input_shape = input_shape\n model = layers.Dense(100, kernel_regularizer=l2(0.001))(input)\n model = layers.Dense(100, kernel_regularizer=l2(0.001))(input)\n model = layers.Bidirectional(layers.LSTM(units=400,kernel_regularizer=l2(0.001), return_sequences=True, dropout=0.5, recurrent_dropout=0.5))(model)\n model = layers.Bidirectional(layers.LSTM(units=100,return_sequences=True,kernel_regularizer=l2(0.001)))(model)\n model = SEPAttention()(model)\n model = layers.Flatten()(model) \n model = layers.Dense(1, activation='sigmoid',kernel_regularizer=l2(0.001))(model)\n model = layers.BatchNormalization(momentum=0.9)(model)\n self.model = model\n return model \n\n def models(self):\n self.model = models.Model(self.input, self.model)\n \n def summary(self):\n self.model.summary()\n \n def compile(self,loss='binary_crossentropy',metrics=['accuracy'], adam_lr=0.001):\n self.model.compile(optimizer=tf.optimizers.Adam(learning_rate=adam_lr),loss=loss, metrics=metrics)\n \n def fit(self,\n X_train, \n 
y_train,\n X_valid=None, \n y_valid=None,\n epochs=2,\n verbose=0,\n batch_size=2048):\n validation_data = None \n # explicit None checks: truth-testing numpy arrays raises ValueError\n if X_valid is not None and y_valid is not None:\n validation_data = (X_valid, y_valid)\n starting_time = datetime.now()\n \n history = self.model.fit(X_train, \n y_train, \n epochs=epochs, \n verbose=verbose, \n batch_size=batch_size,\n validation_data=validation_data,\n callbacks=[keras.callbacks.ProgbarLogger()])\n total_seconds = int((datetime.now() - starting_time).total_seconds())\n per_step = total_seconds//4\n print('11/11','-', str(int((datetime.now() - starting_time).total_seconds())) + 's', '-','loss:',\n round(np.array(history.history['loss']).min(),4),'-', 'accuracy:', \n round(np.array(history.history['accuracy']).max(),4),end=' ')\n # print('1/1 [==============================]','-', per_step, 's/step')\n return history\n \n def predict(self,X_test,verbose=1):\n predictions = self.model.predict(X_test,\n verbose=verbose,\n batch_size=len(X_test))\n return np.squeeze(predictions) \n \n def save_weights(self,e_type='fc',time_window=12,w_dir=None):\n e_type = str(e_type).lower().replace('_s','')\n weight_dir = 'models' + os.sep + 'sep_model_' + str(e_type) + '_' + str(time_window) + 'hr'\n if w_dir is not None:\n weight_dir = w_dir + os.sep + 'sep_model_' + str(e_type) + '_' + str(time_window) + 'hr'\n if os.path.exists(weight_dir):\n shutil.rmtree(weight_dir)\n os.makedirs(weight_dir)\n self.model.save_weights(weight_dir + os.sep + 'model_weights')\n \n def load_weights(self,e_type='fc',time_window=12,w_dir=None):\n e_type = str(e_type).lower().replace('_s','')\n weight_dir = 'models' + os.sep + 'sep_model_' + str(e_type) + '_' + str(time_window) + 'hr'\n if w_dir is not None:\n weight_dir = w_dir + os.sep + 'sep_model_' + str(e_type) + '_' + str(time_window) + 'hr'\n print('Loading weights from model dir:', weight_dir)\n if not os.path.exists(weight_dir):\n print('Error: Model weights directory does not exist:', weight_dir)\n if not w_dir == 'default_models':\n print('Trying pre trained default models directory: default_models')\n weight_dir = 'default_models' + os.sep + 'sep_model_' + str(e_type) + '_' + str(time_window) + 'hr'\n if not os.path.exists(weight_dir):\n print('Error: Model weights for default directory does not exist:', weight_dir)\n exit()\n else:\n exit()\n self.build_model(weight_dir + os.sep + 'model_weights') \n if self.model is None:\n print('Error: You must train a model first before loading the weights.')\n exit()\n print('Loading weights from:', weight_dir + os.sep + 'model_weights') \n self.model.load_weights(weight_dir + os.sep + 'model_weights').expect_partial()\n \n def load_model(self,input_shape=(series_len,n_features),\n e_type='FC_S',\n time_window=12,\n loss='binary_crossentropy',\n metrics=['accuracy'],\n adam_lr=0.0001,\n w_dir=None):\n self.input_shape = input_shape \n self.adam_lr = adam_lr\n self.metrics = metrics \n self.loss = loss \n \n # self.build_base_model(input_shape)\n # self.models()\n # self.compile(loss=loss, metrics=metrics, adam_lr=adam_lr)\n e_type=str(e_type).lower()\n self.load_weights(e_type=e_type, time_window=time_window,w_dir=w_dir)\n \n def save(self,dir_name):\n os.makedirs(dir_name, exist_ok=True)\n \n def build_model(self,w):\n print('Building model for:', w)\n from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file\n from tensorflow.python.training import py_checkpoint_reader\n reader = py_checkpoint_reader.NewCheckpointReader(w)\n maps = reader.get_variable_to_shape_map()\n b = 
maps['layer_with_weights-0/kernel/.ATTRIBUTES/VARIABLE_VALUE'][0]\n a = maps['layer_with_weights-3/att_bias/.ATTRIBUTES/VARIABLE_VALUE'][0]\n self.input_shape = (a,b)\n self.build_base_model(self.input_shape)\n self.models()\n self.compile(loss=self.loss, \n metrics=self.metrics, \n adam_lr=self.adam_lr)\n","sub_path":"SEP_Package/SEP_model.py","file_name":"SEP_model.py","file_ext":"py","file_size_in_byte":8056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"598114440","text":"__author__ = 'Amin'\n\nfrom collections import OrderedDict\nimport collections\nimport math\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\n\n\n\ndef cosine_similarity(v1, v2):\n dotp = 0\n s = 0.0\n\n l2 = v2.keys()\n\n for item in l2:\n if item in v1:\n dotp += float(v2.get(item)) * float(v1.get(item))\n\n for w in v1.values():\n w = float(w)\n s += w * w\n len1 = math.sqrt(s)\n\n s = 0.0\n for w in v2.values():\n w = float(w)\n s += w * w\n len2 = math.sqrt(s)\n\n return dotp / (len1 * len2)\n\n\ndef cal_precision(res):\n testFile = open(\"Test/msd_test_hidden.txt\", 'r')\n\n total = 0\n hidden_dict = {}\n for line in testFile:\n tokens = line.split()\n userName = tokens[0]\n itemName = tokens[1]\n\n if not userName in hidden_dict:\n list = []\n list.append(itemName)\n hidden_dict[userName] = list\n else:\n hidden_dict[userName].append(itemName)\n total += 1\n\n\n count = 0\n\n for (user, recommend_songs) in res.items():\n # total += len(recommend_songs)\n for song in recommend_songs:\n if song in hidden_dict[user]:\n count += 1\n\n print(count / total)\n\n\nuserMap = {}\nitemMap = collections.defaultdict(OrderedDict)\nuserItemMap = collections.defaultdict(dict)\nuserTestMap = collections.defaultdict(dict)\n\nitemReverseMap = collections.defaultdict()\nuserTestReverseMap = collections.defaultdict()\ntrainFile = open('Test/msd_train_visible.txt', 'r')\ntestFile = open(\"Test/msd_test_visible.txt\", 'r')\n# fileOut = open('out_file.txt', 'a')\n\nfor line in trainFile:\n\n tokens = line.split()\n userName = tokens[0]\n itemName = tokens[1]\n numberOfUsers = len(userMap)\n numberOfItems = len(itemMap)\n\n if not userName in userMap:\n userMap[userName] = numberOfUsers + 1\n userValue = numberOfUsers + 1\n else:\n userValue = userMap.get(userName)\n\n if not itemName in itemMap:\n itemMap[itemName] = numberOfItems + 1\n itemValue = numberOfItems + 1\n itemReverseMap[numberOfItems+1] = itemName\n else:\n itemValue = itemMap.get(itemName)\n\n userItemMap[userValue][itemValue] = tokens[2]\nprint(\"Done1\")\n\n\nfor line in testFile:\n tokens = line.split()\n userName = tokens[0]\n itemName = tokens[1]\n numberOfUsers = len(userMap)\n numberOfItems = len(itemMap)\n\n if not userName in userMap:\n userMap[userName] = numberOfUsers + 1\n userValue = numberOfUsers + 1\n userTestReverseMap[numberOfUsers + 1] = userName\n else:\n userValue = userMap.get(userName)\n\n if not itemName in itemMap:\n itemMap[itemName] = numberOfItems + 1\n itemValue = numberOfItems + 1\n else:\n itemValue = itemMap.get(itemName)\n\n userTestMap[userValue][itemValue] = tokens[2]\nprint(\"Done2\")\n\n\ncosineDistance = collections.defaultdict(dict)\nfor testUser in userTestMap.items():\n for trainUser in userItemMap.items():\n val = cosine_similarity(trainUser[1], testUser[1])\n cosineDistance[testUser[0]][trainUser[0]] = val\nprint(\"Done3\")\n\nlenCos = len(cosineDistance)\n\nK = 10\nnearestNeighbors = [[]]\nfor i in range(0, lenCos - 1, 1):\n topKUsers = 
sorted(list(cosineDistance.items())[i][1].items(), key=lambda t: float(t[1]), reverse=True)\n userID = [[]]\n for j in range(0, K, 1):\n userID.append(topKUsers[j][0])\n nearestNeighbors.append([list(cosineDistance.items())[i][0], userID[1:]])\n\nN = 30\nlenNearestNeighbor = len(nearestNeighbors)\n\n\nres = {}\nfor i in range(1, lenNearestNeighbor - 1, 1):\n currTestMap = userTestMap.get(nearestNeighbors[i][0])\n newSongs = collections.defaultdict()\n for j in range(0, K, 1):\n currUserMap = userItemMap.get(nearestNeighbors[i][1][j])\n for k, v in currUserMap.items():\n if not k in currTestMap:\n if not k in newSongs:\n newSongs[k] = v\n else:\n val = newSongs.get(k)\n newVal = max(val, v)\n newSongs[k] = newVal\n d = sorted(newSongs.items(), key=lambda t: int(t[1]), reverse=True)\n numElems = min(N, len(d))\n dnew = d[:numElems]\n recommendedItems = collections.OrderedDict(dnew)\n\n\n userName = userTestReverseMap.get(nearestNeighbors[i][0])\n itemNames = []\n for key in recommendedItems.keys():\n itemName = itemReverseMap.get(key)\n itemNames.append(itemName)\n # print(fileOut, userName, \" \", itemNames)\n print(userName, \" \", itemNames)\n res[userName] = itemNames\n\n#cal_precision(res)\n\ntriplet_dataset = pd.read_csv(filepath_or_buffer='Test/msd_test_visible.txt', sep='\\t', header=None)\ntriplet_dataset.columns = ['user', 'song', 'rating']\n\ntriplet_test = pd.read_csv(filepath_or_buffer='Test/msd_test_hidden.txt', sep='\\t', header=None)\ntriplet_test.columns = ['user', 'song', 'rating']\n\nsong_list = list(triplet_dataset['song'].unique())\nsong_list.sort()\n\nuser_list = list(triplet_dataset['user'].unique())\nuser_list.sort()\n\n#%%\npred = []\nfor uid in tqdm(user_list):\n user_song_list = triplet_test.loc[triplet_test['user'] == uid]['song'].tolist()\n user_train_list = triplet_dataset.loc[triplet_dataset['user'] == uid]['song'].tolist()\n if uid in res:\n song_rating = res[uid]\n # song_rating.sort(key = lambda x : x[1], reverse = True)\n p = []\n for k in range(1, N + 1):\n count = 0\n song = 0\n for i in song_rating:\n # if i[0] not in user_train_list:\n song += 1\n if i[0] in user_song_list:\n count += 1\n \n if song >= k:\n break\n # p.append( count / k)\n if i[0] in user_song_list:\n p.append(count / k)\n else:\n p.append(0)\n pred.append(sum(p) / min(N, len(song_rating)))\n \nprint(np.mean(pred))\n\ntestFile.close()\ntrainFile.close()\n# fileOut.close()","sub_path":"codes/KNN_Based_Predictor2.py","file_name":"KNN_Based_Predictor2.py","file_ext":"py","file_size_in_byte":6056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"4433440","text":"from django.urls import path\n\nfrom .views import (index,\n rss_source,\n download_fb2,\n download_pdf,\n PostListView,\n DatePostListView,\n RSSPostListView,\n SearchResultView,\n remove_news)\n\n\nurlpatterns = [\n path('', index, name='index'),\n path('home/rss', rss_source, name='rrs_source'),\n path('home/', PostListView.as_view(), name='home'),\n path('home/date/',\n DatePostListView.as_view(),\n name='news-by-date'),\n path('home/rss_source/',\n RSSPostListView.as_view(),\n name='news-by-rss'),\n path('home/search/',\n SearchResultView.as_view(),\n name='search-result'),\n path('home/remove',\n remove_news,\n name='remove-news'),\n path('render/pdf/',\n download_pdf,\n name='to-pdf'),\n path('render/fb2/',\n download_fb2,\n 
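# FB2 export endpoint, mirroring the PDF route above\n 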
name='to-fb2')\n]\n","sub_path":"newsfeed/news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"606391214","text":"#!/usr/bin/env python\n# coding=utf-8\nfrom ansible.module_utils.basic import *\nfrom nsxramlclient.client import NsxClient\n\n\"\"\"vcd_load_balancer.py: vcd load balancer\"\"\"\n\n__copyright__ = \"(c) 2019 Dell Inc. or its subsidiaries. All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries. Other trademarks may be trademarks of their respective owners.\"\n\ndef lb_config(client_session, module, edge_id):\n lb_config_body = client_session.extract_resource_body_schema('loadBalancer', 'update')\n lb_config_body['loadBalancer']['enabled']='true'\n\n #certificate_id = get_certificate_id(client_session, edge_id)\n app_profiles=[{'applicationProfileId': 'applicationProfile-1',\n 'name': module.params['app_profile_name_https'], 'insertXForwardedFor': 'false',\n 'sslPassthrough':'false', 'template':'HTTPS',\n 'sslPassthrough': 'true'},\n {'applicationProfileId': 'applicationProfile-2',\n 'name': module.params['app_profile_name_http'], 'insertXForwardedFor': 'false',\n 'sslPassthrough':'false', 'template':'HTTP',\n },\n {'applicationProfileId': 'applicationProfile-3',\n 'name': module.params['app_profile_name_tcp'], 'insertXForwardedFor': 'false',\n 'sslPassthrough':'false','template':'TCP',\n 'serverSslEnabled':'false'}]\n\n lb_config_body['loadBalancer']['applicationProfile'] = app_profiles\n lb_config_body['loadBalancer']['monitor'] = {'name': module.params['monitor_name'],\n 'type': module.params['monitor_type'],\n 'interval': module.params['monitor_interval'],\n 'timeout': module.params['monitor_time_out'],\n 'maxRetries': module.params['monitor_retries'],\n 'method': module.params['monitor_url_method'],\n 'url': module.params['monitor_url']}\n\n del lb_config_body['loadBalancer']['applicationRule']\n del lb_config_body['loadBalancer']['virtualServer']\n del lb_config_body['loadBalancer']['pool']\n return client_session.update('loadBalancer', uri_parameters={'edgeId': edge_id},\n request_body_dict=lb_config_body)\n\ndef get_monitor_id(client_session, monitor_name, edge_id):\n monitor_res = client_session.read('lbMonitors', uri_parameters={'edgeId': edge_id})['body']['loadBalancer']['monitor']\n if isinstance(monitor_res, list):\n try:\n monitor_id = [monitor_info['monitorId'] for monitor_info in monitor_res if monitor_info['name'] == 'tcp_monitor']\n except IndexError:\n return None\n else:\n if monitor_res['name'] == monitor_name:\n monitor_id = monitor_res['monitorId']\n return monitor_id\n\ndef get_application_rule_id(client_session, app_rule_name, edge_id):\n app_rules_res = client_session.read('appRules', uri_parameters={'edgeId': edge_id})['body']['loadBalancer']['applicationRule']\n try:\n app_rule_id = [app_rule_info['applicationRuleId'] for app_rule_info in app_rules_res if app_rule_info['name'] == app_rule_name][0]\n except IndexError:\n return None\n\n return app_rule_id\n\ndef get_application_profile_id(client_session, profile_type, edge_id):\n app_profiles_res = client_session.read('applicationProfiles', uri_parameters={'edgeId': edge_id})['body']['loadBalancer']['applicationProfile']\n\n try:\n app_profile_id = [app_profile_info['applicationProfileId'] for app_profile_info in app_profiles_res if app_profile_info['template'] == profile_type][0]\n except IndexError:\n return None\n return 
app_profile_id\n\ndef get_pool_id(client_session, pool_name, edge_id):\n pools_res = client_session.read('pools', uri_parameters={'edgeId': edge_id})['body']['loadBalancer']['pool']\n\n try:\n pool_id = [pool_info['poolId'] for pool_info in pools_res if pool_info['name'] == pool_name][0]\n except IndexError:\n return None\n return pool_id\n\ndef add_virtual_servers(client_session, module, edge_id):\n virtual_server_config_body = client_session.extract_resource_body_schema('virtualServers', 'create')\n\n https_app_profile_id= get_application_profile_id(client_session, 'HTTPS', edge_id)\n http_app_profile_id= get_application_profile_id(client_session, 'HTTP', edge_id)\n\n https_pool_id = get_pool_id(client_session,\n module.params['https_pool_name'], edge_id)\n http_pool_id = get_pool_id(client_session,\n module.params['http_pool_name'], edge_id)\n\n #http_app_rule_id = get_application_rule_id(client_session,\n # module.params['app_rule_name_http'], edge_id)\n #tcp_app_rule_id = get_application_rule_id(client_session,\n # module.params['app_rule_name_tcp'], edge_id)\n\n virtual_servers=[{'applicationProfileId': https_app_profile_id,\n 'name': module.params['https_virtual_server_name'],'enabled':'true',\n 'ipAddress': module.params['virtual_ip_address'], 'protocol':'https',\n 'port': module.params['https_virtual_server_port'], 'defaultPoolId': https_pool_id,\n },\n {'applicationProfileId': http_app_profile_id,\n 'name': module.params['http_virtual_server_name'], 'enabled':'true',\n 'ipAddress': module.params['virtual_ip_address'], 'protocol':'http',\n 'port': module.params['http_virtual_server_port'],\n 'defaultPoolId': http_pool_id}]\n\n for virtual_server_info in virtual_servers:\n virtual_server_config_body['virtualServer'] = virtual_server_info\n virtual_servers_res = client_session.create('virtualServers',\n uri_parameters={'edgeId': edge_id},\n request_body_dict=virtual_server_config_body)\n return virtual_servers_res\n\ndef add_pools(client_session, module, edge_id):\n '''Function to add pools for load balancer\n inputs:\n client_session: client session of NSX\n type: object\n edge_id: NSX edge id\n type: string\n '''\n pool_config_body = client_session.extract_resource_body_schema('pools', 'create')\n # Get monitor id\n monitor_id = get_monitor_id(client_session, module.params['monitor_name'], edge_id)\n pools_dict = [{'name': module.params['http_pool_name'], 'algorithm':'round-robin',\n 'transparent':'false', 'monitorId':monitor_id,\n 'member': [{'name': module.params['http_pool_first_member_name'],\n 'ipAddress': module.params['http_pool_first_member_ip'],\n 'port': module.params['http_pool_first_member_port'],\n 'monitorPort': module.params['http_pool_first_member_monitor_port']},\n {'name': module.params['http_pool_second_member_name'],\n 'ipAddress': module.params['http_pool_second_member_ip'],\n 'port': module.params['http_pool_second_member_port'],\n 'monitorPort': module.params['http_pool_second_member_monitor_port']}\n ]},\n {'name': module.params['https_pool_name'], 'algorithm':'round-robin',\n 'transparent':'false', 'monitorId':monitor_id,\n 'member': [{'name': module.params['https_pool_first_member_name'],\n 'ipAddress': module.params['https_pool_first_member_ip'],\n 'port': module.params['https_pool_first_member_port'],\n 'monitorPort': module.params['https_pool_first_member_monitor_port']},\n {'name': module.params['https_pool_second_member_name'],\n 'ipAddress': module.params['https_pool_second_member_ip'],\n 'port': module.params['https_pool_second_member_port'],\n 
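# 'monitorPort' is the port the health monitor probes on this pool member\n 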
'monitorPort': module.params['https_pool_second_member_monitor_port']}\n ]},\n {'name': module.params['vmrc_pool_name'], 'algorithm':'round-robin',\n 'transparent':'false', 'monitorId':monitor_id,\n 'member': [{'name': module.params['vmrc_pool_first_member_name'],\n 'ipAddress': module.params['vmrc_pool_first_member_ip'],\n 'port': module.params['vmrc_pool_first_member_port'],\n 'monitorPort': module.params['vmrc_pool_first_member_monitor_port']},\n {'name': module.params['vmrc_pool_second_member_name'],\n 'ipAddress': module.params['vmrc_pool_second_member_ip'],\n 'port': module.params['vmrc_pool_second_member_port'],\n 'monitorPort': module.params['vmrc_pool_second_member_monitor_port']}\n ]}]\n\n for pool_info in pools_dict:\n pool_config_body['pool'] = pool_info\n add_pools_res = client_session.create('pools', uri_parameters={'edgeId': edge_id},\n request_body_dict=pool_config_body)\n return add_pools_res\n\ndef get_edge_id(session, edge_name):\n router_res = session.read('nsxEdges', 'read')['body']\n edge_summary_list = router_res['pagedEdgeList']['edgePage']['edgeSummary']\n if isinstance(edge_summary_list, list):\n for edge_summary in edge_summary_list:\n if edge_name.lower() in edge_summary['name'].lower():\n edge_id = edge_summary['objectId']\n return edge_id\n else:\n edge_id = router_res['pagedEdgeList']['edgePage']['edgeSummary']['objectId']\n return edge_id\n\n\ndef disable_firewall(session, edge_id):\n '''Disable firewall'''\n disable_firewall_body = session.extract_resource_body_schema('nsxEdgeFirewallConfig', 'update')\n disable_firewall_body['firewall']['enabled']='false'\n\n del disable_firewall_body['firewall']['defaultPolicy']\n del disable_firewall_body['firewall']['globalConfig']\n del disable_firewall_body['firewall']['rules']\n\n return session.update('nsxEdgeFirewallConfig',\n uri_parameters={'edgeId': edge_id},\n request_body_dict=disable_firewall_body)\n\n\ndef main():\n\n module = AnsibleModule(\n argument_spec=dict(\n state=dict(default='present', choices=['present', 'absent']),\n nsxmanager_spec=dict(required=True, no_log=True, type='dict'),\n nsx_edge_gateway_name=dict(required=True),\n\n app_profile_name_https=dict(required=True),\n app_profile_name_http=dict(required=True),\n app_profile_name_tcp=dict(required=True),\n\n monitor_name=dict(required=True),\n monitor_type=dict(required=True),\n monitor_interval=dict(required=True),\n monitor_time_out=dict(required=True),\n monitor_retries=dict(required=True),\n monitor_url_method=dict(required=True),\n monitor_url=dict(required=True),\n\n http_pool_name=dict(required=True),\n http_pool_first_member_name=dict(required=True),\n http_pool_first_member_ip=dict(required=True),\n http_pool_first_member_port=dict(required=True),\n http_pool_first_member_monitor_port=dict(required=True),\n\n http_pool_second_member_name=dict(required=True),\n http_pool_second_member_ip=dict(required=True),\n http_pool_second_member_port=dict(required=True),\n http_pool_second_member_monitor_port=dict(required=True),\n\n https_pool_name=dict(required=True),\n https_pool_first_member_name=dict(required=True),\n https_pool_first_member_ip=dict(required=True),\n https_pool_first_member_port=dict(required=True),\n https_pool_first_member_monitor_port=dict(required=True),\n\n https_pool_second_member_name=dict(required=True),\n https_pool_second_member_ip=dict(required=True),\n https_pool_second_member_port=dict(required=True),\n https_pool_second_member_monitor_port=dict(required=True),\n\n vmrc_pool_name=dict(required=True),\n 
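# VMRC (remote console) pool member params, mirroring the HTTP/HTTPS pool params above\n 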
vmrc_pool_first_member_name=dict(required=True),\n vmrc_pool_first_member_ip=dict(required=True),\n vmrc_pool_first_member_port=dict(required=True),\n vmrc_pool_first_member_monitor_port=dict(required=True),\n\n vmrc_pool_second_member_name=dict(required=True),\n vmrc_pool_second_member_ip=dict(required=True),\n vmrc_pool_second_member_port=dict(required=True),\n vmrc_pool_second_member_monitor_port=dict(required=True),\n\n http_virtual_server_name=dict(required=True),\n virtual_ip_address=dict(required=True),\n http_virtual_server_port=dict(required=True),\n\n https_virtual_server_name=dict(required=True),\n https_virtual_server_port=dict(required=True),\n ),\n supports_check_mode=False\n )\n\n# from nsxramlclient.client import NsxClient\n client_session=NsxClient(module.params['nsxmanager_spec']['raml_file'],\n module.params['nsxmanager_spec']['host'],\n module.params['nsxmanager_spec']['user'],\n module.params['nsxmanager_spec']['password'])\n\n edge_id = get_edge_id(client_session, module.params['nsx_edge_gateway_name'])\n disable=disable_firewall(client_session, edge_id)\n loadBalancer_config = lb_config(client_session, module, edge_id)\n update_pool_res = add_pools(client_session, module, edge_id)\n virtual_servers = add_virtual_servers(client_session, module, edge_id)\n\n module.exit_json(changed=True, argument_spec=module.params['state'], virtual_servers=virtual_servers)\n\n\n#from ansible.module_utils.basic import *\nif __name__ == '__main__':\n main()\n","sub_path":"library/vcd_load_balancer.py","file_name":"vcd_load_balancer.py","file_ext":"py","file_size_in_byte":14246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"493869833","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\n\nLEVEL = 0.1 #1-Kr\n\ndef acc(test, pred, level = LEVEL):\n cnt = 0\n for i in range(0, len(test)):\n if (test[i] > level and pred[i] > level) or (test[i] < level and pred[i] < level):\n cnt += 1\n return cnt/len(test)\n \n\ndata = pd.read_csv('~/Downloads/lab3.test.csv', sep='\\t', decimal = ',')\nX = data.drop(['Genome_ID', 'Kr', '1-Kr', '(empty)'], axis=1)\nY = data['1-Kr']\n\nregr = LinearRegression()\nregr.fit(X, Y)\n#print(mean_squared_error(Y, regr.predict(X)))\n#print(list(zip(X.columns,regr.coef_)))\nprint(acc(Y, regr.predict(X)))\n#print(regr.score(X, Y))\n","sub_path":"least_sq.py","file_name":"least_sq.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"355915830","text":"\n\none = [x.strip() for x in ('foo\\n', 'bar\\n', 'baz\\n')]\nprint(one)\n\ntwo = [int(x) for x in ('1', '2', '3')]\nprint(two)\n\n#using a dictionary with list comprehension\nd = {'foo': '10', 'bar': '20', 'baz': '30'}\nthree = [d[x] for x in ['foo', 'baz']]\nprint(three)\n\nd = {'foo': '10', 'bar': '20', 'baz': '30'}\nfour = [int(d[x].rstrip('0')) for x in ['foo', 'baz']]\n","sub_path":"python-programming-workshop/test/data_structures/list/listcomprehensionall.py","file_name":"listcomprehensionall.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"169594072","text":"import os\nimport tarfile\nimport time\n\n# load all imgs from dir\nimg_path = \"test_dir/\"\nimgs = os.listdir(img_path)\n\n\ntarFile_gz = tarfile.open(\"tar_zip/sample_gz.tar.gz\", mode=\"w:gz\")\ntarFile_bz2 = 
tarfile.open(\"tar_zip/sample_bz2.tar.bz2\", mode=\"w:bz2\")\ntarFile_xz = tarfile.open(\"tar_zip/sample_xz.tar.xz\", mode=\"w:xz\")\n\n\ntic = time.time()\n\nfor img in imgs:\n tarFile_gz.add(img_path + img)\n\ntarFile_gz.close()\ntoc = time.time()\n\nprint(\"gz Time execution : \", toc - tic)\n\n####\n\ntic = time.time()\nfor img in imgs:\n tarFile_bz2.add(img_path + img)\n\ntarFile_bz2.close()\ntoc = time.time()\n\nprint(\"bz2 Time execution : \", toc - tic)\n\n####\n\ntic = time.time()\nfor img in imgs:\n tarFile_xz.add(img_path + img)\n\ntarFile_xz.close()\ntoc = time.time()\n\nprint(\"lzma Time execution : \", toc - tic)","sub_path":"file_comp_dcomp/tarfile_comp.py","file_name":"tarfile_comp.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"446549482","text":"import pickle\nimport pandas\nimport numpy\nfrom fetch_data import read_mongo\n\n\ndef check_for_attacks(predictions,type):\n attack_idxs = []\n for i,prediction in enumerate(predictions):\n if prediction:\n attack_idxs.append(i)\n return attack_idxs\n\ndef load_model_and_infer(BATCH_SIZE = 64,INPUT_SIZE = 78):\n\n trained_model_path = './model/trained_models'\n\n input_df = read_mongo(\"idsdb\",\"flows\")\n\n # # TO TEST\n # input_df = pd.read_csv('../dataset_with_ip.csv')\n # input_df = input_df.drop([' Label',' Destination IP'],axis = 1)\n\n # if(input_df.shape[0] < 1000):\n # return None\n \n # input_df['Flow Bytes/s']=input_df['Flow Bytes/s'].astype('float64')\n # input_df[' Flow Packets/s']=input_df['Flow Packets/s'].astype('float64')\n # input_df.replace(np.inf,np.nan,inplace=True)\n # input_df['Flow Bytes/s'].fillna(input_df['Flow Bytes/s'].mean(),inplace=True)\n # input_df[' Flow Packets/s'].fillna(input_df['Flow Packets/s'].mean(),inplace=True)\n\n # input_df.replace([np.inf, -np.inf], np.nan).dropna(axis=1,inplace=True)\n\n \n # NaN_values=input_df.isnull().sum() \n # for s,i in enumerate(NaN_values):\n # print(i,input_df.columns[s])\n\n \n dos_detector = pickle.load(open(trained_model_path+'/dos_rf.sav','rb'))\n ddos_detector = pickle.load(open(trained_model_path+'/ddos_rf.sav','rb'))\n portscan_detector = pickle.load(open(trained_model_path+'/portscan_rf.sav','rb'))\n # abnormal_detector = pickle.load(open(trained_model_path + '/ab_dt.sav','rb'))\n \n atk_dst_port = []\n atk_type = []\n \n \n \n if(input_df.size > 2000):\n dos_predict = dos_detector.predict(input_df)\n ddos_predict = ddos_detector.predict(input_df)\n portscan_predict = portscan_detector.predict(input_df)\n # abnormal_predict = abnormal_detector.predict(input_df)\n else:\n return (atk_dst_port,atk_type)\n\n # check for attacks\n\n\n attack_idxs_dos = check_for_attacks(dos_predict,'dos')\n for idx in attack_idxs_dos:\n dst_port = input_df.iloc[idx]['Destination Port']\n atk_dst_port.append(int(dst_port))\n atk_type.append(0)\n\n attack_idxs_ddos = check_for_attacks(ddos_predict,'ddos')\n for idx in attack_idxs_ddos:\n dst_port = input_df.iloc[idx]['Destination Port']\n atk_dst_port.append(int(dst_port))\n atk_type.append(1)\n\n attack_idxs_portscan = check_for_attacks(portscan_predict,'portscan')\n for idx in attack_idxs_portscan:\n dst_port = input_df.iloc[idx]['Destination Port']\n atk_dst_port.append(int(dst_port))\n atk_type.append(2)\n\n # attack_idxs_ab = check_for_attacks(abnormal_predict,'unknown')\n # for idx in attack_idxs_ab:\n # dst_port = input_df.iloc[idx]['Destination Port']\n # atk_dst_port.append(int(dst_port))\n # atk_type.append(3)\n\n 
# for p,t in zip(atk_dst_port,atk_type):\n # print(p,t)\n \n print(\"Complete inferencing\")\n\n del input_df\n return (atk_dst_port,atk_type)\n\n\n \n\n \n\n\n\n\n\n\n ","sub_path":"model/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"596179959","text":"from flask import Flask, render_template, request\nfrom flask_socketio import SocketIO, emit\n\nimport serial\nfrom serial.tools.list_ports import get_ports_list # use our patched list_ports to enumerate the serial ports available on Windows\n\nasync_mode = None\n\napp = Flask(__name__, template_folder='./')\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app, async_mode=async_mode)\n\nclose_mark = False\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == \"GET\":\n return render_template('serial_test.html', async_mode=socketio.async_mode)\n if request.method == \"POST\": # stop the serial port via an HTTP POST\n ser = serial.Serial()\n ser.baudrate = 115200\n ser.port = (get_ports_list()[0][0])\n ser.xonxoff = True\n\n global serial_port\n serial_port = ser.port\n ser.close()\n\n global close_mark\n close_mark = True # the reader loop in serial_open() watches this flag and releases the port\n print(\"serial port %s is closed\" % serial_port)\n\n\n # os._exit(0) # shut down the server\n\n return \"serial port %s is closed\" % serial_port\n\n@socketio.on('serial_open', namespace='/serial')\ndef serial_open(message):\n\n global close_mark\n close_mark = False\n\n # configure the serial port parameters\n ser = serial.Serial()\n ser.baudrate = 115200\n ser.xonxoff = True\n\n try:\n ser.port = (get_ports_list()[0][0])\n except:\n print (\"There is no serial port, please check hardware connection.\")\n return\n\n global serial_port\n serial_port = ser.port\n\n # open the serial port\n ser.open()\n\n while close_mark == False:\n check_mark = []\n content_read = ser.read(50).decode(\"utf-8\", \"ignore\").split(\"\\r\\n\")\n for index in range(0, len(content_read)):\n check_mark.append(len(content_read[index].strip()))\n\n index_output = check_mark.index(max(check_mark))\n content_output = content_read[index_output].strip()\n\n emit('my_response', {'data': content_output})\n\n ser.close()\n\nif __name__ == '__main__':\n socketio.run(app)","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"318676835","text":"import numpy as np\r\nimport math\r\nfrom BondDict import *\r\nimport pandas as pd\r\nfrom bisect import bisect_right\r\nimport Bio.PDB.vectors\r\nimport random\r\n\r\nCDL = pd.read_table(\"KernRegr_CDL_v1.2.1_Oct14-2009.txt\",\r\n skiprows=87, delim_whitespace=True)\r\nODL = pd.read_table(\"omegaCDL_OmegaBetweenAsPhi1Psi0_KernRegr_v1.3.1_Aug12-2011.txt\",\r\n skiprows=87, delim_whitespace=True)\r\nNDRD = pd.read_table(r\"NDRD_TCBIG.txt\",\r\n skiprows=59, delim_whitespace=True)\r\n\r\ndef get_vector2(N):\r\n return Bio.PDB.vectors.Vector(N.x, N.y, N.z)\r\n\r\ndef getAngle(p0 = np.array([0.0,0.0,0.0]), p1 = np.array([0.0,0.0,0.0]), p2 = np.array([0.0,0.0,0.0])):\r\n v12 = p0 - p1\r\n v32 = p2 - p1\r\n cosine_angle = np.dot(v12, v32) / (np.linalg.norm(v12) * np.linalg.norm(v32))\r\n angle = math.degrees(np.arccos(cosine_angle))\r\n return angle\r\n\r\n######\r\nfrom Bio.PDB import *\r\nimport warnings\r\ndef calculateCoordinates(refA, refB, refC, L, ang, di):\r\n AV = get_vector2(refA)\r\n BV = get_vector2(refB)\r\n CV = get_vector2(refC)\r\n\r\n CA = AV - CV\r\n CB = BV - CV\r\n\r\n ##CA vector\r\n AX = CA[0]\r\n AY = 
CA[1]\r\n AZ = CA[2]\r\n\r\n ##CB vector\r\n BX = CB[0]\r\n BY = CB[1]\r\n BZ = CB[2]\r\n\r\n ##Plane Parameters\r\n A = (AY * BZ) - (AZ * BY)\r\n B = (AZ * BX) - (AX * BZ)\r\n G = (AX * BY) - (AY * BX)\r\n\r\n ##Dot Product Constant\r\n F = math.sqrt(BX * BX + BY * BY + BZ * BZ) * L * math.cos(ang * (math.pi / 180.0))\r\n\r\n ##Constants\r\n const = math.sqrt(math.pow((B * BZ - BY * G), 2) * (-(F * F) * (A * A + B * B + G * G) + (\r\n B * B * (BX * BX + BZ * BZ) + A * A * (BY * BY + BZ * BZ) - (2 * A * BX * BZ * G) + (\r\n BX * BX + BY * BY) * G * G - (2 * B * BY) * (A * BX + BZ * G)) * L * L))\r\n denom = (B * B) * (BX * BX + BZ * BZ) + (A * A) * (BY * BY + BZ * BZ) - (2 * A * BX * BZ * G) + (\r\n BX * BX + BY * BY) * (G * G) - (2 * B * BY) * (A * BX + BZ * G)\r\n\r\n X = ((B * B * BX * F) - (A * B * BY * F) + (F * G) * (-A * BZ + BX * G) + const) / denom\r\n\r\n if ((B == 0 or BZ == 0) and (BY == 0 or G == 0)):\r\n const1 = math.sqrt(G * G * (-A * A * X * X + (B * B + G * G) * (L - X) * (L + X)))\r\n Y = ((-A * B * X) + const1) / (B * B + G * G)\r\n Z = -(A * G * G * X + B * const1) / (G * (B * B + G * G))\r\n else:\r\n Y = ((A * A * BY * F) * (B * BZ - BY * G) + G * (-F * math.pow(B * BZ - BY * G, 2) + BX * const) - A * (\r\n B * B * BX * BZ * F - B * BX * BY * F * G + BZ * const)) / ((B * BZ - BY * G) * denom)\r\n Z = ((A * A * BZ * F) * (B * BZ - BY * G) + (B * F) * math.pow(B * BZ - BY * G, 2) + (A * BX * F * G) * (\r\n -B * BZ + BY * G) - B * BX * const + A * BY * const) / ((B * BZ - BY * G) * denom)\r\n\r\n # GET THE NEW VECTOR from the orgin\r\n D = Vector(X, Y, Z) + CV\r\n with warnings.catch_warnings():\r\n # ignore inconsequential warning\r\n warnings.simplefilter(\"ignore\")\r\n temp = calc_dihedral(AV, BV, CV, D) * (180.0 / math.pi)\r\n\r\n di = di - temp\r\n rot = rotaxis(math.pi * (di / 180.0), CV - BV)\r\n D = (D - BV).left_multiply(rot) + BV\r\n\r\n return D.get_array()\r\n\r\ndef cdl_spec(pos1, pos2, bond, phi, psi): #Calculates bond angles within amino acids\r\n for key, value in Lib_class.items():\r\n if aa123[pos1] in value:\r\n x1 = key\r\n for key, value in x_class.items():\r\n if aa123[pos2] in value:\r\n x2 = key\r\n if psi == 180 or psi == 175:\r\n psi = -180\r\n if phi == 180 or phi == 175:\r\n phi = -180\r\n x3 = str(x1) + str(x2)\r\n x4 = CDL.loc[(CDL[\"ResTypeGroup\"] == x3) & (CDL[\"Phi\"] == round(phi/10.0)*10) & (CDL[\"Psi\"] == round(psi/10.0)*10)]\r\n meanloc = \"m\" + str(bond)\r\n print(phi, psi)\r\n mean = float(x4[meanloc])\r\n stdloc = \"s\" + str(bond)\r\n std = float(x4[stdloc])\r\n return np.random.normal(mean, std)\r\n\r\ndef odl_spec(pos1, pos2, psi, phi): #Calculates Omega Bond angles\r\n for key, value in Lib_class.items():\r\n if aa123[pos1] in value:\r\n x1 = key\r\n for key, value in x_class.items():\r\n if aa123[pos2] in value:\r\n x2 = key\r\n if psi == 180 or phi == 175:\r\n psi = -180\r\n if phi == 180 or phi == 175:\r\n phi = -180\r\n x3 = str(x1) + str(x2)\r\n x4 = ODL.loc[(ODL[\"Phi(+1)\"] == (round(phi/10)*10)) & (ODL[\"Psi(0)\"] == (round(psi/10)*10)) & (CDL[\"ResTypeGroup\"] == x3)]\r\n mean = x4[\"mW(+1)\"]\r\n std = x4[\"sW(+1)\"]\r\n return float(np.random.normal(mean, std))\r\n\r\n\r\ndef ndrd_spec_random(pos1, pos2): #Calculates Phi and Psi angle probabilties for adjacent amino acids, right to left (N to C)\r\n x1 = aa123[pos1]\r\n x2 = aa123[pos2]\r\n NDRD_subset2 = NDRD.loc[(NDRD[\"AminoAcid\"] == x1) & (NDRD[\"AA2\"] == x2) & (NDRD[\"Pos\"] == \"right\")]\r\n NDRD_subset = NDRD.loc[(NDRD[\"AminoAcid\"] == x1) & 
(NDRD[\"AA2\"] == x2) & (NDRD[\"Pos\"] == \"right\")].iloc[:, 7]\r\n NDRD_list = NDRD_subset.values.tolist()\r\n return NDRD_subset2.iloc[bisect_right(NDRD_list, random.uniform(0.00001, 1)), 3:5]\r\n\r\n\r\ndef ndrd_spec_phi(pos1, pos2, phi): #Calculates Phi and Psi angle probabilties for adjacent amino acids, right to left (N to C)\r\n x1 = aa123[pos1]\r\n x2 = aa123[pos2]\r\n\r\n NDRD_subset2 = NDRD.loc[(NDRD[\"AminoAcid\"] == x1) & (NDRD[\"AA2\"] == x2)\r\n & (NDRD[\"Pos\"] == \"right\") & (NDRD[\"Phi\"] == (round(phi/5.0)*5))]\r\n NDRD_subset = NDRD.loc[(NDRD[\"AminoAcid\"] == x1) & (NDRD[\"AA2\"] == x2)\r\n & (NDRD[\"Pos\"] == \"right\") & (NDRD[\"Phi\"] == (round(phi/5.0)*5))].iloc[:, 7]\r\n NDRD_list = NDRD_subset.values.tolist()\r\n return NDRD_subset2.iloc[bisect_right(NDRD_list, random.uniform((NDRD_subset2.iloc[1, 7] - NDRD_subset2.iloc[1, 5]),\r\n NDRD_subset2.iloc[-1, 7])), 3:5][\"Psi\"]\r\n\r\n\r\ndef ndrd_spec_psi(pos1, pos2, psi): #Calculates Phi and Psi angle probabilties for adjacent amino acids, right to left (N to C)\r\n x1 = aa123[pos1]\r\n x2 = aa123[pos2]\r\n NDRD_subset2 = NDRD.loc[(NDRD[\"AminoAcid\"] == x1) & (NDRD[\"AA2\"] == x2)\r\n & (NDRD[\"Pos\"] == \"right\") & (NDRD[\"Phi\"] == (round(psi/5.0)*5))]\r\n NDRD_subset = NDRD.loc[(NDRD[\"AminoAcid\"] == x1) & (NDRD[\"AA2\"] == x2)\r\n & (NDRD[\"Pos\"] == \"right\") & (NDRD[\"Phi\"] == (round(psi/5.0)*5))].iloc[:, 7]\r\n NDRD_list = NDRD_subset.values.tolist()\r\n return NDRD_subset2.iloc[bisect_right(NDRD_list, random.uniform((NDRD_subset2.iloc[1, 7] - NDRD_subset2.iloc[1, 5]),\r\n NDRD_subset2.iloc[-1, 7])), 3:5][\"Phi\"]\r\n\r\n","sub_path":"Angle_Calc.py","file_name":"Angle_Calc.py","file_ext":"py","file_size_in_byte":6689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"216219142","text":"#! 
python\n\nfrom functools import reduce\n\nperson = [\n {'name': 'George', 'age': 10},\n {'name': 'Spencer', 'age': 12},\n {'name': 'Mary', 'age': 14},\n {'name': 'Logan', 'age': 16},\n {'name': 'Rachel', 'age': 18},\n {'name': 'Scotty', 'age': 20}\n]\n\nsum_ages = reduce(lambda ages, a: ages + a['age'], person, 0)\nprint(f'Sum ages: {sum_ages}')\n\nages = map(lambda a: a['age'], person)\nminors = filter(lambda m: m < 18, ages)\nsum_minors = reduce(lambda s_m, a: s_m + a, minors, 0)\nprint(f'Sum minors: {sum_minors}')\n","sub_path":"secao15_programacao_funcional/aula9_reduce/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"171492683","text":"import pandas as pd\n\ndata=pd.read_csv(\"all_stocks_5yr.csv\")\nprint(data.head(6))\n\n# convert \"date\" to datetime type\ndata.date = pd.to_datetime(data.date)\nprint(data.dtypes)\n\n## Set the DataFrame index using existing columns.\n## inplace=True : modify this DataFrame in place instead of returning a new one\ndata.set_index('date', inplace=True)\n\n\n\n","sub_path":"Operations/date-time.py","file_name":"date-time.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"24267074","text":"#!/usr/bin/env python3\nimport rospy\n\nfrom arm_lib.msg import GripperAngles\nfrom geometry_msgs.msg import Vector3\nfrom std_msgs.msg import String, Bool\n\nfrom arm_lib.srv import MoveArm, MoveGripper\n\nimport constants as Constant\nimport math\nimport time\n\n'''\n\nfinger 1 - max = 90 and min = 0 (pos) --> increase - close\nfinger 2 - max = 90 and min = 0 (pos) --> increase - open\n'''\n\n\nclass GripperCollision:\n def __init__(self):\n self.base = False\n self.finger1F = False\n self.finger2F = False\n\n def reset(self):\n self.base = False\n self.finger1F = False\n self.finger2F = False\n\n\n def checkFrontFingerCol(self):\n return self.finger1F and self.finger2F\n\n\nclass GripperControl:\n angles = None\n\n def __init__(self):\n self.gripperCollision = GripperCollision()\n \n self.gripperToggle = rospy.Service('/' + Constant.ROBOT_NAME + '/gripper/toggle', MoveGripper, self.toggleGrip)\n\n self.collisionCheckPub = rospy.Publisher('/' + Constant.ROBOT_NAME + '/gripper/collision/check', Bool, queue_size=10)\n self.gripperColSub = rospy.Subscriber('/'+ Constant.ROBOT_NAME + '/gripper/collision', String, self.checkCollision)\n self.moveFingersPub = rospy.Publisher('/' + Constant.ROBOT_NAME + '/gripper/move_fingers', GripperAngles, queue_size=10)\n\n self.default()\n\n def grip(self):\n self.angles.arm5_arm6_joint -= 1\n self.angles.arm5_arm7_joint += 1\n\n return self.angles\n\n def release(self):\n self.angles.arm5_arm6_joint += 1\n self.angles.arm5_arm7_joint -= 1\n\n return self.angles\n\n def belowLimit(self):\n return self.angles.arm5_arm6_joint < -5 or self.angles.arm5_arm6_joint > 5\n\n def beyondLimit(self):\n return self.angles.arm5_arm6_joint > 30 or self.angles.arm5_arm6_joint < -30\n\n def default(self):\n self.angles = GripperAngles(30, -30)\n self.gripperCollision.reset()\n\n def radToDeg(self, rad):\n return rad * 180 / math.pi\n\n def checkCollision(self, col):\n cols = col.data.split(\"=\")\n # print(col)\n for i in range(len(cols)):\n if len(cols[i]) <= 0:\n continue\n else:\n objs = cols[i].split(\"-\")\n if not objs[0].startswith(Constant.ROBOT_NAME):\n if \"arm6\" in objs[1]:\n self.gripperCollision.finger1F = True\n\n elif \"arm7\" in objs[1]:\n 
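# a collision on link arm7 means the second finger pad touched something\n 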
self.gripperCollision.finger2F = True\n \n\n if self.gripperCollision.finger1F or self.gripperCollision.finger2F:\n self.collisionCheckPub.publish(True)\n \n else: \n self.collisionCheckPub.publish(False)\n\n def toggleGrip(self, grip):\n innerGripped = 0\n outerGripped = 0\n self.gripperCollision.reset()\n\n if grip.action == 'catch':\n while True:\n self.moveFingersPub.publish(self.grip())\n time.sleep(0.03)\n if not self.gripperCollision.checkFrontFingerCol() and self.belowLimit():\n return False\n\n if self.gripperCollision.checkFrontFingerCol():\n if innerGripped > 6:\n break\n\n innerGripped += 1\n\n print (\"Gripped\")\n return True\n\n else:\n print(\"Release called\")\n while not self.beyondLimit():\n self.moveFingersPub.publish(self.release())\n time.sleep(0.05)\n\n return True\n\n return False\n \n\n\ndef main():\n gripperControl = GripperControl()\n\n rospy.init_node('gripper_controller') \n print(\"Gripper control initiated\")\n rospy.spin()\n \n\nif __name__=='__main__':\n main()","sub_path":"arm_ws1/build/arm_gazebo/catkin_generated/installspace/gripper_control.py","file_name":"gripper_control.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"165564212","text":"from flask import Blueprint, request, jsonify\nfrom poserrank.models import User, Group\n\napi = Blueprint('api', __name__)\n\n@api.route('/users/search', methods=['POST'])\ndef search_users():\n\t\"\"\"\n\tthis function may not scale -- I highly doubt this will ever be a problem\n\t\"\"\"\n\tif 'query' in request.form:\n\t\tif 'group' in request.form:\n\t\t\tgroup = Group.query.filter(Group.id == request.form['group'])[0]\n\t\t\tusers = [m.user for m in group.memberships]\n\t\telse:\n\t\t\tusers = User.query.all()\n\t\tall_usernames = [user.username for user in users]\n\t\t#all_fullnames = [user.full_name for user in all_users]\n\t\tsuggestions = list(filter(lambda x: request.form['query'].lower() in x.lower(), all_usernames))\n\t\t#suggestions += list(filter(lambda x: request.form['query'].lower() in x.lower(), all_usernames))\n\n\t\treturn jsonify(suggestions)\n\n\telse:\n\t\treturn \"Request must contain 'query' field\", 400","sub_path":"poserrank/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"101014549","text":"\"\"\"\nfunction argument\n\nmultiple parameters and arguments\n\n\n\"\"\"\n\ndef adding(x, y):\n z = x + y\n return z\n\n\ndef adding(x, y, z):\n sum = x + y + z\n return sum\n\n# adding n1 and n2\nn1 = 10\nn2 = 20\nresult = adding(n1, n2)\n# print(\"adding n1:{n1} and n2:{n2} = {res}\".format(n1=n1, n2=n2, res = result))\nprint(\"{n1} + {n2} = {res}\".format(n1=n1, n2=n2, res = result))\n\n\n# adding n1, n2, n3\n\n\n","sub_path":"stem1400_modules/module_4_function/func1_define/function_6_arg_3.py","file_name":"function_6_arg_3.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"179007926","text":"from unittest import mock\n\nimport asynctest\nimport pytest\n\nfrom ai.backend.client.session import Session\nfrom tests import ContextMagicMock\n\n\n@pytest.mark.asyncio\n@pytest.mark.integration\nclass TestIntegrationManager:\n\n async def test_get_manager_status(self):\n with Session() as sess:\n resp = sess.Manager.status()\n assert resp['status'] == 'running'\n assert 'active_sessions' in 
resp\n\n\nclass TestManager:\n\n def test_status(self, mocker):\n return_value = {'status': 'running', 'active_sessions': 3}\n mock_json_coro = asynctest.CoroutineMock(return_value=return_value)\n mock_req_obj = mocker.Mock()\n mock_req_obj.fetch.return_value = ContextMagicMock(status=200,\n json=mock_json_coro)\n mocker.patch('ai.backend.client.manager.Request', return_value=mock_req_obj)\n\n with Session() as session:\n resp = session.Manager.status()\n mock_req_obj.fetch.assert_called_once_with()\n assert resp['status'] == return_value['status']\n assert resp['active_sessions'] == return_value['active_sessions']\n\n def test_freeze(self, mocker):\n mock_req_obj = mocker.Mock()\n mock_req_obj.fetch.return_value = ContextMagicMock(status=204)\n mocker.patch('ai.backend.client.manager.Request', return_value=mock_req_obj)\n\n with Session() as session:\n session.Manager.freeze()\n mock_req_obj.fetch.assert_called_once_with()\n\n def test_freeze_opt_force_kill(self, mocker):\n mock_req_obj = mock.Mock()\n mock_req_obj.fetch.return_value = ContextMagicMock(status=204)\n mocker.patch('ai.backend.client.manager.Request', return_value=mock_req_obj)\n\n with Session() as session:\n session.Manager.freeze(force_kill=True)\n mock_req_obj.fetch.assert_called_once_with()\n\n def test_unfreeze(self, mocker):\n mock_req_obj = mock.Mock()\n mock_req_obj.fetch.return_value = ContextMagicMock(status=204)\n mocker.patch('ai.backend.client.manager.Request', return_value=mock_req_obj)\n\n with Session() as session:\n session.Manager.unfreeze()\n mock_req_obj.fetch.assert_called_once_with()\n","sub_path":"tests/test_manager.py","file_name":"test_manager.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"484120506","text":"'''User'''\nfrom iSoft.core.Fun import Fun\nfrom iSoft import auth, login_manager, app\nfrom flask import g, json, request\nfrom iSoft.core.AlchemyEncoder import AlchemyEncoder\nfrom iSoft.model.AppReturnDTO import AppReturnDTO\nfrom iSoft.model.framework.RequestPagesModel import RequestPagesModel\nfrom iSoft.model.framework.RequestSaveModel import RequestSaveModel\nfrom iSoft.model.framework.PostBaseModel import PostBaseModel\nfrom iSoft.entity.model import FaQuery\nfrom iSoft.dal.QueryDal import QueryDal\n\n\n@app.route('/query/list', methods=['GET', 'POST'])\n@auth.login_required\ndef query_list():\n j_data = request.json\n if j_data is None:\n return Fun.class_to_JsonStr(AppReturnDTO(False, \"Invalid parameters\"))\n in_ent = RequestPagesModel(j_data)\n where = []\n for search in in_ent.SearchKey:\n if search[\"Type\"] == \"like\":\n where.append(\n eval(\"FaQuery.%(Key)s.like('%%%(Value)s%%')\" % search))\n else:\n where.append(eval(\"FaQuery.%(Key)s%(Type)s%(Value)s\" % search))\n\n criterion = []\n for search in in_ent.OrderBy:\n search[\"Value\"] = search[\"Value\"].lower()\n criterion.append(eval(\"FaQuery.%(Key)s.%(Value)s()\" % search))\n\n _modele = QueryDal()\n re_ent, message = _modele.query_findall(in_ent.PageIndex, in_ent.PageSize,\n criterion, where)\n\n if message.IsSuccess:\n message.set_data(re_ent)\n return Fun.class_to_JsonStr(message)\n\n\n@app.route('/query/save', methods=['GET', 'POST'])\n@auth.login_required\ndef query_save():\n j_data = request.json\n if j_data is None:\n return Fun.class_to_JsonStr(AppReturnDTO(False, \"Invalid parameters\"))\n in_ent = RequestSaveModel(j_data)\n _modele = QueryDal()\n re_ent, message = _modele.query_Save(\n in_dict=in_ent.Data, saveKeys=in_ent.SaveKeys)\n if 
message.IsSuccess:\n message.set_data(re_ent)\n return Fun.class_to_JsonStr(message)\n\n\n@app.route('/query/delete', methods=['GET', 'POST'])\n@auth.login_required\ndef query_delete():\n j_data = request.json\n if j_data is None:\n return Fun.class_to_JsonStr(AppReturnDTO(False, \"Invalid parameters\"))\n in_ent = PostBaseModel(j_data)\n _modele = QueryDal()\n message = _modele.query_delete(in_ent.Key)\n return Fun.class_to_JsonStr(message)\n\n\n@app.route('/query/single_code', methods=['GET', 'POST'])\n@auth.login_required\ndef query_single_code():\n j_data = request.json\n if j_data is None:\n return Fun.class_to_JsonStr(AppReturnDTO(False, \"Invalid parameters\"))\n in_ent = PostBaseModel(j_data)\n _modele = QueryDal()\n re_ent, message = _modele.query_singleByCode(in_ent.Key)\n if message.IsSuccess:\n message.set_data(re_ent)\n return Fun.class_to_JsonStr(message)\n\n\n@app.route('/query/query', methods=['GET', 'POST'])\n@auth.login_required\ndef query_query():\n j_data = request.json\n if j_data is None:\n return Fun.class_to_JsonStr(AppReturnDTO(False, \"Invalid parameters\"))\n in_ent = RequestPagesModel(j_data)\n\n _modele = QueryDal()\n re_ent, message = _modele.query_queryByCode(\n in_ent.Key, in_ent.PageIndex, in_ent.PageSize, in_ent.OrderBy, in_ent.SearchKey)\n\n if message.IsSuccess:\n message.set_dict_data(re_ent)\n return Fun.class_to_JsonStr(message)\n","sub_path":"iSoft/QueryController.py","file_name":"QueryController.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"45519017","text":"#!/usr/bin/env python2\n\nimport sys\nimport struct\nimport datetime\n\n# You can use this method to exit on failure conditions.\ndef bork(msg):\n sys.exit(msg)\n\n# helper method\ndef is_ascii(char_arr):\n ret = True\n for c in char_arr:\n # a byte is non-ASCII if it falls outside [0, 127]\n if c < 0 or c > 127:\n return False\n return ret\n\n# Some constants. 
You shouldn't need to change these.\nMAGIC = 0xdeadbeef\nVERSION = 1\nPNGSIG = b'\\x89\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a'\n\nif len(sys.argv) < 2:\n sys.exit(\"Usage: python2 stub.py input_file.fpff\")\n\n# Normally we'd parse a stream to save memory, but the FPFF files in this\n# assignment are relatively small.\nwith open(sys.argv[1], 'rb') as fpff:\n data = fpff.read()\n\n# Hint: struct.unpack will be VERY useful.\n# Hint: you might find it easier to use an index/offset variable than\n# hardcoding ranges like 0:8\n# header layout: magic (4 bytes), version (4), timestamp (4), author (8), section count (4)\nmagic, version = struct.unpack(\"<II\", data[0:8])\nif magic != MAGIC:\n bork(\"Bad magic! Got %s but expected %s\" % (hex(magic), hex(MAGIC)))\nif version != VERSION:\n bork(\"Bad version! Got %d but expected %d\" % (int(version), VERSION))\ntimestamp, = struct.unpack(\"<I\", data[8:12])\nauthor = data[12:20]\nsection, = struct.unpack(\"<I\", data[20:24])\nif int(section) <= 0:\n bork(\"Bad section count! Expected > 0, got %d\" % int(section))\n\nprint(\"------- HEADER -------\")\nprint(\"MAGIC: %s\" % hex(magic))\nprint(\"VERSION: %d\" % int(version))\nprint(\"TIMESTAMP: \" + str(datetime.datetime.fromtimestamp(int(timestamp))))\nprint(\"AUTHOR: \" + str(author))\nprint(\"SECTION: %d\" % int(section))\n\nloc = 24\nend = len(data)\ni = 1\n\nprint(\"------- BODY -------\")\n\nwhile ((loc+8) <= end):\n # each section starts with an 8-byte header: stype (4 bytes), slen (4 bytes)\n stype, slen = struct.unpack(\"<II\", data[loc:(loc+8)])\n loc += 8\n slen_num = int(slen)\n if (slen_num > 0):\n t = int(stype)\n \n # PNG\n if t == 1:\n print(\"SECTION %d: STYPE = PNG (1)\" % i)\n # dump png data to a file\n file_name = \"pic\" + str(loc) + \".png\"\n with open(file_name, \"wb\") as binary_file:\n binary_file.write(PNGSIG)\n binary_file.write(data[loc:(loc+slen_num)])\n\n print(\"The png data was written to a file as \\\"%s\\\"\" % file_name)\n \n #DWORDS\n elif t == 2:\n print(\"SECTION %d: STYPE = DWORDS (2)\" % i)\n if ((slen_num % 8) == 0):\n svalue = struct.unpack(\"<\" + \"Q\"*(slen_num/8), data[loc:(loc+(slen_num))])[0]\n print(str(int(svalue)))\n \n #UTF8\n elif t == 3:\n print(\"SECTION %d: STYPE = UTF8 (3)\" % i)\n svalue = data[loc:(loc+slen_num)]\n try:\n print(svalue.decode('utf-8'))\n except UnicodeError:\n bork(\"Bad UTF8! Got %s but expected UTF-8 encoded text\" % svalue)\n \n #DOUBLES\n elif t == 4: \n print(\"SECTION %d: STYPE = DOUBLES (4)\" % i)\n if ((slen_num % 8) == 0):\n svalue = struct.unpack(\"<\" +\"d\"*(slen_num/8), data[loc:(loc+slen_num)])[0]\n print(str(int(svalue)))\n \n #WORDS\n elif t == 5: \n print(\"SECTION %d: STYPE = WORDS (5)\" % i)\n if ((slen_num % 4) == 0):\n svalue = struct.unpack(\"<\" + \"L\"*(slen_num/4), data[loc:(loc+slen_num)])[0]\n print(str(int(svalue)))\n \n #COORD\n elif t == 6 and slen_num == 16: \n print(\"SECTION %d: STYPE = COORD (6)\" % i)\n # two little-endian doubles: longitude, latitude\n lng, lat = struct.unpack(\"<dd\", data[loc:(loc+16)])\n print(\"(%s, %s)\" % (str(lng), str(lat)))\n \n #REFERENCE\n elif t == 8 and slen_num == 4:\n print(\"SECTION %d: STYPE = REFERENCE (8)\" % i)\n word = struct.unpack(\"<I\", data[loc:(loc+4)])[0]\n if (word < 0) or (word > int(section)):\n bork(\"Bad Reference! 
%d was not within [0, nsects - 1]\" % word)\n else:\n print(str(int(svalue)))\n \n #ASCII\n elif t == 9:\n print(\"SECTION %d: STYPE = ASCII (9)\" % i)\n svalue = data[loc:(loc+slen_num)]\n if (is_ascii(svalue)):\n print(svalue.decode(\"ascii\"))\n else:\n bork(\"Bad ASCII!\")\n else:\n print(\"INVALID STYPE!\")\n \n i += 1\n\n loc += slen_num\n print(\"\\n\")\n \n","sub_path":"week/8/stub.py","file_name":"stub.py","file_ext":"py","file_size_in_byte":4389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"268562883","text":"from torchvision import transforms\nimport torch, torchvision\nfrom albumentations import (\n HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,\n Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,\n IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, RandomBrightnessContrast, IAAPiecewiseAffine,\n IAASharpen, IAAEmboss, Flip, OneOf, Compose, Normalize, Cutout\n)\nfrom albumentations.pytorch.transforms import ToTensor\nfrom PIL import Image\nimport numpy as np\nimport albumentations as A\nimport albumentations.pytorch as AP\n\n\nclass LoadData:\n def __init__(self):\n pass\n \n class AlbumTransformer(object):\n def strong_aug(self,p=.5):\n return Compose([\n Flip(),\n Transpose(),\n OneOf([\n IAAAdditiveGaussianNoise(),\n GaussNoise(),\n ], p=0.2),\n OneOf([\n MotionBlur(p=.2),\n MedianBlur(blur_limit=3, p=0.1),\n Blur(blur_limit=3, p=0.1),\n ], p=0.2),\n ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),\n OneOf([\n OpticalDistortion(p=0.3),\n GridDistortion(p=.1),\n IAAPiecewiseAffine(p=0.3),\n ], p=0.2),\n OneOf([\n CLAHE(clip_limit=2),\n IAASharpen(),\n IAAEmboss(),\n RandomBrightnessContrast(), \n ], p=0.3),\n HueSaturationValue(p=0.3),\n Cutout(num_holes=1, max_h_size=8, max_w_size=8, fill_value=0.5*255)\n # Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n # ToTensor()\n ], p=p)\n\n def augment(self, fun, image):\n return fun(image=image)['image']\n\n def __call__(self, img):\n aug = self.strong_aug(p=0.9)\n return Image.fromarray(self.augment(aug, np.array(img)))\n\n class TestAlbTransforms:\n def __init__(self, transforms_list=[]):\n transforms_list = []\n transforms_list.append(A.Normalize(mean=0.5,std=0.5))\n transforms_list.append(AP.ToTensor())\n self.transforms = A.Compose(transforms_list)\n\n def __call__(self, img):\n img = np.array(img)\n #print(img)\n return self.transforms(image=img)['image']\n\n def load_data(self):\n transform = transforms.Compose(\n [\n self.AlbumTransformer(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n #transforms.RandomErasing(p=0.5, scale=(0.005,0.055), ratio=(0.05,0.5))\n ])\n \n trainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=128,\n shuffle=True, num_workers=4)\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n testset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=self.TestAlbTransforms())\n test_loader = torch.utils.data.DataLoader(testset, batch_size=128,\n shuffle=False, num_workers=4)\n\n # classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n return train_loader, 
test_loader","sub_path":"session9/albumentation_data.py","file_name":"albumentation_data.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"589578657","text":"import random\n\nfrom entities.drawable_entity import DrawableEntity\nfrom entities.crumb import Crumb\nfrom entities.message import MESSAGE_WAIT, ComeMessage\nfrom utils import rect_in_world, rects_are_overlapping, normalize\n\n\nclass Explorer(DrawableEntity):\n SIZE = 7\n MAX_VELOCITY = 1.3\n PICKUP_REACH = 1\n SENSOR_RANGE = 15\n SENSE_DELAY = 100\n COLOR = 'blue'\n HAS_ROCK_COLOR = 'yellow'\n SENSOR_COLOR = 'yellow'\n\n def __init__(self, x, y, world, collaborative = False):\n self.x = x\n self.y = y\n self.world = world\n self.dx, self.dy = self._get_new_direction()\n self.ticks = 0\n self.has_rock = False\n self.inbox = []\n self.collaborative = collaborative\n\n def draw(self, canvas):\n helper = Explorer(self.x, self.y, self.world)\n helper.SIZE = 2 * self.SENSOR_RANGE + self.SIZE\n top_left, bottom_right = helper.get_bounds()\n canvas.create_oval(top_left.x,\n top_left.y,\n bottom_right.x,\n bottom_right.y,\n outline=self.SENSOR_COLOR)\n\n top_left, bottom_right = self.get_bounds()\n canvas.create_rectangle(top_left.x,\n top_left.y,\n bottom_right.x,\n bottom_right.y,\n fill=self.HAS_ROCK_COLOR if self.has_rock else self.COLOR)\n\n def clear_inbox(self):\n self.inbox = []\n\n def clear_inbox_from(self, source):\n self.inbox = [msg for msg in self.inbox if msg.source != source]\n\n def transfer_rock_to_carrier(self):\n self.has_rock = False\n\n def tick(self):\n self._tick()\n self.ticks += 1\n\n def _tick(self):\n # Handle layer 1\n if not self._can_move() and not self.has_rock:\n self.dx, self.dy = self._get_new_direction()\n\n\n # Handle layer 2\n if self.has_rock and self._drop_available():\n self.has_rock = False;\n self.world.rock_collected()\n return\n\n # Handle layer 3\n\n if self.has_rock and not self._drop_available(): # and not self._sense_crumbs():\n self.dx, self.dy = normalize(self.world.mars_base.x - self.x,\n self.world.mars_base.y - self.y)\n # Drop crumbs con collaborative learning\n if self.collaborative and not self._sense_crumbs():\n self.world.add_entity(Crumb(self.x-self.dx, self.y-self.dy))\n self.world.add_entity(Crumb(self.x-self.dx, self.y-self.dy))\n\n # Handle layer 4\n rock = self._rock_available()\n if not self.has_rock:\n if rock:\n self.has_rock = True\n self.world.remove_entity(rock)\n return;\n rock = self._sense_rock()\n if rock:\n self.dx, self.dy = normalize(rock.x - self.x, rock.y - self.y)\n\n if self.collaborative:\n # Handle layer 5 (collaborative)\n crumb = self._crumb_available()\n if not self.has_rock: \n if crumb:\n self.world.remove_entity(crumb)\n\n crumb = self._sense_crumbs()\n if crumb:\n self.dx, self.dy = normalize(crumb.x - self.x, crumb.y - self.y)\n\n # Handle layer 4 (6 collaborative)\n\n self._move()\n\n def _move(self):\n self.x += self.dx\n self.y += self.dy\n\n def _get_new_direction(self):\n dx = random.uniform(-self.MAX_VELOCITY, self.MAX_VELOCITY)\n dy = random.uniform(-self.MAX_VELOCITY, self.MAX_VELOCITY)\n return normalize(dx, dy)\n\n def _can_move(self):\n new_self = Explorer(self.x + self.dx,\n self.y + self.dy,\n self.world)\n bounds = new_self.get_bounds()\n\n if not rect_in_world(bounds, new_self.world):\n return False\n\n for other in new_self.world.entities:\n # Allow collisions with other explorers.\n if isinstance(other, Explorer):\n continue\n\n if 
rects_are_overlapping(bounds, other.get_bounds()):\n                return False\n\n        return True\n\n    def _rock_available(self):\n        for rock in self.world.rocks:\n            if rects_are_overlapping(self.get_bounds(),\n                                     rock.get_bounds(),\n                                     self.PICKUP_REACH):\n                return rock\n\n        return None \n\n    def _crumb_available(self):\n        for crumb in self.world.crumbs:\n            if rects_are_overlapping(self.get_bounds(),\n                                     crumb.get_bounds(),\n                                     self.PICKUP_REACH):\n                return crumb\n\n        return None\n\n    def _sense_crumbs(self):\n        # Wait a bit so that the explorers spread out.\n        if self.ticks < self.SENSE_DELAY:\n            return None\n\n        for crumb in self.world.crumbs:\n            if rects_are_overlapping(self.get_bounds(),\n                                     crumb.get_bounds(),\n                                     self.SENSOR_RANGE):\n                return crumb\n\n        return None\n\n    def _sense_rock(self):\n        # Wait a bit so that the explorers spread out.\n        if self.ticks < self.SENSE_DELAY:\n            return None\n\n        for rock in self.world.rocks:\n            if rects_are_overlapping(self.get_bounds(),\n                                     rock.get_bounds(),\n                                     self.SENSOR_RANGE):\n                return rock\n\n        return None\n\n    def _drop_available(self):\n        if rects_are_overlapping(self.get_bounds(),\n                                 self.world.mars_base.get_bounds(),\n                                 self.PICKUP_REACH):\n            return True\n        return False\n","sub_path":"entities/explorer.py","file_name":"explorer.py","file_ext":"py","file_size_in_byte":5951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"354385726","text":"import numpy as np\n\nfrom agents import BaseAgent\n\nclass FrozenAgent(BaseAgent):\n    def __init__(self, gamma, states, policy):\n        self.states = states\n        super(FrozenAgent, self).__init__()\n        \n        self.gamma = gamma # discount parameter\t\n        self.policy = policy # mapping of states to actions\t\t\n        self.statesReturns = {} # states and the discounted returns that followed\n\t\t\n    def initVariables(self):\n        self.v = {}\n        for state in self.states:\n            self.v[state] = 0 \n\n    def update(self):\n        G = 0\t\n\t\t# assemble discounted future rewards from the agent's memory\t\n        for state, reward in reversed(self.memory):\t\t\t\n            if state not in self.statesReturns:\n                self.statesReturns[state] = [G]\n            else:\n                self.statesReturns[state].append(G)\n            G = reward + self.gamma * G\n\n        # use discounted future rewards to calculate averages for each state\n        for state in self.statesReturns:\n            self.v[state] = np.mean(self.statesReturns[state])\n\n        self.memory = []\n\n    def chooseAction(self, state):\t\t\n        action = self.policy[state]\t\t\n        return action\n\t\t\n    def print(self):\n        for state in self.v:\n            print(state, '%.5f' % self.v[state])","sub_path":"src/agents/frozen_agent.py","file_name":"frozen_agent.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"481175799","text":"# Delete a linked-list node in O(1) time\n# Given the head pointer of a singly linked list and a pointer to a node, delete that node in O(1) time\n\n\n# Two ways to delete a node from a singly linked list:\n# 1. Traverse from the head, O(n): find the node before it, preNode, then set preNode.next = preNode.next.next\n# 2. No traversal from the head, O(1): take the node's successor nextNode, then set node.data = nextNode.data, node.next = nextNode.next\nclass Node:\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n\n\n# Average time complexity O(1)\ndef deleteNode(head, toBeDeleted):\n    if head is None or toBeDeleted is None:\n        return\n    # O(1) case: copy the successor's data and splice the successor out\n    if toBeDeleted.next is not None:\n        nextNode = toBeDeleted.next\n        toBeDeleted.data = nextNode.data\n        toBeDeleted.next = nextNode.next \n    elif head == toBeDeleted:\n        head = None\n    # Deleting the tail node of a multi-node list: here we must traverse from the head, O(n)\n    else:\n        temp = head\n        while temp.next is not toBeDeleted:\n            temp = temp.next\n        temp.next = None\n\ndef 
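# A tiny standalone check of the backward return recurrence the FrozenAgent record above
# uses (G = reward + gamma * G, appended before the update, so each state stores the return
# that followed it). The episode below is made up purely for illustration.
gamma = 0.9
memory = [("s0", 1.0), ("s1", 0.0), ("s2", 2.0)]  # (state, reward) pairs in visit order
G = 0.0
returns = {}
for state, reward in reversed(memory):
    returns.setdefault(state, []).append(G)  # return observed *after* this state, as in the record
    G = reward + gamma * G
assert returns == {"s2": [0.0], "s1": [2.0], "s0": [1.8]}
assert abs(returns["s0"][0] - (0.0 + 0.9 * 2.0)) < 1e-9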
buildLinkedNode(arr):\n if len(arr) == 0:\n return None\n node = Node(arr[len(arr)-1])\n node.next = None\n head = node\n if len(arr) >= 2:\n for i in range(len(arr)-2, -1, -1):\n node = Node(arr[i])\n node.next = head\n head = node\n return head\n\ndef outputLinkedNode(head):\n if head is None:\n return []\n arr = []\n temp = head\n while temp is not None:\n arr.append(temp.data)\n temp = temp.next\n return arr\n\ndef getNode(head, index):\n if head is None or index < 0:\n return None\n if index == 0:\n return head\n temp = head\n for i in range(index):\n if temp.next is None:\n return None\n temp = temp.next\n return temp\n\nif __name__ == \"__main__\":\n arr = [1,2,3,4,5,6,7,8]\n head = buildLinkedNode(arr)\n print(outputLinkedNode(head))\n print(getNode(head, 2).data)\n deleteNode(head, getNode(head, 2))\n print(outputLinkedNode(head))\n\n \n ","sub_path":"CodingInterviews/18.1.删除链表的节点.py","file_name":"18.1.删除链表的节点.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"395205177","text":"# Two wires are connected to a central port and extend outward on a grid. Trace the path each wire takes as it\n# leaves the central port. The wires twist and turn, but the two wires occasionally cross paths. To fix the circuit,\n# find the intersection point closest to the central port, using the Manhattan distance.\n\n\ndef left(current, num, wire_path):\n # x-axis decreases\n new = current[0] - num\n # add all grid points passed to wire_path\n for i in range(new, current[0]):\n wire_path.add((i, current[1]))\n return new\n\ndef right(current, num, wire_path):\n # x-axis increases\n new = current[0] + num\n for i in range(current[0]+1, new+1):\n wire_path.add((i, current[1]))\n return new\n\ndef up(current, num, wire_path):\n # y-axis decreases\n new = current[1] - num\n for i in range(new, current[1]):\n wire_path.add((current[0], i))\n return new\n\ndef down(current, num, wire_path):\n # y-axis increases\n new = current[1] + num\n for i in range(current[1]+1, new+1):\n wire_path.add((current[0], i))\n return new\n\n\ndef follow_wire(wire):\n current = [0, 0]\n wire_path = set()\n for x in wire:\n print(current)\n print(x)\n if x[0] == \"L\":\n current[0] = left(current, int(x[1:]), wire_path)\n elif x[0] == \"R\":\n current[0] = right(current, int(x[1:]), wire_path)\n elif x[0] == \"U\":\n current[1] = up(current, int(x[1:]), wire_path)\n elif x[0] == \"D\":\n current[1] = down(current, int(x[1:]), wire_path)\n else:\n print(\"not found\")\n return wire_path\n\ndef find_crossed(path1, path2):\n crossed = []\n for x in path1:\n if x in path2:\n crossed.append(x)\n return crossed\n\ndef find_closest(crossed):\n manhattan_distance = []\n for x in crossed:\n distance = abs(x[0]) + abs(x[1])\n manhattan_distance.append(distance)\n closest = min(manhattan_distance)\n return closest\n\ndef main():\n with open(\"Day3_input.txt\", 'r') as f:\n wires = []\n for line in f:\n wires.append(line)\n wire_a = wires[0].split(\",\")\n wire_b = wires[1].split(\",\")\n\n a_path = follow_wire(wire_a)\n print(\"a_path = \" + str(a_path))\n\n b_path = follow_wire(wire_b)\n print(\"b_path = \" + str(b_path))\n\n crossed = find_crossed(a_path, b_path)\n print(\"Crossed = \" + str(crossed))\n\n answer = find_closest(crossed)\n print(\"Closest crossed wire = \" + str(answer))\n\n\nif __name__ == \"__main__\":\n 
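# A compact standalone check of the O(1) "copy the successor" deletion that the record
# above implements; the local class and helper names here are illustrative only.
class _N:
    def __init__(self, data):
        self.data, self.next = data, None

def _delete_o1(node):
    # O(1) path only: overwrite this node with its successor, then splice the successor out
    nxt = node.next
    node.data, node.next = nxt.data, nxt.next

a, b, c = _N(1), _N(2), _N(3)
a.next, b.next = b, c
_delete_o1(b)  # "delete" the middle node without knowing its predecessor
assert (a.data, a.next.data, a.next.next) == (1, 3, None)
# The tail has no successor, which is exactly why deleteNode falls back to an O(n) scan there.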
main()","sub_path":"Day3_v2/Day3a_v2.py","file_name":"Day3a_v2.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"320569479","text":"import requests\nimport json\n\nclass Download_Info():\n def __init__(self, file_name):\n self.file_name = file_name\n\n def get_info_for_var_id(self, id):\n full_data = requests.get('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=clinvar&id=' + id + '&retmode=json')\n self.full_data_dict = json.loads(full_data.text)\n return self.full_data_dict\n\n def write_to_file(self):\n with open(self.file_name, 'r') as f:\n lines = f.readlines()\n lines = lines[70:80]\n for line in lines:\n line = line.split('\\t')\n (self.get_info_for_var_id(line[5]))\n with open(\"download_info.txt\", 'a') as d:\n json.dump(self.get_info_for_var_id(line[5]), d)\n d.write(\"\\n\\n\")\n\ndownload_files = Download_Info(\"clinvar.tsv\")\ndownload_files.write_to_file()\n\n","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"253012127","text":"#Bucles / ciclos\n\n#While\n\nedad = 0\nwhile edad <= 20:\n print (\"Tienes:\"+str(edad))\n edad = edad + 1\n\n#Condicion mas ciclo\nedad =0\nwhile edad <= 20:\n if edad == 15:\n edad = edad + 1\n break\n print (\"Tienes:\"+str(edad))\n edad = edad + 1\n\n#for i \n\nlista = ['elemento 1','elemento 2','elemento 3']\n\n#recorrer letras de una string\nfor letra in \"cadena\":\n print(letra)\n\n#recorrer una lista / tupla o diccionario\nfor elementos in lista:\n print(elementos)\n","sub_path":"bucles.py","file_name":"bucles.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"143063481","text":"#\n# @lc app=leetcode.cn id=47 lang=python\n#\n# [47] 全排列 II\n#\n\n# @lc code=start\nclass Solution(object):\n def permuteUnique(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n res = []\n path = []\n used = [0 for _ in range(len(nums))]\n nums.sort()\n self.dfs(nums,res,path,used,0)\n return res\n def dfs(self,nums,res,path,used,usednums):\n if usednums == len(nums):\n res.append(path[:])\n return\n for i in range(len(nums)):\n if(used[i] == 0):\n #上一个相同的元素刚刚用过,处在同一层,被置0,则跳过\n if(i>0 and nums[i] == nums[i-1] and used[i-1] == 0):\n continue\n used[i] = 1\n path.append(nums[i])\n self.dfs(nums,res,path,used,usednums+1)\n used[i] = 0\n path.pop()\n return\n\n# @lc code=end\n\n","sub_path":"Week_02/47.全排列-ii.py","file_name":"47.全排列-ii.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"468396789","text":"import random\nimport colorama\n\ncolorama.init()\nITEMS = [\"jablko\", \"hruska\", \"banan\", \"hrozno\", \"clover\"] \nzelena = \"\\033[1;31m\"\nreset = \"\\033[0m\"\n\nprve = None\ndruhe = None\ntretie = None\npeniaze = 50\ndef hrat():\n\tglobal prve, druhe, tretie, peniaze\n\thrat = otazka()\n\twhile hrat == True and peniaze != 0:\n\t\tprve = tocit()\n\t\tdruhe = tocit()\n\t\ttretie = tocit()\n\t\tvypis()\n\t\thrat = otazka()\n\ndef otazka():\n\tglobal peniaze\n\todpoved = str(input(\"Chcete hrat dalej?\"))\n\tif odpoved == \"A\":\n\t\treturn True\n\telif odpoved == \"N\":\n\t\treturn False\n\telse:\n\t\tprint (\"Zla odpoved\")\n\ndef tocit():\n\tcislo = random.randint(0,4)\n\treturn 
ITEMS[cislo]\n\ndef vypis():\n\tglobal prve, druhe, tretie, peniaze\n\tif prve == druhe == tretie == \"clover\":\n\t\twin = 200\n\telif prve == druhe == tretie:\n\t\twin = 10\n\telif prve == druhe:\n\t\twin = 5\n\telif druhe == tretie:\n\t\twin = 5\n\telif prve == tretie:\n\t\twin = 5\n\telse:\n\t\twin = -1\n\tpeniaze += win\n\tif win > 0:\n\t\tprint (prve, druhe, tretie, zelena+\"you won\", win)\n\t\tprint (reset+\"You have\", peniaze,\"grc\")\n\telse:\n\t\tprint (prve, druhe, tretie,\"You lost\")\nif __name__ == '__main__':\n\thrat()","sub_path":"automat.py","file_name":"automat.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"458399312","text":"\n\nfrom xai.brain.wordbase.nouns._neigh import _NEIGH\n\n#class header\nclass _NEIGHED(_NEIGH, ):\n\tdef __init__(self,): \n\t\t_NEIGH.__init__(self)\n\t\tself.name = \"NEIGHED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"neigh\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_neighed.py","file_name":"_neighed.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"178764573","text":"import numpy as np\r\nimport cv2\r\nimport pickle\r\nfrom tensorflow.python.keras.models import load_model\r\n\r\n\r\n###parameters###\r\n\r\nwidth = 640\r\nheight = 480\r\nthreshold = 0.65\r\n#threshold means minimum probability to classify\r\n\r\n\r\n#this is the code for creating the camera object\r\ncapture = cv2.VideoCapture(0)\r\ncapture.set(3,width)\r\ncapture.set(4,height)\r\n\r\n#here I'm loading the saved pretrained model\r\nmodel = load_model('model.h5')\r\n\r\n#this is the code for processing\r\ndef preProcessing(img):\r\n    img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n    img = cv2.equalizeHist(img)\r\n    img = img/255\r\n    return img\r\n\r\nwhile True:\r\n    success, imgOriginal = capture.read()\r\n    img = np.asarray(imgOriginal)\r\n    img=cv2.resize(img,(32,32))\r\n    img = preProcessing(img)\r\n    cv2.imshow(\"Processed Image\",img)\r\n    img = img.reshape(1, 32, 32, 1)\r\n    #prediction\r\n    classIndex = int(model.predict_classes(img))\r\n\r\n    labelDictionary = {0: '0', 1: 'අ', 2: 'ඉ', 3: 'ඊ', 4: 'උ', 5: 'එ', 6: 'ඒ', 7: 'ඔ', 8: 'ක', 9: 'ක්', 10: 'කා',\r\n                       11: 'කැ', 12: 'කෑ', 13: 'කි', 14: 'කී', 15: 'කු', 16: 'කූ', 17: 'කෙ', 18: 'කේ', 19: 'කො',\r\n                       20: 'කෝ', 21: 'ඛ', 22: 'ග', 23: 'ගි', 24: 'ගී', 25: 'ගු', 26: 'ගූ', 27: 'ඝ', 28: 'ඟ', 29: 'ච',\r\n                       30: 'ඡ', 31: 'ජ', 32: 'ජ්', 33: 'ජි', 34: 'ජී', 35: 'ඣ', 36: 'ඤ', 37: 'ඥ', 38: 'ට', 39: 'ඨ',\r\n                       40: 'ඩ',\r\n                       41: 'ඪ', 42: 'ණ', 43: 'ඬ', 44: 'ත', 45: 'ත්', 46: 'ථ', 47: 'ථි', 48: 'ථී', 49: 'ද', 50: 'දු',\r\n                       51: 'දූ', 52: 'ධ', 53: 'න', 54: 'ඳ', 55: 'ප', 56: 'පු', 57: 'පූ', 58: 'ඵ', 59: 'බ', 60: 'භ',\r\n                       61: 'ම',\r\n                       62: 'ම්', 63: 'මි', 64: 'මී', 65: 'ඹ', 66: 'ය', 67: 'ර', 68: 'ල', 69: 'ව', 70: 'ව්', 71: 'වි',\r\n                       72: 'වී', 73: 'වු', 74: 'වූ', 75: 'ශ', 76: 'ෂ', 77: 'ස', 78: 'හ', 79: 'ළ', 80: 'ළු', 81: 'ෆ',\r\n                       82: 'ා'}\r\n\r\n\r\n\r\n    predictions = model.predict(img)\r\n    predictedLetter=labelDictionary.get(classIndex)\r\n    probabilityValue = np.amax(predictions)\r\n    print(predictedLetter, probabilityValue)\r\n\r\n    if probabilityValue > threshold:\r\n        cv2.putText(imgOriginal, str(predictedLetter) + \" \" + str(probabilityValue),\r\n                    (50, 50), cv2.FONT_HERSHEY_COMPLEX,\r\n                    1, (0, 0, 255), 1)\r\n\r\n    cv2.imshow(\"Testing Window\", imgOriginal)\r\n    if cv2.waitKey(1) & 0xFF == ord('q'):\r\n        
break\r\n\r\n","sub_path":"OCR_CNN_Testing.py","file_name":"OCR_CNN_Testing.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"297232450","text":"import pickle\nimport datetime\n\nimport THESIS2019.utils.to_lexicon as lex\nfrom THESIS2019.utils.base_words import *\nimport THESIS2019.utils.get_articles as get\n\nimport numpy as np\nimport pandas as pd\nfrom collections import defaultdict\nimport nltk\nfrom gensim import corpora\nfrom gensim.models.coherencemodel import CoherenceModel\n\n\n\ndef pickle_dump(outlet, articles, dictionary, corpus, model):\n with open(\"data/\"+outlet+\"_model2012.pkl\",\"wb\") as f:\n pickle.dump(model, f)\n with open(\"data/\"+outlet+\"_articles2012.pkl\",\"wb\") as f:\n pickle.dump(articles, f)\n with open(\"data/\"+outlet+\"_dictionary2012.pkl\",\"wb\") as f:\n pickle.dump(dictionary, f)\n with open(\"data/\"+outlet+\"_corpus2012.pkl\",\"wb\") as f:\n pickle.dump(corpus, f)\n \ndef pickle_load(outlet):\n with open(\"data/\"+outlet+\"_model2012.pkl\",\"rb\") as f:\n model = pickle.load(f)\n with open(\"data/\"+outlet+\"_dictionary2012.pkl\",\"rb\") as f:\n dictionary = pickle.load(f)\n with open(\"data/\"+outlet+\"_articles2012.pkl\",\"rb\") as f:\n articles = pickle.load(f)\n with open(\"data/\"+outlet+\"_corpus2012.pkl\",\"rb\") as f:\n corpus = pickle.load(f)\n return articles, dictionary, corpus, model\n \n\ndef get_lda_models(article_set, load=True, store=False):\n arts_list, dictionary_list, corpus_list, model_list = defaultdict(lambda:0),defaultdict(lambda:0),defaultdict(lambda:0),defaultdict(lambda:0)\n \n for outlet, articles in article_set.items():\n print(\"%s has %d articles\" %(outlet, len(articles)))\n \n if load:\n arts, dictionary, corpus, model = pickle_load(outlet)\n else:\n art_set = {outlet:articles}\n arts, dictionary, corpus, model = lex.LDA(art_set, LEFT_WORDS+RIGHT_WORDS,num_topics=30)\n if store:\n pickle_dump(outlet, arts, dictionary, corpus, model)\n \n arts_list[outlet]=arts\n dictionary_list[outlet]=dictionary\n corpus_list[outlet]=corpus\n model_list[outlet]=model\n \n return arts_list, dictionary_list, corpus_list, model_list\n\n\n\ndef print_topics(model):\n topics = model.show_topics(num_topics=-1, num_words=5, log=False, formatted=False)\n for idx, topic in topics:\n print (\"topic \" + str(idx) + \": \" + (\", \").join([str(t[0]) for t in topic]))\n \n\ndef pmi(wordslist, texts, corpus, dictionary):\n cm = CoherenceModel(topics=wordslist, corpus=corpus, dictionary=dictionary, coherence='u_mass')#coherence='c_npmi')\n return cm.get_coherence()\n\n\n# for each topic, compare to other topic\ndef compare_topics(t1, t2, texts, corpus, dictionary):\n permutations = []\n for w1 in t1:\n for w2 in t2:\n permutations.append([w1,w2])\n return pmi(permutations,texts,corpus,dictionary)\n\n\ndef pairwise_compare_models(model1, model2, texts, corpus, dictionary):\n topics1 = model1.show_topics(num_topics=-1, num_words=10, log=False, formatted=False)\n topics2 = model2.show_topics(num_topics=-1, num_words=10, log=False, formatted=False)\n \n pmis_col = defaultdict(lambda:0)\n for idx1, topic1 in topics1[:5]:\n pmis_row = defaultdict(lambda:0)\n for idx2, topic2 in topics2[:5]:\n t1 = [str(t[0]) for t in topic1]\n t2 = [str(t[0]) for t in topic2]\n print()\n print (\"topic \" + str(idx1) + \": \" + (\", \").join(t1))\n print (\"topic \" + str(idx2) + \": \" + (\", \").join(t2))\n \n pmi = compare_topics(t1, t2, texts, corpus, dictionary)\n 
print(\"pmi between t%s and t%s: %6.4f\" %(idx1, idx2, pmi))\n\n pmis_row[\"b\"+str(idx2)]=pmi\n pmis_col[\"a\"+str(idx1)]=pmis_row\n df =pd.DataFrame(pmis_col)\n print(df)\n print(list(df.values)[0])\n \n\nif __name__ == '__main__':\n datapath = \"/Users/ninawang/Thesis/remote/THESIS2019/example_data_1000/\"\n # outlets = ['BREITBART','NATIONALREVIEW','FOX',\n # 'WASHINGTONEXAMINER','REUTERS','NPR',\n # 'NYT', 'MSN','CNN','SLATE']\n outlets = ['NYT', 'CNN']\n articles = get.get_articles_outlets(datapath,outlets,2012,filter_date=False)\n\n arts_list, dictionary_list, corpus_list, model_list = get_lda_models(articles, load=True, store=False)\n\n outlet_list = list(articles.keys())\n compare = []\n for i in range(len(outlet_list)):\n for j in range(i+1, len(outlet_list)):\n outlet1 = outlet_list[i]\n outlet2 = outlet_list[j]\n # get models\n model1 = model_list[outlet1]\n model2 = model_list[outlet2]\n # get joint texts, dictionary, corpus\n texts = list(arts_list[outlet1].values())[0]+list(arts_list[outlet2].values())[0]\n dictionary = corpora.Dictionary(texts)\n corpus = [dictionary.doc2bow(text) for text in texts]\n # compare models\n print(\"comparing %s and %s\"%(outlet1, outlet2))\n print_topics(model1)\n print_topics(model2)\n\n\n pairwise_compare_models(model1, model2, texts, corpus, dictionary)\n \n\n\n\n\n# get_coh([[\"olymp\",\"sport\"]],texts,corpus,dictionary)\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PART1/TopicModel/LDA/pmi_consistency.py","file_name":"pmi_consistency.py","file_ext":"py","file_size_in_byte":5141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"313594405","text":"import sys\n\n# TensorFlow and tf.keras\nimport tensorflow as tf\nfrom tensorflow import keras\n\n# Helper libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\ndef sigmoid(x):\n\treturn np.exp(x)/( 1. + np.exp(x) )\n\n\n# 1. Prepare data: marks vs hours of studying (reviewing)\nX = np.array([0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 1.75, 2.00, 2.25, 2.50, \n 2.75, 3.00, 3.25, 3.50, 4.00, 4.25, 4.50, 4.75, 5.00, 5.50])\ny = np.array([0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1])\n\nif(True):\n\t# [array([[1.5909579]], dtype=float32), array([-4.091234], dtype=float32)]\n\tplt.plot(X, 1.59*X - 4.09, 'r-')\n\t# plt.plot(X, sigmoid(X), 'b--')\n\tplt.plot(X,y)\n\tplt.show()\n\tsys.exit()\n\n\n# 2. Build model\nxmodel = keras.models.Sequential()\nxmodel.add( keras.layers.Dense( 1, input_shape=(1,) ) )\nxmodel.add( keras.layers.Activation('sigmoid') )\n\n# 3. Gradient descent optimizer and loss function\nsgd = keras.optimizers.SGD(lr=0.05)\nxmodel.compile( loss=keras.losses.binary_crossentropy,\n optimizer=sgd )\n\n# 4. 
Train the model\nxmodel.fit( X, y, epochs=3000, batch_size=1 )\n\nres = xmodel.get_weights()\n\nprint( res )","sub_path":"basics/logistic_regression_with_keras.py","file_name":"logistic_regression_with_keras.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"536771162","text":"def includeme(config):\n    # Use this instead of a separate static view so that everything lives in\n    # /static and it's possible to replace something without changing code.\n    # Note that the downside here is that you have to use 'spline:assets' even\n    # for assets that don't actually live there!\n    config.override_asset(\n        to_override='spline:assets/',\n        override_with='floraverse_com:assets/',\n    )\n\n    # This is totes gross\n    config.registry.settings['mako.directories'].insert(0, 'floraverse_com:templates')\n    config.registry.settings['scss.asset_path'] = (\n        'floraverse_com:assets/scss\\n' +\n        config.registry.settings['scss.asset_path']\n    )\n","sub_path":"floraverse_com/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"363916899","text":"import sys\n\nimport requests\nfrom bs4 import BeautifulSoup\n\npayload = {\n    'user_id': 'sample',\n    'password': 'sample',\n    'login': 'Login',\n    'client_date_string': 'Thu Apr 07 2016 14:09:47 GMT-0600 (Mountain Standard Time)',\n    'tz_offset': -6\n}\n\n\nwith requests.Session() as session:\n    try:\n        req = session.post(url='https://foodclub.org/sample/login', data=payload)\n        req2 = session.get('https://foodclub.org/sample/bookkeeper')\n    except requests.RequestException:\n        print('Error in request.')\n        sys.exit()\n\n    soup = BeautifulSoup(req2.text, 'html.parser')\n    all_spans = soup.find_all('span')\n\n    for one_span in all_spans:\n        print(one_span.text)\n","sub_path":"Optum Tech/IN1468 available until 12-31-20/IN1468_student_files/student_files/ch04_network_prog/requests_login.py","file_name":"requests_login.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"399848789","text":"import requests\nimport coreapi\nimport coreschema\nfrom rest_framework import viewsets, authentication\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.schemas import ManualSchema\n\n\nclass SimilarWordsCore(APIView):\n    \"\"\"\n    Runs a similar_words query\n    \"\"\"\n    schema = ManualSchema(fields=[\n        coreapi.Field(\n            \"original_text\",\n            required=True,\n            location=\"query\",\n            schema=coreschema.String(min_length=3)\n        ),\n        coreapi.Field(\n            \"original_language\",\n            required=True,\n            location=\"query\",\n            schema=coreschema.String()\n        ),\n        coreapi.Field(\n            \"final_language\",\n            required=True,\n            location=\"query\",\n            schema=coreschema.String()\n        ),\n    ])\n    endpoint = \"http://core:8000/similar_words\"\n\n    def get(self, request, format=None):\n        r = requests.get(self.endpoint, params=request.query_params)\n\n        try:\n            data = r.json()\n        except:\n            data = r.text\n        finally:\n            return Response(data, status=r.status_code)\n","sub_path":"APP-SERVER/server/similar_words/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"621245096","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom cogdl.utils 
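# The Keras record above converges to weights around (1.59, -4.09) for
# P(pass) = sigmoid(w * hours + b). A sketch of the same fit with scikit-learn (assumed
# installed); a large C approximates the unregularized model, so the coefficients should
# land near the Keras values.
import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 1.75, 2.00, 2.25, 2.50,
              2.75, 3.00, 3.25, 3.50, 4.00, 4.25, 4.50, 4.75, 5.00, 5.50]).reshape(-1, 1)
y = np.array([0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1])
clf = LogisticRegression(C=1e6).fit(X, y)
print(clf.coef_[0][0], clf.intercept_[0])  # roughly 1.5 and -4.1 on this classic dataset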
import spmm\n\n\nclass MeanAggregator(torch.nn.Module):\n def __init__(self, in_channels, out_channels, bias=True):\n super(MeanAggregator, self).__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.cached_result = None\n\n self.linear = nn.Linear(in_channels, out_channels, bias)\n\n @staticmethod\n def norm(graph, x):\n graph.row_norm()\n x = spmm(graph, x)\n return x\n\n def forward(self, graph, x):\n x = self.linear(x)\n x = self.norm(graph, x)\n return x\n\n def __repr__(self):\n return \"{}({}, {})\".format(self.__class__.__name__, self.in_channels, self.out_channels)\n\n\nclass SumAggregator(torch.nn.Module):\n def __init__(self, in_channels, out_channels, bias=True):\n super(SumAggregator, self).__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.cached_result = None\n\n self.linear = nn.Linear(in_channels, out_channels, bias)\n\n @staticmethod\n def aggr(graph, x):\n x = spmm(graph, x)\n return x\n\n def forward(self, graph, x):\n x = self.linear(x)\n x = self.aggr(graph, x)\n return x\n\n def __repr__(self):\n return \"{}({}, {})\".format(self.__class__.__name__, self.in_channels, self.out_channels)\n\n\nclass SAGELayer(nn.Module):\n def __init__(self, in_feats, out_feats, normalize=False, aggr=\"mean\"):\n super(SAGELayer, self).__init__()\n self.in_feats = in_feats\n self.out_feats = out_feats\n self.normalize = normalize\n if aggr == \"mean\":\n self.aggr = MeanAggregator(in_feats, out_feats)\n elif aggr == \"sum\":\n self.aggr = SumAggregator(in_feats, out_feats)\n else:\n raise NotImplementedError\n\n def forward(self, graph, x):\n out = self.aggr(graph, x)\n if self.normalize:\n out = F.normalize(out, p=2.0, dim=-1)\n return out\n","sub_path":"cogdl/layers/sage_layer.py","file_name":"sage_layer.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"224088711","text":"n = int(input())\n\nimport sys\nfor i in range(n):\n\tvar_it = map(int, sys.stdin.readline().split())\n\t_n = next(var_it)\n\tl = sorted(var_it)\n\tavg = sum(l) / _n\n\tno = _n\n\tfor j in l:\n\t\tif j > avg: break\n\t\tno -= 1\n\n\tprint(\"{0:00.3f}%\".format(no * 100 / _n))","sub_path":"4000/04434_baekjoon.py","file_name":"04434_baekjoon.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"556155922","text":"import cocos\nfrom cocos.layer import Layer\nfrom cocos.director import director\nfrom cocos.scene import Scene\nfrom cocos.text import Label\nfrom cocos.tiles import RectMap, RectCell, Tile\nimport random\n\n\nclass HelloWorld(cocos.layer.Layer):\n tick = 0\n\n def __init__(self):\n super(HelloWorld, self).__init__()\n label = cocos.text.Label('Hello, World!',\n font_name='Times New Roman',\n font_size=32,\n anchor_x='center', anchor_y='center')\n label.position = 320, 240\n\n colors = ['#FDFDFD', 'AA00A0']\n\n cells = []\n for i in range(10):\n cells.append([])\n for j in range(10):\n cells[i].append(RectCell)\n\n map = RectMap('world', 100, 100, cells)\n self.add(label)\n\n self.tick = 0\n\n self.schedule(self.update)\n\n def update(self, dt):\n\n self.tick += 1\n # print self.tick\n self.rotation = self.tick\n\n # self.rotation += dt * 20\n\n\ncocos.director.director.init()\ncocos.director.director.set_show_FPS(True)\nhello_layer = HelloWorld()\nmain_scene = 
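# MeanAggregator in the sage_layer.py record above is row-normalized aggregation
# (D^-1 * A * X) done with a sparse spmm. A dense-tensor equivalent for intuition,
# assuming only torch; the toy adjacency and features are illustrative.
import torch

A = torch.tensor([[0., 1., 1.],
                  [1., 0., 0.],
                  [1., 0., 0.]])                     # toy undirected adjacency
X = torch.arange(6, dtype=torch.float32).view(3, 2)  # node features
deg = A.sum(dim=1, keepdim=True).clamp(min=1)        # guard isolated nodes against divide-by-zero
mean_aggr = (A / deg) @ X                            # row i: mean of node i's neighbour features
print(mean_aggr)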
cocos.scene.Scene(hello_layer)\ncocos.director.director.run(main_scene)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"218355270","text":"from base_handler import BaseHandler\nfrom database.blogs import Post\nfrom database.users import User\n\n\nclass FrontHandler(BaseHandler):\n    def get(self, username):\n        try:\n            user = User.by_name(username)\n            user_id = str(user.key().id())\n            posts = Post.all_post_by_user(user_id)\n            self.render('front.html', posts=posts, username=self.user.name if hasattr(self.user, 'name') else None,\n                        blogname=user.blog_name, cur_user=self.user.name)\n        except:\n            self.redirect('/login')\n","sub_path":"handler/front.py","file_name":"front.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"162340359","text":"#Fetch the HTML source of the PTT Movie board => the connection has to look like a normal user's, otherwise the site rejects it\n#1. Open the target page's developer tools, select Network, then open the index.html entry at the top and check its Headers tab\n#2. We must attach a connection permission similar to a normal user's, like the Request Headers shown there (the user-agent inside)\n\nimport urllib.request as request\nurl=\"https://www.ptt.cc/bbs/movie/index9187.html\"\n#Use the Request function of the request module to attach Request Header info when connecting; store the returned object in re1\nre1=request.Request(url,headers={\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36\"})\n\nwith request.urlopen(re1) as ptt:\n    data=ptt.read().decode(\"utf-8\")\n#print(data)\n\n#Parse the source (beautifulsoup4 must be installed => run: pip install beautifulsoup4)\nimport bs4\nroot=bs4.BeautifulSoup(data,\"html.parser\") #let the third-party package parse the data for us (HTML format => \"html.parser\")\nprint(root.title) #after parsing, access results as: object(variable).tag_name (the word inside the <> brackets)\nprint(root.title.string) #the line above captures => 看板 movie 文章列表 - 批踢踢實業坊\n                         #this line captures => 看板 movie 文章列表 - 批踢踢實業坊\n\ntitles=root.find(\"div\",class_=\"title\") #find the div tag with class=\"title\"; find() returns only the first match\nprint(titles.a.string+\"\\n\") #titles is the matched div tag with class=\"title\"\n                            #titles.a is the a tag inside that div\n\ntitles=root.find_all(\"div\",class_=\"title\") #find all div tags with class=\"title\"; find_all() returns every match as a list\nfor i in titles:\n    if i.a !=None:\n        print(i.a.string) #i is the matched div tag with class=\"title\"\n                          #i.a is the a tag inside that div","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"220442712","text":"# created on Dec 24, 2020\n# @author: Bo Zhao\n# @email: zhaobo@uw.edu\n# @website: https://hgis.uw.edu\n# @organization: Department of Geography, University of Washington, Seattle\n# @description: Search geo-tagged tweets within the U.S. 
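# The crawler record above filters div.title tags and then guards against deleted posts
# (if i.a != None). The same scrape is often written with a CSS selector, which makes the
# guard unnecessary because only anchors are matched; bs4 assumed installed, HTML fabricated.
import bs4

html = '<div class="title"><a href="/x">Post 1</a></div><div class="title"></div>'
root = bs4.BeautifulSoup(html, "html.parser")
for a in root.select("div.title a"):  # title-less (deleted) rows simply produce no match
    print(a.string)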
This script is modified from https://github.com/shawn-terryah/Twitter_Geolocation\n\nimport tweepy, json, time, sqlite3\n\nclass StreamListener(tweepy.StreamListener):\n \"\"\"tweepy.StreamListener is a class provided by tweepy used to access\n the Twitter Streaming API to collect tweets in real-time.\n \"\"\"\n def __init__(self, time_limit=60, dbfile=\"\"):\n \"\"\"class initialization\"\"\"\n self.start_time = time.time()\n self.limit = time_limit\n self.dbfile = dbfile\n super(StreamListener, self).__init__()\n\n def on_data(self, data):\n \"\"\"This is called when data are streamed in.\"\"\"\n\n conn = sqlite3.connect(self.dbfile)\n cursor = conn.cursor()\n\n if (time.time() - self.start_time) < self.limit:\n datajson = json.loads(data)\n print (datajson)\n id = datajson['id']\n username = datajson['user']['screen_name']\n created_at = datajson['created_at']\n text = datajson['text'].strip().replace(\"\\n\", \"\").replace('\"', '\\\"').replace(\"'\", \"\\\"\")\n\n # process the geo-tags\n if datajson['coordinates'] == None:\n bbox = datajson['place']['bounding_box']['coordinates'][0]\n lng = (bbox[0][0] + bbox[2][0]) / 2.0\n lat = (bbox[0][1] + bbox[1][1]) / 2.0\n else:\n lng = datajson['coordinates']['coordinates'][0]\n lat = datajson['coordinates']['coordinates'][1]\n\n insert_record_sql = \"INSERT OR REPLACE INTO geotweets (id, username, created_at, lng, lat, text) VALUES (%d, '%s', '%s', %f, %f, '%s')\" % (id, username, created_at, lng, lat, text)\n cursor.execute(insert_record_sql)\n conn.commit()\n\n record = (id, username, created_at, lng, lat, text)\n print (record)\n else:\n conn.close()\n print (\"finished.\")\n return False\n\nif __name__ == \"__main__\":\n # These are provided to you through the Twitter API after you create a account\n # register a Twitter App to get the keys and access tokens.\n\n dbname = 'assets/tweets.db'\n\n # Apply for your own Twitter API keys at https://developer.twitter.com/en/apply-for-access\n consumer_key = \"your_consumer_key\"\n consumer_secret = \"your_consumer_secret\"\n access_token = \"your_access_token\"\n access_token_secret = \"your_access_token_secret\"\n\n myauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n myauth.set_access_token(access_token, access_token_secret)\n\n # LOCATIONS are the longitude, latitude coordinate corners for a box that restricts the\n # geographic area from which you will stream tweets. 
The first two define the southwest\n # corner of the box and the second two define the northeast corner of the box.\n LOCATIONS = [-124.7771694, 24.520833, -66.947028, 49.384472, # Contiguous US\n -164.639405, 58.806859, -144.152365, 71.76871, # Alaska\n -160.161542, 18.776344, -154.641396, 22.878623] # Hawaii\n\n stream_listener = StreamListener(time_limit=20, dbfile=dbname)\n stream = tweepy.Stream(auth=myauth, listener=stream_listener)\n stream.filter(locations=LOCATIONS)\n","sub_path":"04_data/backup/tw2db.py","file_name":"tw2db.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"517545371","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2015-2016 Hewlett Packard Enterprise Development LP\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nTests for hosts\n\"\"\"\n\nTOPOLOGY = \"\"\"\n[image=\"ubuntu:12.04\" type=host name=\"Host 1\"] hs1\n[type=host name=\"Host 1\"] hs2\n\nhs1:1 -- hs2:1\n\"\"\"\n\n\ndef test_image(topology, step):\n \"\"\"\n Test that a vlan configuration is functional with a OpenSwitch switch.\n \"\"\"\n hs1 = topology.get('hs1')\n hs2 = topology.get('hs2')\n\n assert '12.04' in (hs1('cat /etc/issue', shell='bash'))\n assert '14.04' in (hs2('cat /etc/issue', shell='bash'))\n","sub_path":"test/test_topology_docker_nodes_host.py","file_name":"test_topology_docker_nodes_host.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"413748112","text":"from nose.tools import *\nfrom app import app\nfrom tools import assert_response\n\nclient = app.test_client() #create a testing client (like a fake web browser)\nclient.testing = True #enable this so that errors in your web app bubble up to the testing client\n\ndef test_index():\n global client # let python know you want to use the global client variable in this function\n # Check that we get a 404 on the / URL\n resp = client.get('/')\n assert_response(resp, status=302) # the root url should give back a redirect\n\n # test to make sure a GET request to /hello works (returns a 200 status code)\n resp = client.get('/game')\n assert_response(resp) # just make sure we got a valid response\n\n resp = client.post('/game') # use POST, but provide no data\n assert_response(resp, contains=\"The Game is Over or something went wrong.\")\n\n # Go to another scene in the game\n testdata = {'userinput': 'right'}\n resp = client.post('/game', data=testdata)\n assert_response(resp, contains=\"Right Room\")\n","sub_path":"gothonweb/tests/app_tests.py","file_name":"app_tests.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"589537831","text":"# dual_link_List 双向链表\n\n\nclass DualNode(object):\n\t\"\"\"docstring for DualNode\"\"\"\n\tdef __init__(self, val=None):\n\n\t\tself.val = val\n\t\tself.prior = None\n\t\tself.next = 
None\n\t\tself.visited = False\n\n\nclass DualLinkList(object):\n\t\"\"\"docstring for DualLinkList\"\"\"\n\tdef __init__(self):\n\t\tself.head = None\n\n\tdef createList(self,num):\n\t\t\"\"\"Create a doubly linked list with num nodes\"\"\"\n\t\tif isinstance(num, int) == False:\n\t\t\tprint(\"error: not type:int\")\n\t\t\treturn\n\t\tif num <=0:\n\t\t\treturn None\n\n\t\thead = None\n\t\tval = chr(65)\n\t\tcur = None\n\t\tn = 0\n\t\twhile num > 0:\n\t\t\tval = chr(65 + n)\n\t\t\tnode = DualNode(val)\n\t\t\tif head is None:\n\t\t\t\thead = node\n\t\t\t\tcur = head\n\t\t\telse:\n\t\t\t\tnode.next = cur.next\n\t\t\t\tnode.prior = cur\n\t\t\t\tcur.next = node\n\t\t\t\tcur = cur.next\n\t\t\tn += 1\n\t\t\tnum -= 1\n\t\tself.head = head\n\t\treturn head\n\n\tdef createCircleList(self, num):\n\t\t\"\"\"Create a circular doubly linked list with num nodes\"\"\"\n\t\tif isinstance(num, int) == False:\n\t\t\tprint(\"error: not type:int\")\n\t\t\treturn\n\t\tif num <=0:\n\t\t\treturn None\n\n\t\thead = None\n\t\tval = chr(65)\n\t\tcur = None\n\t\tn = 0\n\t\twhile num > 0:\n\t\t\tval = chr(65 + n)\n\t\t\tnode = DualNode(val)\n\t\t\tif head is None:\n\t\t\t\thead = node\n\t\t\t\tcur = head\n\t\t\telse:\n\t\t\t\tnode.next = cur.next\n\t\t\t\tnode.prior = cur\n\t\t\t\tcur.next = node\n\t\t\t\tcur = cur.next\n\t\t\tn += 1\n\t\t\tnum -= 1\n\t\tcur.next = head\n\t\thead.prior = cur\t\n\t\tself.head = head\n\t\treturn head\n\t\t\t\n\tdef travel(self, head):\n\t\t\"\"\"Traverse the list\"\"\"\t\n\t\tif isinstance(head, DualNode) == False:\n\t\t\tprint('error: This argument is invalid')\n\t\t\treturn\n\t\tif head is None:\n\t\t\tprint('None')\n\t\t\treturn\n\t\tcur = head \n\t\twhile cur.next != head and cur.next != None:\n\t\t\tprint(f'{cur.val}', end='')\n\t\t\tcur = cur.next\n\t\tprint(f'{cur.val}')\n\n\tdef caesar(self,n):\n\t\t\"\"\"Caesar cipher mode\"\"\"\n\t\tif isinstance(n, int) == False:\n\t\t\tprint(\"error: not type:int\")\n\t\t\treturn\n\t\t\n\t\thead = self.createCircleList(26)\t\n\t\tif n == 0:\n\t\t\tself.travel(head)\n\t\telif n < 0:\n\t\t\tm = n\n\t\t\twhile m < 0:\n\t\t\t\thead = head.prior\n\t\t\t\tm += 1\n\t\t\tself.travel(head)\n\t\telse:\n\t\t\tfor _ in range(n):\n\t\t\t\thead = head.next\n\t\t\tself.travel(head)\t\t\n\n\nl = DualLinkList()\nhead = l.createCircleList(26)\nl.travel(head)\nl.caesar(-3)\nprint(len('13'))\n\n# a = chr(65)\n# m = ord('A')\n# print(a,m)","sub_path":"线性表操作/链表/双向链表.py","file_name":"双向链表.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"508631315","text":"# This class handles the button input from Arduino\n\n\nclass Input:\n    # Required for communication with Arduino\n    import pyfirmata\n    from pyfirmata import Arduino, util\n    import time\n\n    from sound import Sound\n\n    def __init__(self):\n\n        # Set port\n        self.port = \"COM5\"\n        self.ard = Input.pyfirmata.Arduino(self.port)\n\n        # Set button pins\n        self.b0 = self.ard.get_pin(\"d:2:i\").pin_number # Digital pin 2, Input\n        self.b1 = self.ard.get_pin(\"d:3:i\").pin_number # Digital pin 3, Input\n        self.b2 = self.ard.get_pin(\"d:4:i\").pin_number # Digital pin 4, Input\n        self.b3 = self.ard.get_pin(\"d:5:i\").pin_number # Digital pin 5, Input\n        self.b4 = self.ard.get_pin(\"d:6:i\").pin_number # Digital pin 6, Input\n        self.b5 = self.ard.get_pin(\"d:7:i\").pin_number # Digital pin 7, Input\n        self.b6 = self.ard.get_pin(\"d:8:i\").pin_number # Digital pin 8, Input\n        self.buttons = [self.b0, self.b1, self.b2, self.b3, self.b4, self.b5, self.b6] # Load buttons into array for easy looping\n\n        # 
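# The doubly linked ring in the record above applies a Caesar shift by walking .next/.prior
# around 26 nodes. The same shift in O(1) per character with modular arithmetic; standalone,
# upper-case letters only.
def caesar_char(c, n):
    return chr((ord(c) - ord('A') + n) % 26 + ord('A'))  # % handles negative shifts like the .prior walk

assert caesar_char('A', -3) == 'X'  # matches rotating the ring head 3 nodes backwards
assert caesar_char('Z', 1) == 'A'   # and wrapping forward past 'Z'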
Instance of Iterator reads and handles data from Arduino over the serial port,\n        # it keeps the board's pin values up to date\n        self.iterator = Input.pyfirmata.util.Iterator(self.ard)\n        self.iterator.start()\n\n    # Check if digital pin is read FALSE (when pressed)\n    def check_pressed(self, pin_num):\n        pressed = self.ard.digital[pin_num].read()\n\n        if pressed == 0:\n            # Return corresponding column value for the specific button (directly related to button)\n            if pin_num == self.b0:\n                return 0\n            elif pin_num == self.b1:\n                return 1\n            elif pin_num == self.b2:\n                return 2\n            elif pin_num == self.b3:\n                return 3\n            elif pin_num == self.b4:\n                return 4\n            elif pin_num == self.b5:\n                return 5\n            elif pin_num == self.b6:\n                return 6\n        else:\n            return None\n\n\n","sub_path":"venv/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"329716706","text":"#coding: utf-8\n\n#Extract specific cells from an Excel file and write them to a text file\n#Install the xlrd library beforehand (pip install xlrd)\nimport xlrd\nimport csv\n\nFILE = \"/home/shirahama/output.txt\"\n\nif __name__ == \"__main__\":\n\n    #Specify the Excel file\n    book = xlrd.open_workbook('test.xlsx')\n    f = open(FILE,\"w\")\n\n    #Select the worksheet\n    sheet_1 = book.sheet_by_index(0)\n    #nrows gives the row count, ncols the column count\n    for row in range(sheet_1.nrows):\n        plist = []\n        val = sheet_1.cell(row,0).value\n        #Only target rows whose first column is a specific string (\"テスト\" or \"てすと\")\n        if val == (u\"テスト\") or val == (u\"てすと\"):\n            #Collect the cells of columns [1,4,5] into a list \n            for col in [1,4,5]:\n                output = sheet_1.cell(row,col).value\n                output = output.encode('utf-8')\n                plist.append(output)\n            #Write out only when list element [1] is the specific string \"TEST\"\n            if plist[1] == \"TEST\":\n                f = open(FILE,\"a\")\n                csvwrite = csv.writer(f)\n                csvwrite.writerow(plist)\n            #print plist\n","sub_path":"read_xls.py","file_name":"read_xls.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"515020383","text":"# import the necessary packages\nimport dlib\nimport pyautogui\nimport ctypes\nfrom face_utils import get_face_landmarks\nfrom face_utils import get_eyes\nfrom train import get_trained_models\npyautogui.FAILSAFE = False\nfrom utils import *\nfrom pynput.mouse import Button, Listener\nimport threading\n\n# is used for error in pixels\ncount = 0\nsum = 0\ndef get_precision(x, y, px, py):\n    global count, sum\n    count +=1\n    sum += distance2D(x, y, px, py)\n    return sum/count\n\n#-------------------------- mouse listener--------------------------\n\n#adds the new data created by the mouse clicks\ndef add_new_data(dataset_path, new_image, x, y):\n\n    # getting the data for appending the new picture to the folder\n    counter_path = dataset_path + \"/counter.txt\"\n    i = get_counter(counter_path)\n    try:\n        fh = open(dataset_path + \"/coordinates.txt\", \"rb\")\n        coordinates = pickle.load(fh)\n    except IOError:\n        print(\"Error: can't find the file coordinates.txt, thus creating one\")\n        coordinates = []\n    else:\n        print(\"read the coordinates.txt file successfully\")\n        fh.close()\n\n    # getting the eye pictures\n    landmarks = get_face_landmarks(predictor, detector, new_image)\n    left, right = get_eyes(new_image, landmarks)\n\n    # saving multiple pictures of the new data because it is more valuable\n    for j in range(10):\n        cv2.imwrite(dataset_path + \"/{}_left.jpg\".format(i), left)\n        cv2.imwrite(dataset_path + \"/{}_right.jpg\".format(i), right)\n        coordinates.append((i, (x, y)))\n        i = i + 1\n\n    #updating the coordinates and 
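# check_pressed above maps seven pins to columns with an if/elif chain (corrected here to
# read its pin_num argument). A dictionary built once collapses that chain; a standalone
# sketch with stand-in pin numbers, not the record's live pyfirmata objects.
buttons = [2, 3, 4, 5, 6, 7, 8]  # stand-ins for the b0..b6 pin numbers
pin_to_column = {pin: col for col, pin in enumerate(buttons)}
assert pin_to_column[4] == 2 and pin_to_column.get(99) is None
# In the class this would be: self.pin_to_column = {pin: col for col, pin in enumerate(self.buttons)}
# and check_pressed could end with: return self.pin_to_column.get(pin_num) if pressed == 0 else None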
the counter\n update_counter(counter_path, i)\n with open(dataset_path + \"/coordinates.txt\", \"wb\") as fp:\n pickle.dump(coordinates, fp)\n\n# the handler of the mouse click\n\ndef on_click(x, y, button, pressed):\n global dataset_path\n if button == Button.left:\n print(\"left button of mouse is pressed at:\", x, y)\n add_new_data(dataset_path, webcam_img, x, y)\n print(\"new data is added\")\n thread = threading.Thread(name='retrain-Thread', target=retrain_thread, args=())\n thread.start()\n print(\"retrain thread is created and started\")\n thread.join()\n\ndef retrain_thread():\n global modelx, modely, dataset_path, color_object, model_version\n print(\"Thread retrain is starting\")\n X, y_x, y_y = load_dataset(dataset_path)\n print(\"shape of dataset: \", len(X))\n\n y = []\n for i in range(len(y_x)):\n y.append((y_x[i], y_y[i]))\n\n modelx, modely = get_trained_models(X, y, type=\"linear\", save_name=model_version+\"_retrain\") #make it general\n color_object.change()\n print(\"model has been updated\")\n print(\"Thread retrain is finishing\")\n\n\n#------------------------------------------------------main------------------------------------\n#------------------global variables----------------------\ndataset_path = \"pictures/stable2_retrain2\"\npredictor_path = \"shape_predictor_68_face_landmarks.dat\" # =args[\"shape_predictor\"]\nmodel_version = \"linear_stable2\"\n\n#-----------------------------------------------------------------\nfilenamex = 'models/modelx_'+model_version+'.sav'\nfilenamey = 'models/modely_'+model_version+'.sav'\n\ncolor_object = pointer_color()\ncolor = color_object.get_color()\n\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(predictor_path)\n\nmodelx = pickle.load(open(filenamex, 'rb'))\nmodely = pickle.load(open(filenamey, 'rb'))\n\n\n# creating the canvas for showing the results\nuser32 = ctypes.windll.user32\nwidth, height = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)\nblank_image = np.ones((height, width, 3), np.uint8)\n\ncv2.namedWindow(\"full_window\", cv2.WND_PROP_FULLSCREEN)\ncv2.setWindowProperty(\"full_window\",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)\ncv2.imshow(\"full_window\", blank_image)\n\n\nlistener = Listener(on_click=on_click)\nlistener.start()\n\ncap = cv2.VideoCapture(0)\nwhile cap.isOpened():\n _, webcam_img = cap.read()\n img = webcam_img\n mousex, mousey = pyautogui.position()\n\n landmarks = get_face_landmarks(predictor, detector, img)\n if len(landmarks) < 1:\n print('ERROR:: no face detected')\n continue\n\n limg, rimg = get_eyes(img, landmarks)\n\n limg = cv2.cvtColor(limg, cv2.COLOR_BGR2GRAY)\n limg = cv2.equalizeHist(limg)\n cv2.imshow(\"leftt\", limg)\n limg = np.array(limg)\n limg = limg.flatten()\n\n rimg = cv2.cvtColor(rimg, cv2.COLOR_BGR2GRAY)\n rimg = cv2.equalizeHist(rimg)\n cv2.imshow(\"rightt\", rimg)\n rimg = np.array(rimg)\n rimg = rimg.flatten()\n\n img = np.concatenate((limg, rimg))\n\n #-------------------geting the prediction------------------\n predx = round(modelx.predict([img])[0])\n predy = round(modely.predict([img])[0])\n\n cv2.circle(blank_image, (predx, predy), 10, color_object.get_color(), 5)\n cv2.imshow(\"full_window\", blank_image)\n blank_image = np.ones((height, width, 3), np.uint8)\n\n #--------------------evaluate------------------------------\n result_totall = get_precision(mousex, mousey, predx, predy)\n result_x = get_precision(mousex, 0, predx, 0)\n result_y = get_precision(0, mousey, 0, predy)\n '''\n print(\"x & y: \", mousex, mousey, 
\"-\", predx, predy, \"->\", result_totall)\n print(\"x: \", mousex, \"-\", predx, \"->\", result_x)\n print(\"y: \", mousey, \"-\", predy, \"->\", result_y)\n '''\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\nprint(\"Results:\")\nprint(\"x & y: \",result_totall)\nprint(\"x: \", result_x)\nprint(\"y: \", result_y)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"334077731","text":"# mandelbrot.py\r\n# Lab 9\r\n#\r\n# Name: Michael Reilly\r\n\r\n# keep this import line...\r\nfrom cs5png import PNGImage\r\n\r\n# start your Lab 9 functions here:\r\n\r\ndef mult(c,n):\r\n '''multiplies two numbers together by adding c n number of times to zero\r\nby looping'''\r\n result=0\r\n for x in range(n):\r\n result+=c\r\n x+=1\r\n return result\r\ndef update(c,n):\r\n '''updates the value of z starting at 0 by z**2+c n number of times\r\nby looping'''\r\n z=0\r\n for x in range(n):\r\n z=z**2+c\r\n x+=1\r\n return z\r\ndef inMSet(c,n):\r\n '''Returns False if the sequence Zn+1=Zn^2+c ever yields a z value\r\nehose magnitude is greater than 2. It should return True otherwise'''\r\n z=0\r\n for x in range(n):\r\n z=z**2+c\r\n x+=1\r\n if abs(z)>2:\r\n return False\r\n return True\r\ndef weWantThisPixel( col, row ):\r\n \"\"\" a function that returns True if we want\r\nthe pixel at col, row and False otherwise\"\"\"\r\n if col%10 == 0 and row%10 == 0:\r\n return True\r\n else:\r\n return False\r\ndef test():\r\n \"\"\" a function to demonstrate how\r\nto create and save a png image\"\"\"\r\n width = 300\r\n height = 200\r\n image = PNGImage(width, height)\r\n # create a loop in order to draw some pixels\r\n for col in range(width):\r\n for row in range(height):\r\n if weWantThisPixel( col, row ) == True:\r\n image.plotPoint(col, row)\r\n # we looped through every image pixel; we now write the file\r\n image.saveFile()\r\n'''When change if col%10==0 and row%10==0: to if col%10==0 or row%10==0:\r\nthiss causes the image to change from a set of individual dots to a set\r\nof grid lines because it will work when as long as at least one of\r\nrow or col is a multiple of 10 regardless of where the other one is,\r\ncausing more points to work making the individual points look like a grid,\r\nunlike when it is an and statement where many less points making an image\r\nof individual points of pixels where both row and col are multiples of 10'''\r\ndef scale(pix, pixelMax, floatMin, floatMax):\r\n '''Returns the value of pix/pixmax within the range of\r\nfloatMin and floatMax'''\r\n if pix==0:\r\n return floatMin\r\n elif pix==pixelMax:\r\n return floatMax\r\n else:\r\n k=1.0*pix / pixelMax\r\n N=floatMax-floatMin\r\n L=k*N + floatMin\r\n return L\r\ndef mset(width, height):\r\n \"\"\" a function that creates the mandelbrot set png image\"\"\"\r\n image = PNGImage(width, height)\r\n # create a loop in order to draw some pixels\r\n for col in range(width):\r\n for row in range(height):\r\n x=scale(col, width, -2.0, 1.0)\r\n y=scale(row, height, -1.0, 1.0)\r\n c=x+y*1j\r\n n=25\r\n if inMSet( c, n ) == True:\r\n image.plotPoint(col, row)\r\n # we looped through every image pixel; we now write the file\r\n image.saveFile()\r\n","sub_path":"mandelbrot3/mandelbrot.py","file_name":"mandelbrot.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} 
+{"seq_id":"83340133","text":"\nfrom django.utils.safestring import mark_safe\nfrom django.forms.widgets import DateInput\n\nfrom cc_testing import settings\n\nclass CustomDateInput(DateInput):\n \n class Media:\n css = {'all': \\\n ('/media/css/datepicker/smoothness/jquery-ui-1.8.custom.css',\n )\n } \n js = ('/media/js/jquery-ui-1.8.custom.min.js',\n '/media/js/calendar_initial.js')\n \n def render(self, name, value, attrs=None):\n super(CustomDateInput, self).render(name, value, attrs)\n return mark_safe(\n u'' % \\\n {'name': name, 'value': value})\n ","sub_path":"base/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"261281267","text":"# -*- coding: utf-8 -*-\nclass node:\n def __init__(self, name, parent, val):\n self.name = name\n self.parent = parent\n self.val = val\n\ndef nodes(graph, start):\n ns = {}\n for k in graph.keys():\n if k != start:\n ns[k] = node(k, \"\", float(\"inf\"))\n for k in graph[start].keys():\n ns[k].parent = start\n ns[k].val = graph[start][k]\n for k in ns.keys():\n costs.append(ns[k])\n\ndef minnode(costs):\n c = None\n mv = float('inf')\n for n in costs:\n if n.name in processed:\n continue\n if n.val < mv:\n c = n\n mv = n.val\n return c\n\n\n\ngraph = {}\ncosts = []\nwait_process = []\nprocessed = []\ndef searchminnode():\n m = minnode(costs)\n if m == None:\n return\n if m.name in processed:\n return\n processed.append(m.name)\n for zi in graph[m.name].keys():\n for n in costs:\n if zi == n.name:\n v = m.val + graph[m.name][zi]\n if v < n.val:\n n.val = v\n n.parent = m.name\n searchminnode() \n return\n\ndef graph1():\n graph[\"start\"] = {}\n graph[\"start\"][\"a\"] = 6\n graph[\"start\"][\"b\"] = 2\n graph[\"a\"] = {}\n graph[\"a\"][\"end\"]=1\n graph[\"b\"] = {}\n graph[\"b\"][\"a\"]=3\n graph[\"b\"][\"end\"]=5\n graph[\"end\"] = {}\n\ndef graph2():\n graph[\"start\"] = {}\n graph[\"start\"][\"A\"] = 5\n graph[\"start\"][\"B\"] = 2\n graph[\"A\"] = {}\n graph[\"A\"][\"C\"] = 4\n graph[\"A\"][\"D\"] = 2\n graph[\"B\"] = {}\n graph[\"B\"][\"A\"] = 8\n graph[\"B\"][\"D\"] = 7\n graph[\"C\"] = {}\n graph[\"C\"][\"D\"] = 6\n graph[\"C\"][\"end\"] = 3\n graph[\"D\"] = {}\n graph[\"D\"][\"end\"] = 1\n graph[\"end\"] = {}\n\ndef printcosts():\n for c in costs:\n print(c.parent, c.name, c.val)\n\ndef main():\n graph2()\n print(graph)\n nodes(graph, \"start\")\n for k in graph.keys():\n if k != \"start\":\n wait_process.append(k)\n print(wait_process)\n searchminnode()\n printcosts()\n return\n \nif __name__ == \"__main__\":\n main()","sub_path":"ch07/Dijkstra.py","file_name":"Dijkstra.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"144648081","text":"import os\nimport glob\n\n### Path Info ###\n# TRAIN_DATA_PATH = 'C:\\\\python\\\\source\\\\Portpolio\\\\Dog_Cat\\\\data\\\\train'\n# TEST_DATA_PATH = 'C:\\\\python\\\\source\\\\Portpolio\\\\Dog_Cat\\\\data\\\\test'\n# VALIDATION_DATA_PATH = 'C:\\\\python\\\\source\\\\Portpolio\\\\Dog_Cat\\\\data\\\\validation'\n# TRAIN_FILE_LIST = glob.glob(os.path.join(TRAIN_DATA_PATH, '*.csv'), recursive=False)\n# TEST_FILE_LIST = glob.glob(os.path.join(TEST_DATA_PATH, '*.csv'), recursive=False)\n# VALIDATION_FILE_LIST = glob.glob(os.path.join(VALIDATION_DATA_PATH, '*.csv'), recursive=False)\nDATA_PATH = 
'C:\\\\python\\\\source\\\\Portpolio\\\\Cifar10_classify\\\\data\\\\cifar10_dataset'\nWORKING_DIR_PATH = 'C:\\\\python\\\\source\\\\Portpolio\\\\Cifar10_classify\\\\Model_mobilenet'\nCKPT_DIR_PATH = os.path.join(WORKING_DIR_PATH, 'log')\nCKPT_FILE = os.path.join(CKPT_DIR_PATH, 'save.ckpt')\n\n### Export or Restore Checkpoint ###\nRESTORE = False\nEXPORT = False\n\n### Hyper Parameter ###\n\n# common\nEPOCHS = 100 # total train epochs\nSAVE_EPOCHS = 1 # saving terms\nBATCH_SIZE = 100 # batch_size\nLABEL_CNT = 10 # label's class count (10 classes for CIFAR-10)\nOPTIMIZER = 'adam' # Adam / RMSProp (Modify vggnet.py/ select_optimizer to add new optimizer)\nACTIVATION_FN = 'elu' # Swish / elu (Modify vggnet.py/ select_activation_fn to add new activation_fn)\nLEARNING_RATE = 3e-3 # initial learning rate\nDECAY_STEPS = 1000 # learning rate decay step\nDECAY_RATE = 0.9 # learning rate decay rate\nSTAIRCASE = False # learning rate decay staircase\nBATCHNORM_DECAY_RATE = 0.9 # batch norm decay rate (default : 0.999 / if good train & bad validation : 0.9 recommended)\nL2_REG_RATE = 1e-3 # l2 regularization rate\nDROPOUT_KEEP_PROB = 0.5 # dropout rate\n\n# base mobilenet model\nWIDTH_MULTIPLIER = 1.0 # mobilenet width multiplier\n\n# hybrid mobilenet model\nDEPTH_MULTIPLIER = 2.0 # mobilenet depth multiplier","sub_path":"Portpolio/Cifar10_classify/Model_mobilenet/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"465635059","text":"class Node:\r\n\r\n    def __init__(self, value):\r\n        self.value = value\r\n        self.next = None\r\n\r\ndef reversed_list(head):\r\n    # boundary conditions\r\n    if head is None:\r\n        return head\r\n    # the loop below actually handles this case as well,\r\n    # but checking it here lets the function return sooner\r\n    if head.next is None:\r\n        return head\r\n\r\n    p_prev = None\r\n    p_node = head\r\n    p_reversed_head = None\r\n\r\n    while p_node != None:\r\n        # save the next node before reversing, to avoid breaking the chain\r\n        p_next = p_node.next\r\n        if p_next is None:\r\n            # check whether p_node is the last node; if it is,\r\n            # it becomes the head of the reversed list\r\n            p_reversed_head = p_node\r\n\r\n        # reverse the link between p_prev and p_node\r\n        p_node.next = p_prev\r\n        # move p_prev and p_node one node forward\r\n        p_prev = p_node\r\n        p_node = p_next\r\n\r\n    return p_reversed_head\r\n\r\ndef test():\r\n    values = list(range(1, 11))\r\n    print(' '.join([str(v) for v in values]))\r\n\r\n    head = Node(0)\r\n    p_node = head\r\n    for v in values:\r\n        p_node.next = Node(v)\r\n        p_node = p_node.next\r\n\r\n    head = head.next\r\n    reversed_list_head = reversed_list(head)\r\n    p_node = reversed_list_head\r\n    while p_node:\r\n        print(p_node.value, end=' ')\r\n        p_node = p_node.next\r\n\r\nif __name__ == '__main__':\r\n    test()\r\n","sub_path":"list/reversed_list.py","file_name":"reversed_list.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"583337363","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 8 22:20:06 2018\r\n\r\n@author: Aniruddha choudhury\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import datasets\r\n\r\ntrain =pd.read_csv('train.csv').drop('Unnamed: 0',axis=1)\r\ntest=pd.read_csv('test.csv').drop('Unnamed: 0',axis=1)\r\n\r\ncleancolumn = []\r\nfor i in range(len(train.columns)):\r\n    cleancolumn.append(train.columns[i].replace('-', '').lower())\r\ntrain.columns = cleancolumn\r\n\r\n# Actual replacement of the missing value using median value.\r\ntrain = 
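# config.py above names an exponential learning-rate schedule (LEARNING_RATE, DECAY_STEPS,
# DECAY_RATE, STAIRCASE). Assuming the tf.train.exponential_decay convention those names
# suggest (an assumption, not confirmed by the record), the produced value is:
def decayed_lr(step, lr=3e-3, decay_steps=1000, decay_rate=0.9, staircase=False):
    exponent = step // decay_steps if staircase else step / decay_steps
    return lr * decay_rate ** exponent

assert abs(decayed_lr(0) - 3e-3) < 1e-12
assert abs(decayed_lr(1000) - 2.7e-3) < 1e-9  # one full decay period: 3e-3 * 0.9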
train.fillna((train.median()))\r\n\r\n\r\nX=train.drop('seriousdlqin2yrs',axis=1)\r\ny=train.seriousdlqin2yrs\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.30, random_state = 0)\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc = StandardScaler()\r\nX_train = sc.fit_transform(X_train)\r\nX_test = sc.transform(X_test)\r\nX_train, y_train = np.array(X_train), np.array(y_train)\r\n\r\nfrom sklearn.decomposition import PCA\r\npca = PCA(n_components = None)\r\nX_train = pca.fit_transform(X_train)\r\nX_test = pca.transform(X_test)\r\nexplained_variance = pca.explained_variance_ratio_\r\n\r\n\r\nfrom sklearn.svm import SVC\r\nclassifier = SVC(kernel = 'linear', random_state = 0)\r\nclassifier.fit(X_train, y_train)\r\n\r\n#Fitting Random Forest Classification to the Training set\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nclassifier = RandomForestClassifier(n_estimators = 200, criterion = 'entropy', random_state = 0)\r\nclassifier.fit(X_train, y_train)\r\n\r\n# Predicting the Test set results\r\ny_pred = classifier.predict(X_test)\r\n\r\n# Making the Confusion Matrix\r\nfrom sklearn.metrics import confusion_matrix\r\ncm = confusion_matrix(y_test, y_pred)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nprint('Accuracy: %.2f' % accuracy_score(y_test,y_pred))\r\n\r\nfrom sklearn.model_selection import GridSearchCV\r\nparameters = [{'C': [1, 10, 100, 1000], 'kernel': ['linear']},\r\n {'C': [1, 10, 100, 1000], 'kernel': ['rbf'], 'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]}]\r\ngrid_search = GridSearchCV(estimator = classifier,\r\n param_grid = parameters,\r\n scoring = 'accuracy',\r\n cv = 10,\r\n n_jobs = -1)\r\ngrid_search = grid_search.fit(X_train, y_train)\r\nbest_accuracy = grid_search.best_score_\r\nbest_parameters = grid_search.best_params_\r\n\r\nfrom sklearn.model_selection import cross_val_score\r\naccuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10)\r\naccuracies.mean()\r\naccuracies.std()\r\n\r\n\r\n\r\n\r\nfrom matplotlib.colors import ListedColormap\r\nX_set, y_set = X_train, y_train\r\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\r\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\r\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\r\n alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))\r\nplt.xlim(X1.min(), X1.max())\r\nplt.ylim(X2.min(), X2.max())\r\nfor i, j in enumerate(np.unique(y_set)):\r\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], alpha=1.0, \r\n c = ListedColormap(('red', 'green', 'blue'))(i), label = j)\r\nplt.title('Logistic Regression (Training set)')\r\nplt.xlabel('PC1')\r\nplt.ylabel('PC2')\r\nplt.legend()\r\nplt.show()\r\n\r\nfrom matplotlib.colors import ListedColormap\r\nX_set, y_set = X_test, y_test\r\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\r\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\r\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\r\n alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))\r\nplt.xlim(X1.min(), X1.max())\r\nplt.ylim(X2.min(), X2.max())\r\nfor i, j in enumerate(np.unique(y_set)):\r\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 
1],\r\n                c = ListedColormap(('red', 'green', 'blue'))(i), label = j)\r\nplt.title('Logistic Regression (Test set)')\r\nplt.xlabel('PC1')\r\nplt.ylabel('PC2')\r\nplt.legend()\r\nplt.show()","sub_path":"Credit_ML_model.py","file_name":"Credit_ML_model.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"160651398","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport datetime\n\n# Sphinx needs to be able to import the package to use autodoc and get the\n# version number\nsys.path.append(os.path.pardir)\n\nfrom gmt import __version__\n\nextensions = [\n    'sphinx.ext.autodoc',\n    'sphinx.ext.autosummary',\n    'sphinx.ext.coverage',\n    'sphinx.ext.mathjax',\n    'sphinx.ext.doctest',\n    'sphinx.ext.viewcode',\n    'sphinx.ext.extlinks',\n    'numpydoc',\n    'nbsphinx',\n]\n\n# Produce pages for each class and function\nautosummary_generate = True\nautodoc_default_flags = ['members', 'inherited-members']\n\n# Sphinx project configuration\ntemplates_path = ['_templates']\nexclude_patterns = ['_build']\nsource_suffix = '.rst'\n# The encoding of source files.\nsource_encoding = 'utf-8-sig'\nmaster_doc = 'index'\n\n# General information about the project\nyear = datetime.date.today().year\nproject = u'GMT/Python'\ncopyright = u'2017, Leonardo Uieda'\nif len(__version__.split('-')) > 1 or __version__ == 'unknown':\n    version = 'dev'\nelse:\n    version = __version__\n\n# These enable substitutions using |variable| in the rst files\nrst_epilog = \"\"\"\n.. |year| replace:: {year}\n\"\"\".format(year=year)\n\nhtml_last_updated_fmt = '%b %d, %Y'\nhtml_title = 'GMT/Python'\nhtml_short_title = 'GMT/Python'\nhtml_logo = ''\n# html_favicon = u'favicon.ico'\nhtml_static_path = ['_static']\nhtml_extra_path = ['.nojekyll']\npygments_style = 'default'\nadd_function_parentheses = False\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n# If false, no module index is generated.\n#html_domain_indices = True\n# If false, no index is generated.\n#html_use_index = True\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = True\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\nhtml_show_sphinx = True\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\nhtml_show_copyright = True\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = None\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'gmt-python'\n\n# Theme config\nhtml_theme = 'alabaster'\nhtml_sidebars = {\n '**': [\n 'about.html',\n 'navigation.html',\n 'relations.html',\n 'searchbox.html',\n 'donate.html',\n ]\n}\nhtml_theme_options = {\n 'logo': 'gmt-python-logo.png',\n 'logo_name': 'false',\n 'github_user': 'GenericMappingTools',\n 'github_repo': 'gmt-python',\n 'github_button': 'false',\n 'github_banner': 'true',\n 'description': 'A Python interface for the Generic Mapping Tools',\n 'extra_nav_links': {\n 'GMT website': 'http://gmt.soest.hawaii.edu',\n 'GMT modern mode': 'http://gmt.soest.hawaii.edu/projects/gmt/wiki/Modernization',\n 'Source code': 'https://github.com/GenericMappingTools/gmt-python',\n 'Public chat room': 'https://gitter.im/GenericMappingTools/gmt-python',\n }\n}\n\n","sub_path":"doc/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"471623030","text":"#!/usr/bin/env python3\nimport sys\nfrom math import sqrt\nfrom fractions import Fraction\nimport os.path\nimport statistics as stats\nimport scipy.stats as scistats\nimport scipy.misc as scimisc\n\ncwd = os.getcwd()\n\ndef binomials():\n with open(cwd+'/binomials.txt') as f:\n nums = f.read().split()\n\n numlst = list(float(num) for num in nums)\n\n a_x, a_n, a_p = numlst[0:3]\n b_x, b_n, b_p = numlst[3:6]\n c_n, c_p = numlst[6:8]\n d_n, d_p = numlst[8:10]\n\n print('part a: ')\n a_binom_prob = scistats.binom.pmf(a_x, a_n, a_p)\n print('b({0:0.0f}; {1:0.0f}, {2:0.2f}) = {3:0.4f}\\n'.format(a_x, a_n, a_p,\n a_binom_prob))\n\n print('part b: ')\n b_binom_prob = scistats.binom.pmf(b_x, b_n, b_p)\n print('b({0:0.0f}; {1:0.0f}, {2:0.2f}) = {3:0.4f}\\n'.format(b_x, b_n, b_p,\n b_binom_prob))\n\n print('part c: ')\n c_prob = 0\n for i in range(3, 6):\n c_prob += scistats.binom.pmf(i, c_n, c_p)\n print('P(3 <= X <= 5) = {0:0.4f}\\n'.format(c_prob))\n\n print('part d: ')\n d_prob = 1 - scistats.binom.pmf(0, d_n, d_p)\n print('P(1 <= X) = {0:0.4f}\\n'.format(d_prob))\n\ndef circuits():\n with open(cwd+'/circuits.txt') as f:\n nums = f.read().split()\n\n numlst = list(float(num) for num in nums)\n\n n, p = numlst[0:2]\n print('part a: ')\n a_prob = scistats.binom.cdf(2, n, p)\n print('P(X <= 2) = {0:0.4f}\\n'.format(a_prob))\n\n print('part b: ')\n b_prob = 1 - scistats.binom.cdf(4, n, p)\n print('P(X >= 5) = {0:0.4f}\\n'.format(b_prob))\n\n print('part c: ')\n c_prob = scistats.binom.cdf(4, n, p) - scistats.binom.cdf(0, n, p)\n print('P(1 <= X <= 4) = {0:0.4f}\\n'.format(c_prob))\n\n print('part d: ')\n d_prob = scistats.binom.cdf(0, n, p)\n print('P(X = 0) = {0:0.4f}\\n'.format(d_prob))\n\n print('part e: ')\n expected = n * p\n std_dev = sqrt(expected * (1-p))\n print('E(X) = {0:0.4f}\\n'.format(expected))\n print('sigma(X) = {0:0.4f}\\n'.format(std_dev))\n\n\ndef tornadoes():\n with open(cwd+'/tornadoes.txt') as f:\n nums = list(f.read().split())\n\n poisson = float(nums[0])\n\n print('part a: ')\n a_prob = scistats.poisson.cdf(5, poisson)\n print('P(X <= 5) = {0:0.4f}\\n'.format(a_prob))\n\n print('part b: ')\n b_prob = scistats.poisson.cdf(9, poisson) - scistats.poisson.cdf(5, poisson)\n print('P(6 <= X <= 9) = {0:0.4f}\\n'.format(b_prob))\n\n print('part c: ')\n c_prob = 1 - scistats.poisson.cdf(9, poisson)\n print('P(10 <= X) = {0:0.4f}\\n'.format(c_prob))\n\n print('part d: ')\n d_std_dev = sqrt(poisson)\n d_prob = 1 
- scistats.poisson.cdf(poisson+d_std_dev, poisson)\n print('{0:0.4f}\\n'.format(d_prob))\n\ndef boiler():\n with open(cwd+'/boiler.txt') as f:\n nums = f.read().split()\n\n numlst = list(float(num) for num in nums)\n\n poisson = numlst[0]\n a_x = numlst[1]\n b_x = numlst[2]\n c_x = numlst[3]\n d_x1, d_x2 = numlst[4:6]\n e_x1, e_x2 = numlst[6:8]\n\n print('part a: ')\n a_prob = scistats.poisson.cdf(a_x, poisson)\n print('P(X <= {0:0.0f}) = {1:0.4f}\\n'.format(a_x , a_prob))\n\n print('part b: ')\n b_prob = scistats.poisson.pmf(b_x, poisson)\n print('P(X = {0:0.0f}) = {1:0.4f}\\n'.format(b_x , b_prob))\n\n print('part c: ')\n c_prob = 1 - scistats.poisson.cdf(c_x-1, poisson)\n print('P({0:0.0f} <= X) = {1:0.4f}\\n'.format(c_x , c_prob))\n\n print('part d: ')\n d_prob = scistats.poisson.cdf(d_x2, poisson) - scistats.poisson.cdf(d_x1-1,\n poisson)\n print('P({0:0.0f} <= X <= {1:0.0f}) = {2:0.4f}\\n'.format(d_x1, d_x2, d_prob))\n\n print('part e: ')\n e_prob = scistats.poisson.cdf(e_x2-1, poisson) - scistats.poisson.cdf(e_x1,\n poisson)\n print('P({0:0.0f} < X < {1:0.0f}) = {2:0.4f}\\n'.format(e_x1, e_x2, e_prob))\n\ndef camera():\n with open(cwd+'/camera.txt') as f:\n nums = f.read().split()\n\n numlst = list(float(num) for num in nums)\n\n N, M, n = numlst[0:3]\n\n print('part a: ')\n print('Distribution: hypergeometric\\n')\n print('N = {0:0.0f}, M = {1:0.0f}, n = {2:0.0f}'.format(N, M, n))\n\n print('part b: ')\n b_prob = scistats.hypergeom.pmf(2, N, n, M)\n b_frac = str(Fraction(b_prob).limit_denominator())\n print('P(X = 2) = {}\\n'.format(b_frac))\n\n print('part c: ')\n c_prob = scistats.hypergeom.cdf(2, N, n, M)\n c_frac = str(Fraction(c_prob).limit_denominator())\n print('P(X <= 2) = {}\\n'.format(c_frac))\n\n print('part d: ')\n d_prob = 1 - scistats.hypergeom.cdf(1, N, n, M)\n d_frac = str(Fraction(d_prob).limit_denominator())\n print('P(X >= 2) = {}\\n'.format(d_frac))\n\n print('part e: ')\n expected = n * (M / N)\n std_dev = scistats.hypergeom.std(N, n, M)\n print('E(X) = {0:0.04f}\\n'.format(expected))\n print('sigma(X) = {0:0.04f}\\n'.format(std_dev))\n\ndef emergency():\n with open(cwd+'/emergency.txt') as f:\n nums = f.read().split()\n\n numlst = list(float(num) for num in nums)\n\n poisson, rate = numlst[0:2]\n\n print('part a: ')\n a_prob = scistats.poisson.pmf(4, poisson)\n print('P(X = 4) = {0:0.04f}\\n'.format(a_prob))\n\n print('part b: ')\n b_prob = 1 - scistats.poisson.cdf(3, poisson)\n print('P(X >= 4) = {0:0.04f}\\n'.format(b_prob))\n\n print('part c: ')\n c_prob = poisson * (rate / 60)\n print('{0:0.04f}\\n'.format(c_prob))\n\ndef pulse():\n with open(cwd+'/pulse.txt') as f:\n nums = f.read().split()\n\n numlst = list(float(num) for num in nums)\n\n poisson = numlst[0]\n\n print('part a: ')\n a_prob = scistats.poisson.pmf(1, poisson)\n print('P(X = 1) = {0:0.04f}\\n'.format(a_prob))\n\n print('part b: ')\n b_prob = 1 - scistats.poisson.cdf(1, poisson)\n print('P(X >= 2) = {0:0.04f}\\n'.format(b_prob))\n\n print('part c: ')\n c_prob = scistats.poisson.pmf(0, poisson) ** 2\n print('P(neither) = {0:0.04f}\\n'.format(c_prob))\n\ndef children():\n\n prob_b = 0.5\n prob_g = 0.5\n\n prob_3 = 2 * (prob_b ** 3)\n prob_4 = 2 * scimisc.comb(3, 2) * (prob_b ** 4)\n prob_5 = 1 - prob_3 - prob_4\n\n x_dict = { 0 : 0,\n 1 : 0,\n 2 : 0,\n 3 : prob_3,\n 4 : prob_4,\n 5 : prob_5,\n 6 : 0\n }\n print(x_dict)\n\n\ndef diodes():\n with open(cwd+'/diodes.txt') as f:\n nums = f.read().split()\n\n numlst = list(float(num) for num in nums)\n\n p, n = numlst[0], numlst[1]\n\n print('part a: 
')\n expected = n * p\n std_dev = sqrt(expected * (1-p))\n print('E(X) = {0:0.04f}\\n'.format(expected))\n print('sigma(X) = {0:0.04f}\\n'.format(std_dev))\n\n print('part b: ')\n b_prob = 1 - scistats.poisson.cdf(3, expected)\n print('P(X >= 4) = {0:0.04f}\\n'.format(b_prob))\n\n print('part c: ')\n c_prob = scistats.poisson.pmf(0, expected)\n\n c_n = 5\n\n c_probb = scistats.binom.pmf(4, c_n, c_prob) + scistats.binom.pmf(5, c_n,\n c_prob)\n\n print('P(all diodes) = {0:0.04f}\\n'.format(c_probb))\n\ndef interview():\n with open(cwd+'/interview.txt') as f:\n nums = f.read().split()\n\n numlst = list(float(num) for num in nums)\n\n N, n, M = numlst[0:3]\n\n print('part a: ')\n print('h(x, {0:0.0f}, {1:0.0f}, {2:0.0f})\\n'.format(M, n, N))\n\n print('part b: ')\n expected = n * (M / N)\n print('E(X) = {0:0.04f}\\n'.format(expected))\n\nuser_options = [ 'binomials',\n 'circuits',\n 'tornadoes',\n 'boiler',\n 'camera',\n 'emergency',\n 'pulse',\n 'children',\n 'diodes',\n 'interview'\n ]\n\noptions = { 1 : binomials,\n 2 : circuits,\n 3 : tornadoes,\n 4 : boiler,\n 5 : camera,\n 6 : emergency,\n 7 : pulse,\n 8 : children,\n 9 : diodes,\n 10 : interview\n }\n\ndef main():\n\n while True:\n print('Select an option:\\n')\n\n for i, option in enumerate(user_options):\n print(' {}. {}'.format(i+1, option))\n print(' ')\n\n try:\n sel = input()\n break\n except KeyboardInterrupt:\n print('\\nBye Felicia!')\n quit()\n\n options[int(sel)]()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"hw6/hw6.py","file_name":"hw6.py","file_ext":"py","file_size_in_byte":8207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"159142716","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 31 18:42:17 2017\n\n@author: Cesar Alvarez\n\"\"\"\nfrom settings import * #file with tweepy keys\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport tweepy\nfrom datetime import datetime, timedelta\nimport sys\nimport re\n\n#Create function to tweet texts######################\ndef twittpost(status):\n '''twittpost(status) ---> status = string\n This function update status for twitter\n '''\n \n# CONSUMER_KEY = ''\n# CONSUMER_SECRET= ''\n# ACCESS_TOKEN = ''\n# ACCESS_TOKEN_SECRET = ''\n \n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n api = tweepy.API(auth)\n\n r1 = api.update_status(status=status)\n \n return(r1)\n\n# extract acestream:// for every channel number\ndef getav(av):\n '''\n getav('http://arenavision.in/avXX') \n get current acestream:// for http://arenavision.in/av1-2..\n return str('acestream://xxxxxxxxxxx')\n '''\n response = requests.get(av)\n soup = BeautifulSoup(response.text, 'html.parser')\n _cav=[]\n for a in soup.find_all('a', href=True):\n _cav.append((a['href']))\n\n for i in _cav:\n if i.find('acestream://') ==0:\n return(str(i))\n\n# DataFrame setup#########################################\n\ncolum01 = ['Date', 'Time', 'Sport', 'Comp', 'event', 'channel']\ndf = pd.DataFrame(columns=colum01)\n\n#arenavision scrapper\ntry:\n \n response = requests.get('http://arenavision.in/schedule')\n soup = BeautifulSoup(response.text, 'html.parser')\n \n data_table = soup.find(\"table\", {\"class\" : \"auto-style1\"})\n \n for row in data_table.findAll('tr')[1:-2]:\n col = row.findAll('td')\n d1 = col[0].string.strip()\n d2 = col[1].get_text().replace(' CET', '')\n d3 = col[2].string.strip()\n d4 = col[3].string.strip()\n d5 = col[4].string.strip()\n d6 = 
col[5].get_text().replace('\\n\\t\\t', '')\n row_comp = [d1, d2, d3, d4, d5, d6]\n df.loc[len(df)] = row_comp\nexcept:\n sys.exit('error retrieving or processing site')\n \n##delete date and time column and set a datetime index\ndates = pd.to_datetime(df['Date'] + ' ' + df['Time'])\ndf = df.drop('Date',1)\ndf = df.drop('Time',1)\ndf.insert(0, 'Date', dates)\ndf = df.set_index('Date')\n\n##select rows in advance\nst = '{:%Y-%d-%m %H:%M:%S}'.format(datetime.now() + timedelta(minutes=31))\nend = '{:%Y-%d-%m %H:%M:%S}'.format(datetime.now() + timedelta(minutes=60))\n\nsel = df.ix[st:end]\n#%%\n#iterator to post to twitter\nif len(sel) != 0:\n for a in sel.itertuples():\n twittpost('''{}\\n {}\\n {}\\n {}\\n {}\\n'''.format(\n str(a.Index.to_pydatetime()), a.Sport, a.Comp, a.event, a.channel))\n# extract channels number\n\n# _ca = a.channel \n# _cb = re.sub(\"\\D\", \" \", str(a.channel))\n _cc = (re.sub(\"\\D\", \" \", str(a.channel))).split()\n\n for (a,b) in enumerate(_cc):\n _cay = \"http://arenavision.in/av\" + b\n twittpost('''av{}: {}'''.format(b, getav(_cay)))\n \n \n\n","sub_path":"arenapp.py","file_name":"arenapp.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"414831490","text":"# https://deepscopy.com/Create_your_Mini_Word_Embedding_from_Scratch_using_Pytorch\nfrom keras.preprocessing.sequence import pad_sequences\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\nfrom torch import nn\nimport torch\nfrom torchsummary import summary\nfrom matplotlib import pyplot as plt\nimport random\nimport plotly.graph_objects as go\nimport pandas as pd\nimport plotly\nimport regex as re\n\nWORD_LENGTH = 1001\n\n\ndef store_words(words):\n f = open(\"saved/word_dict.txt\", \"w\")\n for i in range(0, len(words)):\n f.write(str(i) + \",\" + words[i] + \"\\n\")\n f.close()\n\n\ndef store_predictions(prediction, model, device):\n\n f = open(\"saved/predictions.txt\", \"w\")\n for i in range(prediction.shape[0]):\n result, _ = model(\n torch.from_numpy(prediction[i]).unsqueeze(0).float().to(device)\n )\n resultNumpy = result.detach().numpy()\n resultNoLineBreaks = (\n resultNumpy.tolist()\n ) # np.array_repr(resultNumpy).replace('\\n', '')\n f.write(str(i) + \",\" + str(resultNoLineBreaks) + \"\\n\")\n f.close()\n\n\n# builds two dictionaries: a word to unique numerical ID\n# and another where the id is key and the word is value.\ndef word_indexer(corpus, words):\n idx_2_word = {}\n word_2_idx = {}\n temp = []\n i = 1\n for sentence in corpus:\n for word in sentence.split():\n if (word not in temp) and (word in words):\n temp.append(word)\n idx_2_word[i] = word\n word_2_idx[word] = i\n i += 1\n return idx_2_word, word_2_idx\n\n\ndef one_hot_map(doc, word_2_idx, words):\n x = []\n for word in doc.split():\n if word in words:\n x.append(word_2_idx[word])\n return x\n\n\ndef build_input_target_pairs(padded_docs):\n training_data = np.empty((0, 2))\n\n window = 2 # how many neighbours to take into consideration per word.\n for sentence in padded_docs:\n sent_len = len(sentence)\n for i, word in enumerate(sentence):\n w_context = []\n if sentence[i] != 0:\n w_target = sentence[i]\n for j in range(i - window, i + window + 1):\n if j != i and j <= sent_len - 1 and j >= 0 and sentence[j] != 0:\n w_context = sentence[j]\n training_data = np.append(\n training_data, [[w_target, w_context]], axis=0\n )\n # training_data.append([w_target, w_context])\n return training_data\n\n\ndef 
perform_one_hot_encoding(training_data):\n enc = OneHotEncoder()\n enc.fit(np.array(range(WORD_LENGTH)).reshape(-1, 1))\n onehot_label_x = enc.transform(training_data[:, 0].reshape(-1, 1)).toarray()\n\n enc = OneHotEncoder()\n enc.fit(np.array(range(WORD_LENGTH)).reshape(-1, 1))\n onehot_label_y = enc.transform(training_data[:, 1].reshape(-1, 1)).toarray()\n\n return onehot_label_x, onehot_label_y\n\n\nclass WEMB(nn.Module):\n def __init__(self, input_size, hidden_size):\n super().__init__()\n self.layer1 = nn.Linear(input_size, hidden_size)\n self.relu = nn.ReLU()\n self.layer2 = nn.Linear(hidden_size, input_size)\n self.softmax = nn.Softmax(dim=1)\n\n def forward(self, data_input):\n out_bn = self.layer1(data_input)\n output_layer1 = self.relu(out_bn)\n output_layer2 = self.layer2(output_layer1)\n output_layer3 = self.softmax(output_layer2)\n return output_layer3, out_bn\n\n\ndef train_the_model(onehot_label_x, onehot_label_y):\n input_size = WORD_LENGTH\n hidden_size = 32\n learning_rate = 0.01\n num_epochs = 1\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n model = WEMB(input_size, hidden_size).to(device)\n model.train(True)\n # print(model)\n\n # Loss and optimizer\n criterion = nn.BCELoss()\n optimizer = torch.optim.SGD(\n model.parameters(), lr=learning_rate, momentum=0, weight_decay=0, nesterov=False\n )\n summary(model, torch.ones((1, WORD_LENGTH)))\n\n loss_val = []\n onehot_label_x = onehot_label_x.to(device)\n onehot_label_y = onehot_label_y.to(device)\n\n for epoch in range(num_epochs):\n for i in range(onehot_label_y.shape[0]):\n inputs = onehot_label_x[i].float()\n labels = onehot_label_y[i].float()\n inputs = inputs.unsqueeze(0)\n labels = labels.unsqueeze(0)\n\n # Forward pass\n output, wemb = model(inputs)\n loss = criterion(output, labels)\n\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n loss_val.append(loss.item())\n\n if (epoch + 1) % 1 == 0:\n print(f\"Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}\")\n return model, device, loss_val\n\n\ndef main():\n\n #### preprocessing\n filename = \"./text/100-0.txt\"\n with open(filename) as f:\n text = f.read()\n\n text = re.sub(r\"(M\\w{1,2})\\.\", r\"\\1\", text)\n text = re.sub(r\"(\\n)\", r\"\", text)\n text = re.sub(\"[\\,\\_,\\],\\[]\", \"\", text)\n corpus = re.split(r' *[\\.\\?!][\\'\"\\)\\]]* *', text.lower())\n # print(corpus)\n\n wordFrequency = {}\n\n for sentence in corpus:\n for word in sentence.split():\n if word in wordFrequency:\n wordFrequency[word] = wordFrequency[word] + 1\n else:\n wordFrequency[word] = 1\n\n sortedWordFrequency = dict(sorted(wordFrequency.items(), key=lambda item: item[1]))\n\n # print(sortedWordFrequency)\n\n sortedWordFrequencyList = list(sortedWordFrequency.keys())\n # print(sortedWordFrequencyList)\n # TODO: without the most frequent 100\n i = 0\n while i < 100:\n sortedWordFrequencyList.pop()\n i = i + 1\n # print(sortedWordFrequencyList)\n # TODO: add subscript, only the 10000 most frequent after the stopwords.\n words = sortedWordFrequencyList[-(WORD_LENGTH - 1) :]\n store_words(words)\n print(words)\n print(len(words))\n\n if len(words) + 1 != WORD_LENGTH:\n print(\"set correctly the WORD_LENGTH variable.\")\n print(\"it should be \" + str((len(words) + 1)))\n exit()\n\n idx_2_word, word_2_idx = word_indexer(corpus, words)\n encoded_docs = [one_hot_map(d, word_2_idx, words) for d in corpus]\n max_len = WORD_LENGTH - 1\n padded_docs = pad_sequences(encoded_docs, maxlen=max_len, padding=\"post\")\n 
training_data = build_input_target_pairs(padded_docs)\n\n onehot_label_x, onehot_label_y = perform_one_hot_encoding(training_data)\n\n #### hyper-parameter selection\n\n onehot_label_x = torch.from_numpy(onehot_label_x)\n onehot_label_y = torch.from_numpy(onehot_label_y)\n\n #### training the model.\n print(\"the learning has started.\")\n model, device, loss_val = train_the_model(onehot_label_x, onehot_label_y)\n\n #### testing the model.\n docs = words\n test_list = []\n for i in range(1, WORD_LENGTH):\n test_list.append(i)\n\n test_arr = np.array(test_list)\n\n enc = OneHotEncoder()\n enc.fit(np.array(range(WORD_LENGTH)).reshape(-1, 1))\n test = enc.transform(test_arr.reshape(-1, 1)).toarray()\n\n store_predictions(test, model, device)\n\n output = []\n for i in range(test.shape[0]):\n result, wemb2 = model(torch.from_numpy(test[i]).unsqueeze(0).float().to(device))\n wemb2 = wemb2[0].detach().cpu().numpy()\n output.append(wemb2)\n # print(\"for: \" + words[i])\n # print(\"the closest word is: \")\n resultNumpy = result.detach().numpy()\n maxIndex = np.argmax(resultNumpy, axis=1)[0]\n # print(words[maxIndex-1])\n\n xs = []\n ys = []\n for i in range(len(output)):\n xs.append(output[i][0])\n ys.append(output[i][1])\n print(xs, ys)\n\n label = docs\n\n fig = go.Figure(\n data=go.Scatter(\n x=xs,\n y=ys,\n text=label,\n mode=\"markers\",\n marker=dict(\n size=16,\n color=np.random.randn(500), # set color equal to a variable\n colorscale=\"Viridis\", # one of plotly colorscales\n showscale=True,\n ),\n )\n ) # hover text goes here\n\n fig.update_layout(title=\"word embeddings\")\n plotly.offline.plot(fig, filename=\"word_embedding_results.html\")\n fig.show()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"word_embeddings.py","file_name":"word_embeddings.py","file_ext":"py","file_size_in_byte":8404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"253491316","text":"'''Configuration page for creating node networks and connecting devices'''\nimport os\nfrom functools import wraps\nimport base64\nfrom cryptography import fernet\nfrom aiohttp import web, MultipartWriter\nfrom aiohttp_session import setup, get_session\nfrom aiohttp_session.cookie_storage import EncryptedCookieStorage\nfrom mako.template import Template\nfrom mako.lookup import TemplateLookup\nfrom colorama import Fore, Style\nfrom ..devices import AVAILABLE\nfrom ..devices.computer import Computer\nfrom ..devices.webview import WebView\nfrom ..networking import IPV4\n\nDIRNAME = os.path.dirname(__file__)\n\ndef get_pages():\n '''Create mako templates from all files in pages folder'''\n pages_folder = os.path.join(DIRNAME, 'pages')\n component_folder = os.path.join(DIRNAME, 'components')\n\n lookup = TemplateLookup(directories=[pages_folder, component_folder])\n\n pages = {}\n for filename in os.listdir(pages_folder):\n pages['.'.join(filename.split('.')[:-1])] =\\\n Template(filename=os.path.join(pages_folder, filename), lookup=lookup)\n return pages\n\nasync def run(*args, **kwargs):\n import traceback\n try:\n await run_inner(*args, **kwargs)\n except:\n traceback.print_exc()\n\nasync def run_inner(system, password, port=8080):\n '''\n Start config page\n returns exit function\n '''\n\n webview = WebView()\n await system.add_device(webview)\n\n pages = get_pages()\n\n def static_file(path):\n return web.FileResponse(\n os.path.join(DIRNAME, 'static', path)\n )\n\n async def static(request):\n '''Serve static files from static folder'''\n path = 
request.match_info.get('path')\n return static_file(path)\n\n async def login_form(_):\n return web.Response(\n body=pages['login'].render(),\n content_type='text/html',\n )\n async def login(request):\n password_attempt = (await request.post()).get('password')\n print(password_attempt, password)\n session = await get_session(request)\n session['logged_in'] = password_attempt == password\n raise web.HTTPFound(request.headers.get('referer', ''))\n\n def require_login(func):\n @wraps(func)\n async def wrapped(request):\n session = await get_session(request)\n logged_in = session.get('logged_in')\n if not logged_in:\n return await login_form(request)\n return await func(request)\n return wrapped\n\n async def index(_):\n user_id = None\n return web.Response(\n body=pages['index'].render(user_id=user_id),\n content_type='text/html',\n )\n\n async def view(_):\n return web.Response(\n body=pages['view'].render(),\n content_type='text/html',\n )\n\n @require_login\n async def devices(_):\n\n device_groups = [('Root', system.devices)]\n for device in system.devices:\n if isinstance(device, Computer):\n device_groups.append((device.desc, device.devices))\n\n return web.Response(\n body=pages['devices'].render(\n device_groups=device_groups,\n device_types=AVAILABLE,\n ),\n content_type='text/html',\n )\n\n @require_login\n async def add_device(request):\n query = request.rel_url.query\n computer = query.get('computer')\n type_name = query.get('type')\n device_params = query.get('device_params')\n section_params = query.get('section_params')\n\n print(computer, type_name, device_params, section_params)\n\n if computer == 'Root':\n await system.add_device_from_text(type_name, device_params, section_params)\n return web.HTTPFound('/devices')\n\n for device in system.devices:\n if isinstance(device, Computer):\n if device.desc == computer:\n return web.Response(text=\"not yet implemented :'(\")\n\n raise web.HTTPClientError(reason=f'Computer at {computer} not found')\n\n @require_login\n async def remove_device(request):\n query = request.rel_url.query\n device_id = int(query.get('id'))\n await system.remove_device(device_id)\n return web.HTTPFound('/devices')\n\n async def view_raw(request):\n '''Return response with frames as png from specified screen'''\n frames = webview.generate()\n boundary = 'frame'\n response = web.StreamResponse(\n status=200,\n reason='OK',\n headers={\n 'Content-Type': 'multipart/x-mixed-replace;boundary={}'.format(boundary)\n }\n )\n await response.prepare(request)\n async for frame in frames:\n with MultipartWriter('image/jpeg', boundary=boundary) as mpwriter:\n mpwriter.append(frame, {\n 'Content-Type': 'image/jpeg'\n })\n await mpwriter.write(response, close_boundary=False)\n await response.drain()\n\n @require_login\n async def audio(_):\n return web.Response(\n body=pages['audio'].render(audio_queue=system.audio_queue),\n content_type='text/html')\n\n @require_login\n async def add_to_queue(request):\n query = request.rel_url.query\n song_url = query.get('song_url')\n if song_url:\n try:\n await system.audio_queue.add(song_url)\n return web.HTTPFound('/audio')\n except ValueError:\n return web.HTTPClientError(reason='Unsupported URL')\n return web.HTTPBadRequest()\n\n @require_login\n async def control_audio_queue(request):\n command = request.match_info.get('command')\n if command == 'add':\n return await add_to_queue(request)\n\n if command in ('play', 'pause', 'stop', 'next', 'prev'):\n await getattr(system.audio_queue, command)()\n elif command == 'skip_to':\n 
try:\n seconds = float(request.rel_url.query['seconds'])\n await system.audio_queue.skip_to(seconds)\n except (KeyError, ValueError):\n return web.HTTPBadRequest()\n elif command in ('move_up', 'remove', 'move_down', 'play_now'):\n try:\n song_id = int(request.rel_url.query['id'])\n await getattr(system.audio_queue, command)(song_id)\n except (KeyError, ValueError):\n return web.HTTPBadRequest()\n else:\n return web.HTTPNotFound()\n return web.HTTPFound('/audio')\n\n app = web.Application()\n fernet_key = fernet.Fernet.generate_key()\n secret_key = base64.urlsafe_b64decode(fernet_key)\n setup(app, EncryptedCookieStorage(secret_key))\n app.add_routes([\n web.get('/static/{path:.*}', static),\n web.post('/login', login),\n web.get('/', index),\n web.get('/view', view),\n web.get('/view/raw', view_raw),\n web.get('/devices', devices),\n web.get('/devices/add', add_device),\n web.get('/devices/remove', remove_device),\n web.get('/audio', audio),\n web.get('/audio/{command}', control_audio_queue),\n ])\n runner = web.AppRunner(app)\n await runner.setup()\n site = web.TCPSite(runner, '0.0.0.0', port)\n print(f'{Fore.CYAN}{Style.BRIGHT}Configuration page can be found on '\n f'http://{IPV4}:{port}/{Style.RESET_ALL}')\n await site.start()\n","sub_path":"_old/rgballthethings/config_page/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"560342783","text":"from _Helper import *\n\n# \n# The thief has found himself a new place for his thievery again. There is only one entrance to this area, called the \"root.\" Besides the root, each house has one and only one parent house. After a tour, the smart thief realized that \"all houses in this place forms a binary tree\". It will automatically contact the police if two directly-linked houses were broken into on the same night. \n# Determine the maximum amount of money the thief can rob tonight without alerting the police. \n# Example 1:\n# 3\n# / \\\n# 2 3\n# \\ \\ \n# 3 1\n# Maximum amount of money the thief can rob = 3 + 3 + 1 = 7. \n# Example 2:\n# 3\n# / \\\n# 4 5\n# / \\ \\ \n# 1 3 1\n# Maximum amount of money the thief can rob = 4 + 5 = 9. 
\n# \n\n# \n# TIME: O(N)\n# SPACE: O(N)\n# \n\n# \n# DFS, use an extra class to store final max and unselected max, like memory searching, bottom to top searching\n# \n\n# \n# Medium\n# \n\n# \n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Solution(object):\n class NodeMax(object):\n def __init__(self, final, unchoose):\n self.finalMax = final\n self.unchooseMax = unchoose\n\n def dfs(self, root):\n if root is None:\n return self.NodeMax(0, 0)\n leftchildmax = self.dfs(root.left)\n rightchildmax = self.dfs(root.right)\n return self.NodeMax(\n max(root.val + leftchildmax.unchooseMax +\n rightchildmax.unchooseMax,\n leftchildmax.finalMax + rightchildmax.finalMax),\n leftchildmax.finalMax + rightchildmax.finalMax)\n\n def rob(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n return self.dfs(root).finalMax\n\n\n# \n\n# \ns = Solution()\nInput = None\nexpected = 0\nactual = s.rob(ArrayToTreeNode(Input))\nAreEqual(expected, actual, Input)\n# another pair\nInput = [3, 1, 100]\nexpected = 101\nactual = s.rob(ArrayToTreeNode(Input))\nAreEqual(expected, actual, Input)\n# another pair\nInput = [3, 2, 3, None, 3, None, 1]\nexpected = 7\nactual = s.rob(ArrayToTreeNode(Input))\nAreEqual(expected, actual, Input)\n# another pair\nInput = [3, 4, 5, 1, 3, None, 1]\nexpected = 9\nactual = s.rob(ArrayToTreeNode(Input))\nAreEqual(expected, actual, Input)\n# another pair\nInput = [4, 1, None, 2, None, 3]\nexpected = 7\nactual = s.rob(ArrayToTreeNode(Input))\nAreEqual(expected, actual, Input)\n# ","sub_path":"337_House_Robber_III.py","file_name":"337_House_Robber_III.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"192537491","text":"\"\"\"Tests for the ``sympy.physics.mechanics._geometry.py`` module.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom sympy.core.backend import (\n Integer,\n Rational,\n S,\n Symbol,\n acos,\n cos,\n pi,\n sin,\n sqrt,\n)\nfrom sympy.core.relational import Eq\nfrom sympy.physics.mechanics import Point, ReferenceFrame, dynamicsymbols\nfrom sympy.physics.mechanics._geometry import Cylinder, Sphere\nfrom sympy.simplify.simplify import simplify\n\nif TYPE_CHECKING:\n from sympy.core.backend import USE_SYMENGINE\n from sympy.physics.mechanics import Vector\n\n if USE_SYMENGINE:\n from sympy.core.backend import Basic as ExprType\n else:\n from sympy.core.expr import Expr as ExprType\n\n\nr = Symbol('r')\nx = Symbol('x')\nq = dynamicsymbols('q')\nN = ReferenceFrame('N')\n\n\nclass TestSphere:\n\n @staticmethod\n def test_valid_constructor() -> None:\n r = Symbol('r')\n pO = Point('pO')\n sphere = Sphere(r, pO)\n assert isinstance(sphere, Sphere)\n assert hasattr(sphere, 'radius')\n assert sphere.radius == r\n assert hasattr(sphere, 'point')\n assert sphere.point == pO\n\n @staticmethod\n @pytest.mark.parametrize('position', [S.Zero, Integer(2) * r * N.x])\n def test_geodesic_length_point_not_on_surface_invalid(position: Vector) -> None:\n r = Symbol('r')\n pO = Point('pO')\n sphere = Sphere(r, pO)\n\n p1 = Point('p1')\n p1.set_pos(pO, position)\n p2 = Point('p2')\n p2.set_pos(pO, position)\n\n error_msg = r'point .* does not lie on the surface of'\n with pytest.raises(ValueError, match=error_msg):\n sphere.geodesic_length(p1, p2)\n\n @staticmethod\n @pytest.mark.parametrize(\n 'position_1, position_2, 
expected',\n [\n (r * N.x, r * N.x, S.Zero),\n (r * N.x, r * N.y, S.Half * pi * r),\n (r * N.x, r * -N.x, pi * r),\n (r * -N.x, r * N.x, pi * r),\n (r * N.x, r * sqrt(2) * S.Half * (N.x + N.y), Rational(1, 4) * pi * r),\n (\n r * sqrt(2) * S.Half * (N.x + N.y),\n r * sqrt(3) * Rational(1, 3) * (N.x + N.y + N.z),\n r * acos(sqrt(6) * Rational(1, 3)),\n ),\n ]\n )\n def test_geodesic_length(position_1: Vector, position_2: Vector, expected: ExprType) -> None:\n r = Symbol('r')\n pO = Point('pO')\n sphere = Sphere(r, pO)\n\n p1 = Point('p1')\n p1.set_pos(pO, position_1)\n p2 = Point('p2')\n p2.set_pos(pO, position_2)\n\n assert simplify(Eq(sphere.geodesic_length(p1, p2), expected))\n\n\nclass TestCylinder:\n\n @staticmethod\n def test_valid_constructor() -> None:\n N = ReferenceFrame('N')\n r = Symbol('r')\n pO = Point('pO')\n cylinder = Cylinder(r, pO, N.x)\n assert isinstance(cylinder, Cylinder)\n assert hasattr(cylinder, 'radius')\n assert cylinder.radius == r\n assert hasattr(cylinder, 'point')\n assert cylinder.point == pO\n assert hasattr(cylinder, 'axis')\n assert cylinder.axis == N.x\n\n @staticmethod\n @pytest.mark.parametrize(\n 'position, expected',\n [\n (S.Zero, False),\n (r * N.y, True),\n (r * N.z, True),\n (r * (N.y + N.z).normalize(), True),\n (Integer(2) * r * N.y, False),\n (r * (N.x + N.y), True),\n (r * (Integer(2) * N.x + N.y), True),\n (Integer(2) * N.x + r * (Integer(2) * N.y + N.z).normalize(), True),\n (r * (cos(q) * N.y + sin(q) * N.z), True)\n ]\n )\n def test_point_is_on_surface(position: Vector, expected: bool) -> None:\n r = Symbol('r')\n pO = Point('pO')\n cylinder = Cylinder(r, pO, N.x)\n\n p1 = Point('p1')\n p1.set_pos(pO, position)\n\n assert cylinder._point_is_on_surface(p1) is expected\n\n @staticmethod\n @pytest.mark.parametrize('position', [S.Zero, Integer(2) * r * N.y])\n def test_geodesic_length_point_not_on_surface_invalid(position: Vector) -> None:\n r = Symbol('r')\n pO = Point('pO')\n cylinder = Cylinder(r, pO, N.x)\n\n p1 = Point('p1')\n p1.set_pos(pO, position)\n p2 = Point('p2')\n p2.set_pos(pO, position)\n\n error_msg = r'point .* does not lie on the surface of'\n with pytest.raises(ValueError, match=error_msg):\n cylinder.geodesic_length(p1, p2)\n\n @staticmethod\n @pytest.mark.parametrize(\n 'axis, position_1, position_2, expected',\n [\n (N.x, r * N.y, r * N.y, S.Zero),\n (N.x, r * N.y, N.x + r * N.y, S.One),\n (N.x, r * N.y, -x * N.x + r * N.y, sqrt(x**2)),\n (-N.x, r * N.y, x * N.x + r * N.y, sqrt(x**2)),\n (N.x, r * N.y, r * N.z, S.Half * pi * sqrt(r**2)),\n (-N.x, r * N.y, r * N.z, Integer(3) * S.Half * pi * sqrt(r**2)),\n (N.x, r * N.z, r * N.y, Integer(3) * S.Half * pi * sqrt(r**2)),\n (-N.x, r * N.z, r * N.y, S.Half * pi * sqrt(r**2)),\n (N.x, r * N.y, r * (cos(q) * N.y + sin(q) * N.z), sqrt(r**2 * q**2)),\n (\n -N.x, r * N.y,\n r * (cos(q) * N.y + sin(q) * N.z),\n sqrt(r**2 * (Integer(2) * pi - q)**2),\n ),\n ]\n )\n def test_geodesic_length(\n axis: Vector,\n position_1: Vector,\n position_2: Vector,\n expected: ExprType,\n ) -> None:\n r = Symbol('r')\n pO = Point('pO')\n cylinder = Cylinder(r, pO, axis)\n\n p1 = Point('p1')\n p1.set_pos(pO, position_1)\n p2 = Point('p2')\n p2.set_pos(pO, position_2)\n\n assert simplify(Eq(cylinder.geodesic_length(p1, p2), expected))\n","sub_path":"sympy/physics/mechanics/tests/test_geometry.py","file_name":"test_geometry.py","file_ext":"py","file_size_in_byte":5802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"49578921","text":"class 
ListNode:\r\n def __init__(self, data):\r\n self.data = data\r\n self.next = None\r\n\r\n\r\nclass List:\r\n def __init__(self, *args):\r\n self.head = None\r\n self.last = None\r\n self.len = 0\r\n for arg in args:\r\n self.append(arg)\r\n\r\n @property\r\n def _value(self):\r\n if self.head:\r\n return self.head.data\r\n\r\n @_value.setter\r\n def _value(self, value):\r\n if self.head:\r\n self.head.data = value\r\n\r\n def append(self, item):\r\n node = ListNode(item)\r\n if self.head is None:\r\n self.head = node\r\n self.last = node\r\n else:\r\n self.last.next = node\r\n self.last = node\r\n self.len += 1\r\n return True\r\n\r\n def print(self):\r\n tmp = self.head\r\n while tmp:\r\n print(tmp.data)\r\n tmp = tmp.next\r\n\r\n def print_reversed(self):\r\n for el in reversed(self):\r\n print(el)\r\n\r\n def __len__(self):\r\n return self.len\r\n\r\n def __getitem__(self, key):\r\n if not isinstance(key, int):\r\n raise TypeError\r\n if key < 0:\r\n raise IndexError\r\n tmp = self.head\r\n for i in range(key):\r\n tmp = tmp.next\r\n if tmp is None:\r\n raise IndexError\r\n return tmp.data\r\n\r\n def __iter__(self):\r\n self._pointer = self.head\r\n return self\r\n\r\n def __next__(self):\r\n tmp = self._pointer\r\n if tmp is None:\r\n raise StopIteration\r\n self._pointer = tmp.next\r\n return tmp.data\r\n\r\n def __add__(self, data):\r\n for item in data:\r\n self.append(item)\r\n return self\r\n\r\n def __eq__(self, other):\r\n if len(other) != len(self):\r\n return False\r\n for my_item, other_item in zip(self, other):\r\n if my_item != other_item:\r\n return False\r\n return True\r\n\r\n\r\nif __name__ == \"__main__\":\r\n list_ = List(1, 2, 3)\r\n list_.print()\r\n print(\"---------------------------------\")\r\n list_.append(4)\r\n list_.print()\r\n print(\"---------------------------------\")\r\n tail = List(5, 6)\r\n list_ += tail\r\n list_.print()\r\n print(\"---------------------------------\")\r\n tail._value = 0\r\n tail.print()\r\n list_.print()\r\n print(\"---------------------------------\")\r\n list_ += [7, 8]\r\n list_.print()\r\n print(\"---------------------------------\")\r\n list_ += ()\r\n list_.print()\r\n print(\"---------------------------------\")\r\n\r\n for elem in list_:\r\n print(2 ** elem)\r\n\r\n print(\"---------------------------------\")\r\n list_.print_reversed()\r\n\r\n print(\"---------------------------------\")\r\n empty_list = List()\r\n empty_list.print()\r\n\r\n print(\"---------------------------------\")\r\n list_with_single_none_element = List(None)\r\n list_with_single_none_element.print()\r\n","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"472929282","text":"# M0_C7 - Employee Database\n########################################\n# Modify and debug the code as needed. #\n########################################\nimport sys\n\nusage = '''Usage: [COMMAND] [ID]\n add [ID] - Add a new employee record\n edit [ID] - Edit an employee record\n view [ID] - View an employee record\n remove [ID] - Delete an employee record\n exit - Exits\n'''\n\nnames = {}\ncities = {}\n\ndef add_employee_record(id_num, name, city):\n \"\"\"Adds an employee record if record doesn't already exist\n and values are not empty.\n\n Arguments:\n id_num -- ID of employee record to add\n name -- Name of employee. Cannot be empty\n city -- City of employee. 
Cannot be empty\n    \"\"\"\n\n    # If ID already exists, return False\n    if id_num in names or id_num in cities:\n        return False\n\n    # If name or city are empty, return False\n    if not name or not city:\n        return False\n\n    # Otherwise create the dictionary pairs\n    names[id_num] = name\n    cities[id_num] = city\n\n    return True\n\ndef edit_employee_record(id_num, name, city):\n    \"\"\"Edits an employee record if it exists.\n\n    Arguments:\n    id_num -- ID of employee record to edit\n    name -- New name to change to\n    city -- New city to change to\n    \"\"\"\n    if not id_num in names or not id_num in cities:\n        return False\n\n    names[id_num] = name\n    cities[id_num] = city\n\n    return True\n\ndef get_employee_record(id_num):\n    \"\"\"Gets an employee's details if record exists.\n\n    Arguments:\n    id_num -- ID of employee record to fetch\n    \"\"\"\n    if not id_num in names or not id_num in cities:\n        return None\n\n    return f'{id_num} {names[id_num]} {cities[id_num]}'\n\ndef remove_employee_record(id_num):\n    \"\"\"Deletes an employee's records if they exist.\n\n    Arguments:\n    id_num -- ID of employee record to remove\n    \"\"\"\n    if id_num in names:\n        del names[id_num]\n    else:\n        return False\n    if id_num in cities:\n        del cities[id_num]\n    else:\n        return False\n\n    return True\n\nif __name__ == '__main__':\n    print(usage)\n    err = '>> Invalid command'\n\n    while True:\n        command = input('<< ')\n        cmd_arr = command.split()\n        if len(cmd_arr) == 1 and cmd_arr[0] == 'exit':\n            sys.exit('>> Exiting')\n        elif len(cmd_arr) == 2:\n            try:\n                id_num = int(cmd_arr[1])\n                if cmd_arr[0] == 'add':\n                    name = input('Name << ')\n                    city = input('City << ')\n                    # Hint: Look up python ternary\n                    print('>> Record added' if add_employee_record(id_num, name, city) else '>> Error adding record')\n                elif cmd_arr[0] == 'edit':\n                    name = input('Name << ')\n                    city = input('City << ')\n                    print('>> Record edited' if edit_employee_record(id_num, name, city) else '>> Error editing record')\n                elif cmd_arr[0] == 'view':\n                    result = get_employee_record(id_num)\n                    print(f'>> {result}' if result else '>> Error viewing record')\n                elif cmd_arr[0] == 'remove':\n                    print('>> Record deleted' if remove_employee_record(id_num) else '>> Error removing record')\n                else:\n                    print(err)\n            except ValueError:\n                print('>> Invalid id')\n        else:\n            print('>> Invalid command')\n","sub_path":"employee_database.py","file_name":"employee_database.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"83672065","text":"# Copyright 2016 Toyota Research Institute\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Parse structure text files and emit various reports describing how\nmuch effort or cost would be involved.\"\"\"\n\n# TODO(ggould) This class needs some linter love.\n# pylint: disable = all\n\nimport html.parser\nimport re\n\nfrom model import node\n\n# The following tags start a child node.\nTAG_NODES = ['h1', 'h2', 'h3', 'h4', 'h5', 'li']\n\n\ndef tag_depth(tag):\n    if tag not in TAG_NODES:\n        return -1\n    return 
TAG_NODES.index(tag)\n\nESTIMATE_RE = re.compile(r'{[\\d-]+}')\n\n\ndef get_c_class(attrs):\n classes = dict(attrs).get('class', '').split(' ')\n for this_class in classes:\n if this_class.startswith('c') and len(this_class) == 2:\n return int(this_class[1:])\n\n return 100\n\n\nclass Parser(html.parser.HTMLParser):\n def __init__(self):\n super(Parser, self).__init__()\n self.root = node.Node()\n self.root.tag = 'root'\n self.current = self.root\n\n def handle_starttag(self, tag, attrs):\n if tag not in TAG_NODES:\n return\n\n # See if we need to start a new node or back up.\n index = TAG_NODES.index(tag)\n c_class = get_c_class(attrs)\n\n while self.current is not self.root:\n if tag == 'li' and self.current.tag == 'li':\n if c_class <= self.current.c_class:\n self.current = self.current.parent\n continue\n else:\n break\n\n if index <= tag_depth(self.current.tag):\n self.current = self.current.parent\n continue\n\n break\n\n new_node = node.Node()\n new_node.parent = self.current\n new_node.tag = tag\n new_node.c_class = c_class\n\n self.current.children.append(new_node)\n self.current = new_node\n\n def handle_endtag(self, tag):\n return\n\n def handle_data(self, data):\n self.current.data += data\n\n\ndef process_tree(parent_node):\n # Do all children first.\n\n # TODO josh.pieper: It would be nice to report where in the tree\n # errors occurred.\n for child in parent_node.children:\n process_tree(child)\n\n # Then look for a distribution in our data.\n possible_data = ESTIMATE_RE.findall(parent_node.data)\n if len(possible_data) > 1:\n raise RuntimeError('multiple estimates found in one block')\n elif len(possible_data) == 1:\n parent_node.distribution = node.make_distribution(possible_data[0])\n\n\ndef from_html(html_text):\n html_parser = Parser()\n html_parser.feed(html_text)\n html_parser.root.data = ''\n root = html_parser.root\n process_tree(root)\n return root\n","sub_path":"model/from_html.py","file_name":"from_html.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"341929892","text":"import random as rnd\r\nimport sexpdata as sp\r\nfrom math import fabs, sqrt, log2, e, fmod, floor\r\nfrom expression_generator import ExpressionGenerator as eg\r\n\r\n\r\nclass TreeExpression(object):\r\n def __init__(self):\r\n self.root = None\r\n self.children = []\r\n self.fitness = 0\r\n\r\n def from_s_expression(self, parsed_expr):\r\n if type(parsed_expr) == list:\r\n self.root = sp.dumps(parsed_expr[0])\r\n for elem in parsed_expr[1:]:\r\n child = TreeExpression().from_s_expression(elem)\r\n self.children.append(child)\r\n else:\r\n self.root = parsed_expr\r\n self.children = None\r\n return self\r\n\r\n def random_init(self, height):\r\n if height != 0:\r\n self.root = eg.random_operator()\r\n for i in range(eg.operators[self.root]):\r\n self.children.append(TreeExpression().random_init(height - 1))\r\n else:\r\n self.root = eg.random_terminal()\r\n self.children = None\r\n return self\r\n\r\n def mutate(self, chi):\r\n if rnd.uniform(0, 1) < chi:\r\n return TreeExpression().random_init(3)\r\n elif self.children is None:\r\n return self\r\n else:\r\n child = rnd.randint(0, len(self.children) - 1)\r\n self.children[child] = self.children[child].mutate(chi)\r\n return self\r\n\r\n def to_s_expression(self):\r\n if self.children is None:\r\n return self.root\r\n else:\r\n return '(' + self.root + ' ' + ' '.join([str(x.to_s_expression()) for x in self.children]) + ')'\r\n\r\n def 
evaluate_expression(self, x):\r\n if self.children is None:\r\n return self.root\r\n else:\r\n return self.eval_func(x, *[child.evaluate_expression(x) for child in self.children])\r\n\r\n def eval_func(self, x, *args):\r\n n = len(x)\r\n try:\r\n if self.root == 'add':\r\n return args[0] + args[1]\r\n elif self.root == 'sub':\r\n return args[0] - args[1]\r\n elif self.root == 'mul':\r\n return args[0] * args[1]\r\n elif self.root == 'div':\r\n return 0 if args[1] == 0 else args[0] / args[1]\r\n elif self.root == 'pow':\r\n return 0 if args[0] < 0 and int(args[1]) != args[1] or args[0] == 0 else args[0] ** args[1]\r\n elif self.root == 'sqrt':\r\n return sqrt(args[0]) if args[0] >= 0 else 0\r\n elif self.root == 'log':\r\n return log2(args[0]) if args[0] > 0 else 0\r\n elif self.root == 'exp':\r\n return e ** args[0]\r\n elif self.root == 'max':\r\n return max(args)\r\n elif self.root == 'ifleq':\r\n return args[2] if args[0] <= args[1] else args[3]\r\n elif self.root == 'data':\r\n return x[int(fmod(fabs(floor(args[0])), n))]\r\n elif self.root == 'diff':\r\n return x[int(fmod(fabs(floor(args[0])), n))] - x[int(fmod(fabs(floor(args[1])), n))]\r\n elif self.root == 'avg':\r\n k = int(fmod(fabs(floor(args[0])), n))\r\n l = int(fmod(fabs(floor(args[1])), n))\n if k == l:\n return 0\r\n size = fabs(k - l)\r\n size = size if size != 0 else 1\r\n s = sum(x[min([k, l]): max([k, l])])\r\n return s / size\r\n except OverflowError:\r\n return 0\r\n","sub_path":"tree_expr.py","file_name":"tree_expr.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"201626242","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2017 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nSlurm batch system Tasks.\n\nAdapted by Jimmy Tang from the sge.py by Jake Feala (@jfeala)\nFurther adapted by Nathan Tsoi \n\nAdapted by Jake Feala (@jfeala) from\n`LSF extension `_\nby Alex Wiltschko (@alexbw)\nMaintained by Jake Feala (@jfeala)\n\nSlurm is a job scheduler used to allocate compute resources on a\nshared cluster. Jobs are submitted using the ``sbatch`` command and monitored\nusing ``scontrol``. To get started, install luigi on all nodes.\n\nTo run luigi workflows on an Slurm cluster, subclass\n:class:`luigi.contrib.slurm.SlurmTask` as you would any :class:`luigi.Task`,\nbut override the ``work()`` method, instead of ``run()``, to define the job\ncode. Then, run your Luigi workflow from the master node, assigning > 1\n``workers`` in order to distribute the tasks in parallel across the cluster.\n\nThe following is an example usage (and can also be found in ``slurm_tests.py``)\n\n.. 
code-block:: python\n\n import logging\n import luigi\n import os\n from luigi.contrib.slurm import SlurmTask\n\n logger = logging.getLogger('luigi-interface')\n\n\n class TestJobTask(SlurmTask):\n\n i = luigi.Parameter()\n\n def work(self):\n logger.info('Running test job...')\n with open(self.output().path, 'w') as f:\n f.write('this is a test')\n\n def output(self):\n return luigi.LocalTarget(os.path.join('/home', 'testfile_' + str(self.i)))\n\n\n if __name__ == '__main__':\n tasks = [TestJobTask(i=str(i), ntasks=i+1) for i in range(3)]\n luigi.build(tasks, local_scheduler=True, workers=3)\n\n\nThe ``ntasks`` parameter allows you to define different compute\nresource requirements for each task. In this example, the third Task\nasks for 3 CPU slots. If your cluster only contains nodes with 2\nCPUs, this task will hang indefinitely in the queue. See the docs for\n:class:`luigi.contrib.slurm.SlurmTask` for other Slurm parameters. As\nfor any task, you can also set these in your luigi configuration file\nas shown below.\n\n [SlurmTask]\n shared-tmp-dir = /home\n ntasks = 2\n\n\"\"\"\n\n# This extension is modeled after the hadoop.py approach.\n#\n# Implementation notes\n# The procedure:\n# - Pickle the current task\n# - Construct a sbatch argument that runs a generic runner function with the path to the pickled class\n# - Runner function loads the class from pickle\n# - Runner function hits the work button on it\n\nimport itertools\nimport luigi\nimport os\nimport pprint\nimport logging\nimport shutil\nimport subprocess\nimport sys\nimport time\nimport random\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nfrom luigi.contrib import slurm_runner\n\nPOLL_TIME = 15 # decided to hard-code rather than configure here\n\nclass slurm(luigi.Config):\n ntasks = luigi.IntParameter(default=2, significant=False)\n mem = luigi.IntParameter(default=4000, significant=False)\n gres = luigi.Parameter(default='', significant=False)\n partitions = luigi.ListParameter(default=[], description='randomly assign to one of the given partitions')\n accounts = luigi.DictParameter(default={}, description='mapping from partition to account name')\n time = luigi.Parameter(default='', significant=False)\n shared_tmp_dir = luigi.Parameter(default='/home', significant=False)\n work_dir = luigi.Parameter(default='', significant=False,\n description=\"Location of our environment, must be a directory \"\n \"shared across all executors.\")\n job_name_format = luigi.Parameter(\n significant=False, default='', description=\"A string that can be \"\n \"formatted with class variables to name the job with sbatch.\")\n run_locally = luigi.BoolParameter(\n significant=False,\n description=\"run locally instead of on the cluster\")\n poll_time = luigi.IntParameter(\n significant=False, default=POLL_TIME,\n description=\"specify the wait time to poll scontrol for the job status\")\n dont_remove_tmp_dir = luigi.BoolParameter(\n significant=False,\n description=\"don't delete the temporary directory used (for debugging)\")\n\n# see http://code.activestate.com/recipes/580745-retry-decorator-in-python/\ndef retry(delays=(0, 1, 5, 30, 180, 600, 3600),\n exception=Exception,\n report=lambda *args: None):\n def wrapper(function):\n def wrapped(*args, **kwargs):\n problems = []\n for delay in itertools.chain(delays, [None]):\n try:\n return function(*args, **kwargs)\n except exception as problem:\n problems.append(problem)\n if delay is None:\n report(\"retryable failed definitely:\", problems)\n raise\n else:\n 
report(\"retryable failed:\", problem,\n \"-- delaying for %ds\" % delay)\n time.sleep(delay)\n return wrapped\n return wrapper\n\n\nlogger = logging.getLogger('luigi-interface')\nlogger.propagate = 0\n\n\n@retry()\ndef _parse_job_state(job_id):\n \"\"\"Parse \"state\" from 'scontrol show jobid=ID -o' output\n\n Returns state for the scontrol output, Returns 'u' if\n `scontrol` output is empty or job_id is not found.\n\n \"\"\"\n job_out = subprocess.check_output(['scontrol', '-o', 'show', \"jobid={}\".format(job_id)]).decode()\n job_line = job_out.split()\n job_map = {}\n for job in job_line:\n job_s = job.split(\"=\")\n try:\n job_map[job_s[0]] = job_s[1]\n except Exception as e:\n print(\"No value found for \" + job_s[0])\n print(e)\n\n return job_map.get('JobState', 'u')\n\n\ndef _build_submit_command(cmd, job_name, outfile, errfile, ntasks, mem, gres, partition, account, time, sbatchfile):\n \"\"\"Submit shell command to Slurm, queue via `sbatch`\"\"\"\n sbatch_template = \"\"\"#!/bin/bash\n {cmd}\"\"\"\n submit_cmd = ['sbatch', '--parsable',\n '-o', '{outfile}',\n '-e', '{errfile}',\n '--ntasks', '{ntasks}',\n '--mem', '{mem}',\n '-J', '{job_name}',\n ]\n if gres != '':\n submit_cmd.extend(['--gres', '{gres}'])\n if partition != '':\n submit_cmd.extend(['--partition', '{partition}'])\n if account != '':\n submit_cmd.extend(['--account', '{account}'])\n if time != '':\n submit_cmd.extend(['--time', '{time}'])\n submit_cmd.append('{sbatchfile}')\n submit_template = ' '.join(submit_cmd)\n\n with open(sbatchfile, \"w\") as fp:\n fp.write(sbatch_template.format(cmd=cmd))\n\n return submit_template.format(\n sbatch_template=sbatch_template, job_name=job_name, outfile=outfile, errfile=errfile,\n ntasks=ntasks, mem=mem, sbatchfile=sbatchfile, gres=gres, partition=partition, account=account, time=time)\n\n\n@retry()\ndef _sbatch(submit_cmd):\n output = subprocess.check_output(submit_cmd, shell=True)\n return output\n\n\nclass SlurmTask(luigi.Task):\n\n \"\"\"\n Base class for executing a job on Slurm\n\n Override ``work()`` (rather than ``run()``) with your job code.\n\n Parameters:\n\n - ntasks: Number of CPUs (or \"slots\") to allocate for the Task.\n - mem: The amount of memory to allocate for the Task.\n - gres: The gres resources to allocate for the Task.\n - time: The time to allocate for the Task.\n - partitions: The partitions on which Tasks will be (randomly) allocated.\n - shared_tmp_dir: Shared drive accessible from all nodes in the cluster.\n Run method is pickled to a temporary folder in this path.\n - job_name_format: String that can be passed in to customize the job name\n string passed to sbatch; e.g. 
\"Task123_{task_family}_{ntasks}...\".\n - job_name: Exact job name to pass to sbatch.\n - run_locally: Run locally instead of on the cluster.\n - poll_time: the length of time to wait in order to poll the job\n - dont_remove_tmp_dir: Instead of deleting the temporary directory, keep it.\n\n \"\"\"\n\n slurm_config = slurm()\n\n job_name = luigi.Parameter(\n significant=False, default='',\n description=\"Explicit job name given via sbatch.\")\n\n def _setup_logging(self):\n global logger\n name = 'luigi-interface'\n logfile = \"{}.log\".format(self.output().path)\n logpath = os.path.dirname(logfile)\n if not os.path.exists(logpath):\n try:\n os.makedirs(logpath)\n except FileExistsError:\n pass\n logger.debug(\"adding handler to logfile: %s\", logfile)\n handler = logging.FileHandler(logfile)\n formatter = logging.Formatter(\"{}: %(asctime)s %(levelname)s %(message)s\".format(self.__class__.__name__))\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n # prevent duplicates, TODO, track:\n #loggers.update(dict(name, logger))\n # task-local access to the configured logger\n self.task_log = logger\n\n def __init__(self, *args, **kwargs):\n super(SlurmTask, self).__init__(*args, **kwargs)\n if self.job_name != '':\n # use explicitly provided job name\n pass\n elif self.job_name_format != '':\n # define the job name with the provided format\n self.job_name = self.job_name_format.format(\n task_family=self.task_family, **self.__dict__)\n else:\n # default to the task family\n self.job_name = self.task_family\n\n if not hasattr(self, 'mem') or self.mem is None:\n self.mem = self.slurm_config.mem\n\n if (not hasattr(self, 'partitions') or len(self.partitions) < 1) and len(self.slurm_config.partitions) > 0:\n self.partitions = self.slurm_config.partitions\n\n if (not hasattr(self, 'accounts') or len(self.accounts) < 1) and len(self.slurm_config.accounts) > 0:\n self.accounts = self.slurm_config.accounts\n\n def __str__(self):\n return '\\n'.join([\n pprint.pformat(vars(self.slurm_config), indent=2),\n pprint.pformat(vars(self), indent=2)\n ])\n\n @property\n def ntasks(self):\n return self.slurm_config.ntasks\n\n @property\n def gres(self):\n return self.slurm_config.gres\n\n @property\n def time(self):\n return self.slurm_config.time\n\n @property\n def shared_tmp_dir(self):\n return self.slurm_config.shared_tmp_dir\n\n @property\n def work_dir(self):\n return self.slurm_config.work_dir\n\n @property\n def job_name_format(self):\n return self.slurm_config.job_name_format\n\n @property\n def run_locally(self):\n return self.slurm_config.run_locally\n\n @property\n def poll_time(self):\n return self.slurm_config.poll_time\n\n @property\n def dont_remove_tmp_dir(self):\n return self.slurm_config.dont_remove_tmp_dir\n\n def _fetch_task_failures(self):\n if not os.path.exists(self.errfile):\n logger.info('No error file')\n return []\n with open(self.errfile, \"r\") as f:\n errors = f.readlines()\n if errors == []:\n return errors\n if errors[0].strip() == 'stdin: is not a tty':\n errors.pop(0)\n return errors\n\n def _fetch_task_out(self):\n if not os.path.exists(self.outfile):\n logger.info('No output file')\n return []\n with open(self.outfile, \"r\") as f:\n output = f.readlines()\n return output\n\n def _init_local(self):\n # Set up temp folder in shared directory (trim to max filename length)\n base_tmp_dir = self.shared_tmp_dir\n random_id = '%016x' % random.getrandbits(64)\n folder_name = self.task_id + '-' + random_id\n self.tmp_dir = 
os.path.join(base_tmp_dir, folder_name)\n max_filename_length = os.fstatvfs(0).f_namemax\n self.tmp_dir = self.tmp_dir[:max_filename_length]\n logger.info(\"Tmp dir: %s\", self.tmp_dir)\n os.makedirs(self.tmp_dir)\n\n # Dump the code to be run into a pickle file\n self._dump(self.tmp_dir)\n\n def _dump(self, out_dir=''):\n \"\"\"Dump instance to file.\"\"\"\n with self.no_unpicklable_properties():\n self.job_file = os.path.join(out_dir, 'job.pickle')\n if self.__module__ == '__main__':\n d = pickle.dumps(self, 0).decode()\n module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]\n d = d.replace('(c__main__', \"(c\" + module_name)\n with open(self.job_file, \"w\") as f:\n f.write(d)\n else:\n with open(self.job_file, \"wb\") as f:\n pickle.dump(self, f)\n\n def run(self):\n self.init_vars()\n if self.run_locally:\n self.work()\n else:\n self._init_local()\n # let the scheduler retry on memory errors, up to some hard memory limit\n self._run_job()\n # The procedure:\n # - Pickle the run method\n # - Construct a sbatch argument that runs a generic runner function with the path to the run method\n # - Runner function loads the run method\n # - Runner class untars the dependencies\n # - Runner function calls the class's work() method\n\n def init_vars(self):\n \"\"\"\n Initialise vars here that won't be available in the slurm environment,\n e.g. information from other luigi tasks.\n Save them in object variables so that they are serialised before work() is called.\n \"\"\"\n pass\n\n def work(self):\n \"\"\"Override this method, rather than ``run()``, for your actual work.\"\"\"\n pass\n\n def _run_job(self):\n # Build a sbatch argument that will run slurm_runner.py on the directory we've specified\n runner_path = slurm_runner.__file__\n if runner_path.endswith(\"pyc\"):\n runner_path = runner_path[:-3] + \"py\"\n job_str = 'cd \"{}\"; python {} --tmp-dir \"{}\"'.format(\n (self.work_dir if len(self.work_dir) else os.getcwd()), runner_path, self.tmp_dir\n )\n\n # Build sbatch file and submit command\n self.outfile = os.path.join(self.tmp_dir, 'job.out')\n self.errfile = os.path.join(self.tmp_dir, 'job.err')\n sbatchfile = os.path.join(self.tmp_dir, '{}.sbatch'.format(self.task_family))\n\n # partition allocation, random for now; default to empty strings so that\n # _build_submit_command omits --partition/--account when none are configured\n self.partition = ''\n self.account = ''\n if len(self.partitions) > 0:\n i = random.randrange(len(self.partitions))\n self.partition = self.partitions[i]\n self.account = self.accounts.get(self.partition) or ''\n\n submit_cmd = _build_submit_command(job_str, self.task_family, self.outfile,\n self.errfile, self.ntasks, self.mem,\n self.gres, self.partition, self.account, self.time, sbatchfile)\n logger.debug('sbatch command: {}'.format(submit_cmd))\n\n # Submit the job and grab job ID\n cwd = os.getcwd()\n os.chdir(self.tmp_dir)\n output = _sbatch(submit_cmd)\n os.chdir(cwd)\n self.job_id = output.decode().strip()\n logger.debug(\"Submitted job to slurm with job id: {}\".format(self.job_id))\n\n successful, stderr, stdout, elapsed = self._track_job()\n\n # Now delete the temporaries, if they're there.\n if not self.dont_remove_tmp_dir:\n logger.info('Removing temporary directory {}'.format(self.tmp_dir))\n if os.path.exists(self.tmp_dir):\n shutil.rmtree(self.tmp_dir, ignore_errors=True)\n\n # stop here if the job was not successful\n if not successful:\n raise SlurmError('Slurm job has FAILED:', stdout, stderr, elapsed)\n\n def _track_job(self):\n successful = False\n start = time.time()\n while True:\n # Sleep for a little bit\n time.sleep(self.poll_time)\n\n # See what the job's up to; a job that scontrol no longer knows\n # about (state 'u') is assumed to have completed -- see below\n job_status = 
_parse_job_state(self.job_id)\n elapsed = float(time.time() - start)\n if job_status == 'RUNNING' or job_status == 'COMPLETING':\n logger.info('Job is running ({:0.1f} seconds elapsed)...'.format(elapsed))\n elif job_status == 'PENDING':\n logger.info('Job is pending ({:0.1f} seconds elapsed)...'.format(elapsed))\n elif 'FAILED' in job_status:\n logger.error('Job has FAILED')\n break\n elif 'CANCELLED' in job_status:\n logger.error('Job has been CANCELLED')\n break\n elif job_status == 'COMPLETED' or job_status == 'u':\n # The job has left the queue, so treat it as done.\n successful = True # stderr output alone does not fail the task; raise from work() to fail it\n errors = self._fetch_task_failures()\n if not errors:\n logger.info('Job is done')\n else:\n for error in errors:\n logger.error(error)\n break\n elif job_status == 'TIMEOUT':\n logger.error('Job ran out of time')\n raise TimeoutError(\n '\\n'.join(self._fetch_task_out()),\n '\\n'.join(self._fetch_task_failures())\n )\n elif job_status == 'OUT_OF_MEMORY':\n logger.error('Job ran out of memory')\n raise OutOfMemoryError(\n '\\n'.join(self._fetch_task_out()),\n '\\n'.join(self._fetch_task_failures())\n )\n else:\n logger.error('Job status is UNKNOWN: {}'.format(job_status))\n raise SlurmError(\n \"job status isn't one of ['RUNNING', 'PENDING', 'COMPLETED', 'FAILED', 'CANCELLED', 'TIMEOUT', 'OUT_OF_MEMORY']: {}\".format(job_status),\n '\\n'.join(self._fetch_task_out()),\n '\\n'.join(self._fetch_task_failures())\n )\n stdout = '\\n'.join(self._fetch_task_out())\n stderr = '\\n'.join(self._fetch_task_failures())\n return (successful, stderr, stdout, elapsed)\n\n\nclass LocalSlurmTask(SlurmTask):\n \"\"\"A local version of SlurmTask, for easier debugging.\n\n This version skips the ``sbatch`` steps and simply runs ``work()``\n on the local node, so you don't need to be on a Slurm cluster to\n use your Task in a test workflow.\n \"\"\"\n\n def run(self):\n self.work()\n\n\nclass SlurmError(RuntimeError):\n def __init__(self, message, out=None, err=None, elapsed=None):\n super(SlurmError, self).__init__(message, out, err)\n self.message = message\n self.out = out\n self.err = err\n self.elapsed = elapsed\n\n def __str__(self):\n info = \"Task Time Elapsed {}:\\n{}\".format(self.elapsed, self.message)\n if self.out:\n info += \"\\nSTDOUT: \" + str(self.out)\n if self.err:\n info += \"\\nSTDERR: \" + str(self.err)\n return info\n\nclass OutOfMemoryError(SlurmError): pass\n\nclass TimeoutError(SlurmError): pass\n","sub_path":"luigi/contrib/slurm.py","file_name":"slurm.py","file_ext":"py","file_size_in_byte":19910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"514787054","text":"# Implement a base class Car. This class must have the following attributes: speed, color, name, is_police (boolean).\n# It must also have the methods go, stop and turn(direction), which report that the car has started moving, stopped or turned (and in which direction).\n# Define several child classes: TownCar, SportCar, WorkCar, PoliceCar. Add a show_speed method to the base class\n# that shows the car's current speed. Override the show_speed method for the TownCar and WorkCar classes.\n# At speeds above 60 (TownCar) and 40 (WorkCar), a speeding message must be printed.\n# Create instances of the classes and pass in attribute values. 
Access the attributes and print the result.\n# Call the methods and show the results as well.\n\n\nclass Car:\n def __init__(self, color, name, is_police=False):\n self.speed = 0\n self.color = color\n self.name = name\n self.is_police = is_police\n\n def go(self, speed):\n self.speed = speed\n print(f\"{self.name} {self.color} car started moving\\n\")\n\n def stop(self):\n self.speed = 0\n print(f\"{self.name} car stopped\\n\")\n\n def turn(self, direction):\n if direction in {\"left\", \"right\", \"forward\", \"backward\", \"u-turn\"}:\n print(f\"{self.name} car turned {direction}\")\n else:\n print(f\"{self.name} car cannot perform this maneuver\")\n\n def show_speed(self):\n print(f\"{self.name} car is moving at speed {self.speed}\\n\")\n\n\nclass TownCar(Car):\n __speed_limit = 60\n\n def show_speed(self):\n print(f\"{self.name} car is moving at speed {self.speed}\")\n if self.speed > self.__speed_limit:\n print(f\"{self.name} is exceeding the speed limit by {self.speed - self.__speed_limit}\\n\")\n\n\nclass SportCar(Car):\n pass\n\n\nclass WorkCar(Car):\n __speed_limit = 40\n\n def show_speed(self):\n print(f\"{self.name} {self.color} car is moving at speed {self.speed}\")\n if self.speed > self.__speed_limit:\n print(f\"{self.name} is exceeding the speed limit by {self.speed - self.__speed_limit}\\n\")\n\n\nclass PoliceCar(Car):\n def __init__(self, color, name):\n super().__init__(color, name, True)\n print('Police')\n\n\nlamby = SportCar('silver', 'Lamborgini 2020')\nlamby.go(120)\nlamby.show_speed()\ntoyota = TownCar('white', 'Toyota Corolla 2015')\ntoyota.go(65)\ntoyota.show_speed()\ndodge = PoliceCar('black', 'Dodge Charger 2019')\ndodge.go(135)\ntonka = WorkCar('orange', 'Tonka 2015')\ntonka.go(45)\ntonka.show_speed()\ntonka.turn('right')\ntonka.turn('yup')\ntonka.stop()\ntoyota.turn('left')\nlamby.turn('u-turn')\ndodge.turn('u-turn')\nlamby.stop()\ndodge.stop()\n\n\n","sub_path":"lesson6/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"415165106","text":"\"\"\"\nYour chance to explore Loops and Turtles!\n\nAuthors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,\n their colleagues and Cameron Reid.\n\"\"\"\n###############################################################################\n# DONE: 1.\n# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.\n###############################################################################\n\n###############################################################################\n# DONE: 2.\n# You should have RUN the m4e_loopy_turtles module and READ its code.\n# (Do so now if you have not already done so.)\n#\n# Below this comment, add ANY CODE THAT YOU WANT, as long as:\n# 1. You construct at least 2 rg.SimpleTurtle objects.\n# 2. Each rg.SimpleTurtle object draws something\n# (by moving, using its rg.Pen). ANYTHING is fine!\n# 3. Each rg.SimpleTurtle moves inside a LOOP.\n#\n# Be creative! Strive for way-cool pictures! 
Abstract pictures rule!\n#\n# If you make syntax (notational) errors, no worries -- get help\n# fixing them at either this session OR at the NEXT session.\n#\n# Don't forget to COMMIT-and-PUSH when you are done with this module.\n###############################################################################\n\nimport rosegraphics as rg\n\nwindow = rg.TurtleWindow()\n\ngreen_turtle = rg.SimpleTurtle('turtle')\ngreen_turtle.pen = rg.Pen('green', 2)\ngreen_turtle.speed = 200 # Fast Drawing\n\nblue_turtle = rg.SimpleTurtle('turtle')\nblue_turtle.pen = rg.Pen('blue', 3)\nblue_turtle.speed = 200 # Fast Drawing\n\nred_turtle = rg.SimpleTurtle('turtle')\nred_turtle.pen = rg.Pen('red', 5)\nred_turtle.speed = 200 # Fast Drawing\n\nsize = 300\nrng = 100\nfor k in range(rng):\n # Draws a square.\n green_turtle.draw_square(size)\n # The pen is picked up and moved.\n green_turtle.pen_up()\n green_turtle.left(90)\n green_turtle.forward(1)\n # Put the pen down again (so drawing resumes).\n green_turtle.pen_down()\n # Make the size for the NEXT square be 1.5 pixels smaller.\n size = size - 1.5\n\nsize = 150\nfor k in range(100):\n # Draws a circle.\n blue_turtle.draw_circle(size)\n # The pen is picked up and moved.\n blue_turtle.pen_up()\n blue_turtle.left(90)\n blue_turtle.forward(1)\n # Put the pen down again (so drawing resumes).\n blue_turtle.pen_down()\n # Make the size for the NEXT circle be 1 pixel smaller.\n size = size - 1\n\nsize = 600\n# Position the red turtle.\nred_turtle.pen_up()\nred_turtle.left(180)\nred_turtle.forward(300)\nred_turtle.left(90)\nred_turtle.forward(300)\nred_turtle.left(90)\nred_turtle.pen_down()\nfor k in range(25):\n # Draws a square.\n red_turtle.draw_square(size)\n # The pen is picked up and moved.\n red_turtle.pen_up()\n red_turtle.right(90)\n red_turtle.forward(1)\n red_turtle.left(90)\n red_turtle.right(180)\n red_turtle.forward(1)\n red_turtle.right(180)\n # Put the pen down again (so drawing resumes).\n red_turtle.pen_down()\n # Make the size for the NEXT square be 2 pixels larger.\n size = size + 2\n\n# Close the window on mouse click.\nwindow.close_on_mouse_click()","sub_path":"src/m6_your_turtles.py","file_name":"m6_your_turtles.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"211314501","text":"from collections import Counter\nN = int(input())\nA = Counter(map(int, input().split()))\n\n# every two equal sticks form one usable pair\nS = []\nfor a, c in A.items():\n S.extend([a] * (c // 2))\n\n# the largest rectangle uses the two longest pairs\nS.sort()\nif len(S) < 2:\n print(0)\nelse:\n print(S[-1] * S[-2])\n","sub_path":"AtCoder/arc/081c_3.py","file_name":"081c_3.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"327357299","text":"import aiohttp\nimport re\n\nfrom os.path import normpath\nfrom pyquery import PyQuery as pq\n\nfrom throttler import throttle_simultaneous\n\nIQDB_HOST = 'https://danbooru.iqdb.org/'\nIQDB_MARKER_ERROR = '