diff --git "a/4121.jsonl" "b/4121.jsonl" new file mode 100644--- /dev/null +++ "b/4121.jsonl" @@ -0,0 +1,646 @@ +{"seq_id":"408284161","text":"\"\"\"\nFile for calculating the voltage inverter threshold voltage.\n\n\"\"\"\n\nfrom uncertainties import ufloat\n\n#----------------------------------------------------------------------\ndef vt_calc(Vdd, VTp, VTn, muP, muN):\n \"\"\"Equation for threshold voltage.\"\"\"\n VT = (Vdd + VTp + VTn * ((muN / muP)** (1/2))) / (1 + ((muN / muP)**(1/2)))\n return VT\n\n# main program\n# For ZnPc in series with F8ZnPc\n#VTp = ufloat(2.53, 10.80)\n#VTp = ufloat(16.345, 11)\n#VTn = ufloat(-13.57, 2.31)\n#VTn = ufloat(2.01, 22)\n#muP = ufloat(1.7, 0.1)\n#muN = ufloat(1.0, 0.1)\n# For 1:1.5\nVTp = ufloat(25.0, 2) #check threshold whith +50\nVTn = ufloat(15.4, 4) #check threshold whith +50\nmuP = ufloat(8.3, 0.2)\n# muP = ufloat(1.7e-6, 1.2e-6)\nmuN = ufloat(5.5, 0.1)\n# muN = ufloat(2.8e-8, 5e-9)\nVdd = 50\n\nvt = vt_calc(Vdd, VTp, VTn, muP, muN)\n\nprint('Threshold voltage is: \\t', vt)","sub_path":"plot-scripts/voltage-inverter-threshold.py","file_name":"voltage-inverter-threshold.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"281795530","text":"import asyncio\nimport datetime\nimport pytz\nimport os\nimport os.path\n\nfrom astral import LocationInfo\nfrom astral.sun import sun\n\nimport config\nimport data_handler\nfrom plotter import Plotter\n\nif config.mastodon_enabled:\n from integration.mastodon_integration import MastodonIntegration\n\ntimezone = pytz.timezone(config.tz_name)\ncity = LocationInfo(name=config.tz_name.split('/')[1], region=config.tz_region, timezone=config.tz_name, latitude=config.latitude, longitude=config.longitude)\nsun_observer = city.observer\nsun_observer.elevation = config.elevation\n\n# Set Path\npath = os.path.dirname(os.path.abspath(__file__))\n\nasync def poll():\n # Sun information\n sun_info = sun(sun_observer)\n tz_info = (sun_info['sunrise']).tzinfo\n\n # Time information\n now = datetime.datetime.now()\n now = timezone.localize(now)\n today = now.strftime(\"%Y-%m-%d\")\n\n # Set the filename to be the human-readable timestamp\n csv_file = os.path.join(path, \"data\", today + \".csv\")\n png_file = os.path.join(path, \"out\", today + \".png\")\n\n suntime = datetime.datetime.now(tz_info)\n if suntime > sun_info['dawn'] and suntime < sun_info['dusk']:\n await data_handler.collect(config.IP_address, csv_file)\n\n if suntime > sun_info['dusk']:\n if not os.path.exists(png_file):\n kWh, x, y = data_handler.summarize(csv_file, timezone)\n Plotter(timezone, config.city_name, config.installed_max).generate_image(today, kWh, x, y, png_file)\n\n if config.mastodon_enabled:\n mastodon = MastodonIntegration(config.mastodon_api_base, config.mastodon_client_key, config.mastodon_client_secret, config.mastodon_access_token)\n mastodon.toot(kWh, png_file)\n\nif __name__ == '__main__':\n asyncio.run(poll())\n","sub_path":"solar.py","file_name":"solar.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"233994249","text":"from collections import deque\nimport torch\nfrom .Network import Net\nimport numpy as np\nimport random\n\n\nclass Agent:\n def __init__(self, AGENT_PARAMS):\n \"Parameters are set in the params.py file\"\n self.memory_size = AGENT_PARAMS[\"MEMORY_LENGTH\"]\n self.memory = deque(maxlen=self.memory_size)\n self.load_model = 
AGENT_PARAMS[\"LOAD_MODEL\"]\n self.model_name = AGENT_PARAMS[\"LOAD_MODEL_NAME\"]\n self.save_model = AGENT_PARAMS[\"SAVE_MODEL\"]\n self.train_model = AGENT_PARAMS[\"TRAIN_MODEL\"]\n\n self.load_model_path = AGENT_PARAMS[\"LOAD_MODEL_PATH\"]\n self.save_model_path = AGENT_PARAMS[\"SAVE_MODEL_PATH\"]\n\n self.n_tanks = AGENT_PARAMS[\"N_TANKS\"]\n self.state_size = AGENT_PARAMS[\"OBSERVATIONS\"]\n self.action_state = None\n self.action_size = AGENT_PARAMS[\"VALVE_POSITIONS\"]\n self.action_choices = self._build_action_choices(self.action_size)\n self.actions = None\n self.action_delay_cnt = [9] * self.n_tanks\n self.action_delay = AGENT_PARAMS[\"ACTION_DELAY\"]\n\n self.epsilon = AGENT_PARAMS[\"EPSILON\"]\n self.epsilon_min = AGENT_PARAMS[\"EPSILON_MIN\"]\n self.epsilon_decay = AGENT_PARAMS[\"EPSILON_DECAY\"]\n self.gamma = AGENT_PARAMS[\"GAMMA\"]\n\n self.learning_rate = AGENT_PARAMS[\"LEARNING_RATE\"]\n self.hl_size = AGENT_PARAMS[\"HIDDEN_LAYER_SIZE\"]\n self.batch_size = AGENT_PARAMS[\"BATCH_SIZE\"]\n\n self.Q_eval, self.Q_next = [], []\n for i in range(self.n_tanks):\n Q_eval_, Q_next_ = self._build_ANN(\n self.state_size,\n self.hl_size,\n self.action_size,\n learning_rate=self.learning_rate,\n i=i,\n )\n self.Q_eval.append(Q_eval_)\n self.Q_next.append(Q_next_)\n\n def _build_action_choices(self, action_size):\n \"Create a list of the valve positions ranging from 0-1\"\n valve_positions = []\n for i in range(action_size):\n valve_positions.append((i) / (action_size - 1))\n return np.array(list(reversed(valve_positions)))\n\n def _build_ANN(\n self, input_size, hidden_size, action_size, learning_rate, i\n ):\n if self.load_model[i]:\n path = (\n self.load_model_path\n + self.model_name[i]\n + \".pt\"\n )\n pytorch_path = torch.load(path)\n n_hl = (len(pytorch_path)-3)\n if n_hl == 0: # zero hidden later\n h_size = []\n elif n_hl == 1: # 1 hidden layer\n h_size = [len(pytorch_path['input.weight'])]\n elif n_hl == 3: # 2 hidden layers\n h_size = [len(pytorch_path['input.weight']), len(pytorch_path['hl1.bias'])]\n else:\n raise ValueError\n Q_net = Net(input_size, h_size, action_size, learning_rate[i])\n Q_net.load_state_dict(pytorch_path)\n Q_net.eval()\n return Q_net, Q_net\n \"Creates or loads a ANN valve function approximator\"\n\n Q_eval = Net(input_size, hidden_size[i], action_size, learning_rate[i])\n Q_next = Net(input_size, hidden_size[i], action_size, learning_rate[i])\n return Q_eval, Q_next\n\n def get_z(self, action):\n z = []\n for action in self.actions:\n z.append(self.action_choices[action])\n return z\n\n def remember(self, states, reward, terminated, t):\n \"Stores instances of each time step\"\n\n replay = []\n for i in range(self.n_tanks):\n\n if terminated[i]:\n if len(states) <= self.action_delay[i] + 2:\n action_state = states[i][0]\n else:\n action_state_index = -self.action_delay_cnt[i] - 2\n action_state = states[action_state_index][i]\n replay.append(\n np.array(\n [\n action_state,\n self.actions[i],\n reward[i],\n states[-1][i],\n terminated[i],\n False,\n str(i) + \"model\",\n ]\n )\n )\n\n elif (\n self.action_delay_cnt[i] >= self.action_delay[i]\n and t >= self.action_delay[i]\n ):\n action_state = states[-self.action_delay[i] - 2][i]\n replay.append(\n np.array(\n [\n action_state,\n self.actions[i],\n reward[i],\n states[-1][i],\n terminated[i],\n False,\n str(i) + \"model\",\n ]\n )\n )\n elif True in terminated:\n\n action_state_index = -self.action_delay_cnt[i] - 2\n try:\n action_state = states[action_state_index][i]\n except IndexError:\n 
action_state = states[0][i]\n replay.append(\n np.array(\n [\n action_state,\n self.actions[i],\n reward[i],\n states[-1][i],\n terminated[i],\n False,\n str(i) + \"model\",\n ]\n )\n )\n if True in terminated:\n self.memory.append(replay)\n elif not len(replay) == self.n_tanks:\n return\n else:\n self.memory.append(replay)\n\n def act_greedy(self, state, i):\n \"Predict the optimal action to take given the current state\"\n\n choice = self.Q_eval[i].forward(state[i])\n action = torch.argmax(choice).item()\n return action\n\n def act(self, state):\n \"\"\"\n Agent uses the state and gives either an\n action of exploration or explotation\n \"\"\"\n actions = []\n for i in range(self.n_tanks):\n if self.action_delay_cnt[i] >= self.action_delay[i]:\n self.action_delay_cnt[i] = 0\n\n if np.random.rand() <= float(self.epsilon[i]): # Exploration\n random_action = random.randint(0, self.action_size - 1)\n action = random_action\n actions.append(action)\n else:\n action = self.act_greedy(state, i) # Exploitation\n actions.append(action)\n else:\n actions.append(self.actions[i])\n self.action_delay_cnt[i] += 1\n self.actions = actions\n return self.actions\n\n def is_ready(self):\n \"Check if enough data has been collected\"\n if len(self.memory) < self.batch_size:\n return False\n return True\n\n def Qreplay(self, e):\n \"\"\"\"\n Train the model to improve the predicted value of consecutive\n recurring states, Off policy Q-learning with batch training\n \"\"\"\n minibatch = np.array(random.sample(self.memory, self.batch_size))\n for j in range(self.n_tanks):\n if self.train_model[j]:\n agent_batch = minibatch[:, j]\n dummy_data = np.stack(agent_batch[:, 5])\n dummy_data_index = np.where(dummy_data)[0]\n agent_batch_comp = np.delete(agent_batch, dummy_data_index, axis=0)\n\n states = np.stack(agent_batch_comp[:, 0])\n actions = np.stack(agent_batch_comp[:, 1])\n rewards = np.stack(agent_batch_comp[:, 2])\n next_states = np.stack(agent_batch_comp[:, 3])\n terminated = np.stack(agent_batch_comp[:, 4])\n\n self.Q_eval[j].zero_grad()\n Qpred = self.Q_eval[j].forward(states).to(self.Q_eval[j].device)\n Qnext = (\n self.Q_next[j].forward(next_states).to(self.Q_next[j].device)\n )\n\n maxA = Qnext.max(1)[1] # to(self.Q_eval.device)\n rewards = torch.tensor(rewards, dtype=torch.float32).to(\n self.Q_eval[j].device\n )\n\n Q_target = Qpred.clone()\n for i, Qnext_a in enumerate(maxA):\n if not terminated[i]:\n Q_target[i, actions[i]] = rewards[\n i\n ] + self.gamma * torch.max(Qnext[i, Qnext_a])\n else:\n Q_target[i, actions[i]] = rewards[i]\n loss = (\n self.Q_eval[j].loss(Qpred, Q_target).to(self.Q_eval[j].device)\n )\n loss.backward()\n\n self.Q_eval[j].optimizer.step()\n self.decay_exploration(j)\n\n def decay_exploration(self, j):\n \"Lower the epsilon valvue to favour greedy actions\"\n if self.epsilon[j] > self.epsilon_min[j]:\n self.epsilon[j] = self.epsilon[j] * self.epsilon_decay[j]\n\n def reset(self, init_state):\n self.action_state = init_state[0]\n self.action = None\n self.action_delay_cnt = self.action_delay\n\n def save_trained_model(self):\n \"Save the model given a better model has been fitted\"\n for i in range(self.n_tanks):\n if self.save_model[i]:\n model_name = \"Network_\" + str(self.hl_size[i]) + \"HL\" + str(i)\n\n path = self.save_model_path + model_name + \".pt\"\n torch.save(self.Q_eval[i].state_dict(), path)\n print(\"ANN_Model was 
saved\")\n","sub_path":"Q_learning/Tank_1/models/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":9903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"89809870","text":"while True:\n\n tall = int(input(\"請輸入身高(cm): \")) # 這兩個需要拿進來不然會變成無限迴圈\n weight = int(input(\"請輸入體重(kg): \")) # 這兩個需要拿進來不然會變成無限迴圈\n\n if (tall == -9999) or (weight == -9999):\n break # 先判斷要不要跳出去\n\n bmi = weight / ((tall / 100) ** 2) # 計算BMI\n\n # 開始印出訊息\n print(\"\\nBMI: %.2f\" % bmi)\n if bmi >= 30:\n print(\"State: fat\")\n\n elif bmi >= 25:\n print(\"State: over weight\")\n\n elif bmi >= 18.5:\n print(\"State: normal\")\n\n elif bmi < 18.5:\n print(\"State: under weight\")","sub_path":"_4_control_procedure/406/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"273395460","text":"import pandas as pd\nimport numpy as np\nimport sklearn\nfrom sklearn import linear_model\n# from sklearn.util import shuffle\nimport matplotlib.pyplot as pyplot\nimport pickle\nfrom matplotlib import style\n\n# Import all of the data\ndata = pd.read_csv(\"student-mat.csv\", sep=\";\")\n# Reduce the data down to the attributes that we want\ndata = data[[\"G1\", \"G2\", \"G3\", \"studytime\", \"failures\", \"absences\"]]\n\n# Label - What we're trying to get\npredict = \"G3\"\n\n# Return a new data frame that doesn't have G3 in it\nx = np.array(data.drop([predict], 1))\n\n# Actual G3 values\ny = np.array(data[predict])\n\nx_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size=0.1)\n\n'''\n# In this for loop, I am trying to maximize the quality of the model by using it's accuracy score\nbest = 0\nfor _ in range(100):\n # Splitting up 10% of our data into test samples\n x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size=0.1)\n\n # Creating a training model\n linear = linear_model.LinearRegression()\n\n # Fit the data to find a best fit line; Stores the line in linear\n linear.fit(x_train, y_train)\n\n # Returns a value that represents the accuracy of our model\n acc = linear.score(x_test, y_test)\n print(acc)\n\n # Only write to the pickle file if the new score is better than the previously recorded best\n if acc > best:\n best = acc\n # Saving the model\n with open(\"studentmodel.pickle\", \"wb\") as f:\n pickle.dump(linear, f)\n'''\n\n# Read in our pickle file\npickel_in = open(\"studentmodel.pickle\", \"rb\")\n\n# Load our model into the variable called linear\nlinear = pickle.load(pickel_in)\n\nprint(\"Co: \\n\", linear.coef_)\nprint(\"Intercept: \\n\", linear.intercept_)\n\n# Use the model to make a prediction\npredictions = linear.predict(x_test)\n\nfor x in range(len(predictions)):\n # Print out the prediction, the input data, and the actual score\n print(predictions[x], x_test[x], y_test[x])\n\n# Plotting\np = 'G1'\nstyle.use(\"ggplot\")\npyplot.scatter(data[p], data[\"G3\"])\npyplot.xlabel(p)\npyplot.ylabel(\"Final Grade\")\npyplot.show()","sub_path":"regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"344716361","text":"# Copyright 2017 Rice University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License 
at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport argparse\nimport re\nimport tensorflow as tf\nfrom itertools import chain\n\nCONFIG_GENERAL = ['model', 'latent_size', 'batch_size', 'num_epochs',\n 'learning_rate', 'print_step', 'alpha', 'beta']\nCONFIG_ENCODER = ['name', 'units', 'num_layers', 'tile']\nCONFIG_DECODER = ['units', 'num_layers', 'max_ast_depth']\nCONFIG_INFER = ['chars', 'vocab', 'vocab_size']\n\nC0 = 'CLASS0'\nUNK = '_UNK_'\nCHILD_EDGE = 'V'\nSIBLING_EDGE = 'H'\n\n\ndef length(tensor):\n elems = tf.sign(tf.reduce_max(tensor, axis=2))\n return tf.reduce_sum(elems, axis=1)\n\n\n# split s based on camel case and lower everything (uses '#' for split)\ndef split_camel(s):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1#\\2', s) # UC followed by LC\n s1 = re.sub('([a-z0-9])([A-Z])', r'\\1#\\2', s1) # LC followed by UC\n split = s1.split('#')\n return [s.lower() for s in split]\n\n\n# Do not move these imports to the top, it will introduce a cyclic dependency\nimport bayou.models.low_level_evidences.evidence\n\n\n# convert JSON to config\ndef read_config(js, chars_vocab=False):\n config = argparse.Namespace()\n\n for attr in CONFIG_GENERAL:\n config.__setattr__(attr, js[attr])\n \n config.evidence = bayou.models.low_level_evidences.evidence.Evidence.read_config(js['evidence'], chars_vocab)\n config.decoder = argparse.Namespace()\n for attr in CONFIG_DECODER:\n config.decoder.__setattr__(attr, js['decoder'][attr])\n if chars_vocab:\n for attr in CONFIG_INFER:\n config.decoder.__setattr__(attr, js['decoder'][attr])\n\n return config\n\n\n# convert config to JSON\ndef dump_config(config):\n js = {}\n\n for attr in CONFIG_GENERAL:\n js[attr] = config.__getattribute__(attr)\n\n js['evidence'] = [ev.dump_config() for ev in config.evidence]\n js['decoder'] = {attr: config.decoder.__getattribute__(attr) for attr in\n CONFIG_DECODER + CONFIG_INFER}\n\n return js\n\n\ndef gather_calls(node):\n \"\"\"\n Gathers all call nodes (recursively) in a given AST node\n\n :param node: the node to gather calls from\n :return: list of call nodes\n \"\"\"\n\n if type(node) is list:\n return list(chain.from_iterable([gather_calls(n) for n in node]))\n node_type = node['node']\n if node_type == 'DSubTree':\n return gather_calls(node['_nodes'])\n elif node_type == 'DBranch':\n return gather_calls(node['_cond']) + gather_calls(node['_then']) + gather_calls(node['_else'])\n elif node_type == 'DExcept':\n return gather_calls(node['_try']) + gather_calls(node['_catch'])\n elif node_type == 'DLoop':\n return gather_calls(node['_cond']) + gather_calls(node['_body'])\n else: # this node itself is a call\n return [node]\n","sub_path":"src/main/python/bayou/models/low_level_evidences/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"135616954","text":"#Import the url and BeautifulSoup libraries\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\n\r\ndef main():\r\n #Default factmonster url\r\n baseUrl = \"http://www.factmonster.com/country/\"\r\n\r\n #Get the user country input\r\n requestedCountry = raw_input(\"Please 
enter the country you wish to learn about: \") \r\n\r\n #Replace spaces with a dash\r\n if \" \" in requestedCountry:\r\n requestedCountry = requestedCountry.replace(\" \", \"-\")\r\n\r\n #Build Url\r\n requestUrl = baseUrl + requestedCountry.lower() + \".html\"\r\n\r\n #getData function using built url\r\n data = getData(requestUrl)\r\n\r\n #Output the data\r\n output(data)\r\n\r\ndef getData(URL):\r\n #Scrape the wiki page\r\n r = requests.get(URL)\r\n requestContent = r.text\r\n\r\n #Collect the data we need - Capital, Area, Population - Using Beautiful Soup\r\n soup = BeautifulSoup(requestContent, \"html.parser\")\r\n\r\n #Find Capital\r\n capitalTable = soup.find(\"p\", class_ = \"capital\")\r\n capitalText = capitalTable.get_text()\r\n capital = capitalText.split(':')\r\n capital = capital[1].split(',')\r\n capital = capital[0].strip()\r\n\r\n #Find Area\r\n areaTable = soup.find(\"p\", class_ = \"area\")\r\n areaText = areaTable.get_text()\r\n area = areaText.split(\":\")\r\n area = area[1].split('(')\r\n area = area[0].strip()\r\n\r\n #Find Population\r\n populationTable = soup.find(\"p\", class_ = \"population\")\r\n populationText = populationTable.get_text()\r\n population = populationText.split(\":\")\r\n population = population[1].split('(')\r\n population = population[0].strip()\r\n\r\n return [capital,area,population]\r\n \r\n \r\ndef output(countryInformation):\r\n print(countryInformation[0] + \" \" + countryInformation[1] + \" \" + countryInformation[2])\r\n\r\nmain()\r\n","sub_path":"Python/Tests/Webscape Test.py","file_name":"Webscape Test.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"522749051","text":"# -*- coding: utf-8 -*-\n\nfrom .textrank4zh import TextRank4Keyword, TextRank4Sentence\n\nimport sys\nsys.path.append(\"..\")\nfrom mlog.mlog import mlog\n\nclass TextRank(object):\n\t\"\"\"docstring for TextRank\"\"\"\n\tdef __init__(self):\n\t\tself.tr4w_word = TextRank4Keyword()\n\t\tself.tr4s_sent = TextRank4Sentence()\n\n\tdef filter(self,text):\n\t\tif text == \"\" or len(text) < 1:\n\t\t\tmlog.error(\"非法参数,参数不许为空\")\n\t\t\traise Exception(\"text is null\")\n\n\n\t# py2中text必须是utf8编码的str或者unicode对象,\n\t# py3中必须是utf8编码的bytes或者str对象\n\tdef get_keywords(self, text, keywords_num=5,window=2):\n\t\ttry:\n\t\t\tself.filter(text)\n\t\texcept Exception as e:\n\t\t\treturn []\n\t\tself.tr4w_word.analyze(text=text, lower=True, window=window)\n\t\titems = self.tr4w_word.get_keywords(keywords_num, word_min_len=2)\n\t\t# item.word, item.weight\n\t\treturn items\n\n\tdef get_key_phrases(self, text, keywords_num=3,window=2):\n\t\ttry:\n\t\t\tself.filter(text)\n\t\texcept Exception as e:\n\t\t\treturn []\n\t\tself.tr4w_word.analyze(text=text, lower=True, window=window)\n\t\tphrases = self.tr4w_word.get_keyphrases(keywords_num=keywords_num, min_occur_num= 2)\n\t\treturn phrases\n\n\tdef get_key_sentences(self, text, num=3):\n\t\ttry:\n\t\t\tself.filter(text)\n\t\texcept Exception as e:\n\t\t\treturn []\n\t\t# item.index, item.weight, item.sentence\n\t\tself.tr4s_sent.analyze(text=text, lower=True, source = 'all_filters')\n\t\tsentences = self.tr4s_sent.get_key_sentences(num=num)\n\t\treturn sentences\n\nif __name__ == '__main__':\n\ttr = TextRank()\n\ttext = \"美国是想让中国的核力量发展到他们一样的水平吗?亦或者是美国减到中国核武器的水平?不得不说我国这次的回应十分霸气。我国的核力量可以说是中美俄三国中常备核弹头最少的国家了,\"\n\t# text = \"这也带动了美国民众的反战情绪,这些可是美军的财神爷,美国政府自然得罪不起,因此在尼克松竞选时就承诺结束越南战争,即便打了20年没成功,也还是灰溜溜的回国了。对美军来说,\"\n\tkws = 
tr.get_keywords(text)\n\n\tfor kw in kws:\n\t\tprint(kw)","sub_path":"src/textrank/textrank.py","file_name":"textrank.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"234837903","text":"\r\n# 그리디(눈으로 풀이가 보임, 아이디어가 떠올라야만 풀수있음)\r\n# 구현 (눈으로 보여, 아이디어도 비교적 잘보여, 근데 코���이 어려워)\r\n\r\n# 구현의 대표적인 문제\r\n\r\n# 하루 24시간중에 (3 이라는 숫자)가 시 분 초에 몇번들어갈까?\r\n# 랜덤으로 x시간을 받았을때 3이라는 숫자가 과연 몇번 들어갈까?\r\n# 새어보는 프로그램을 만들려고해.\r\n\r\n\r\n# 5 = 5시\r\n# 5시 59분 59초\r\n# 한번이라도 3이 들어가면 카운트 1을 해준다.\r\n# 2시 30분 24초 = +1\r\n# 5시 33분 53초 = +1\r\n# 3시 00분 00초 = +1\r\n\r\n# 카운트는 몇개인가요? ?개\r\n\r\n\r\nhour = int(input())\r\n\r\ncount = 0\r\n\r\nfor h in range(hour+1):\r\n for m in range(60):\r\n for s in range(60):\r\n if '3' in str(h) + str(m) + str(s):\r\n count += 1\r\n\r\nprint(count)","sub_path":"문제/그리디 문제/h_m_s.py","file_name":"h_m_s.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"460056927","text":"\"\"\"Module for work with csv files\"\"\"\nimport csv\n\n\nclass Zoo:\n \"\"\"Class to represent zoo\"\"\"\n def __init__(self, file):\n self.animals_allowed = [\"Cat\", \"Dog\", \"Bird\"]\n self.animals_in_zoo = []\n with open(file, \"rt\") as csv_animals:\n for type_, name, weight in csv.reader(csv_animals):\n if type_ in self.animals_allowed:\n self.animals_in_zoo.append(self.add_animal(type_, name, weight))\n\n @staticmethod\n def add_animal(type_: str, name: str, weight: str):\n \"\"\"\n :param type_: type of animal\n :param name: name of animal\n :param weight: weight of animal\n :return: Animal object with specified params\n :raise: ValueError if animal of given type not allowed in zoo\n \"\"\"\n type_ = type_.lower()\n if type_ == \"cat\":\n return Cat(name, weight)\n if type_ == \"dog\":\n return Dog(name, weight)\n if type_ == \"bird\":\n return Bird(name, weight)\n raise ValueError\n\n def animals(self):\n \"\"\"\n :return: list of animals in zoo\n \"\"\"\n return self.animals_in_zoo\n\n\nclass Animal:\n \"\"\"Class representing animal\"\"\"\n def __init__(self, name: str, weight: str):\n try:\n self.weight = float(weight)\n except ValueError:\n raise ValueError\n self.name = name\n self.voice = \"\"\n\n def get_weight(self):\n \"\"\"\n :return: weight of animal\n \"\"\"\n return self.weight\n\n def say(self):\n \"\"\"\n :return: voice of animal\n \"\"\"\n return self.voice\n\n\nclass Cat(Animal):\n \"\"\"Class to represent animal Cat\"\"\"\n def __init__(self, name: str, weight: str):\n super().__init__(name, weight)\n self.voice = \"Meow\"\n\n\nclass Dog(Animal):\n \"\"\"Class to represent animal Dog\"\"\"\n def __init__(self, name: str, weight: str):\n super().__init__(name, weight)\n self.voice = \"Woof\"\n\n\nclass Bird(Animal):\n \"\"\"Class to represent animal Bird\"\"\"\n def __init__(self, name: str, weight: str):\n super().__init__(name, weight)\n self.voice = \"Tweet\"\n","sub_path":"factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"390981397","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# njuguoyi @ 2016-03-10 10:51:41\n\n'''\n排序不支持原生比较的对象\n'''\n\n# 内置的sorted()函数有一个关键字key,可以传入一个callable对象给它。这个callable对象对每个传入的对象返回一个值,该值会被sorted()用来排序这些对象。\nclass User:\n def __init__(self, user_id, last_name, first_name):\n self.user_id = user_id\n self.last_name = last_name\n 
self.first_name = first_name\n def __repr__(self):\n return 'User({},{},{})'.format(self.user_id, self.last_name, self.first_name)\n\ndef sort_notcompare():\n users = [User(23, 'c', 'a'), User(3, 'a', 'b'), User(99, 'c', 'c')]\n print(sorted(users, key=lambda u: u.user_id))\n print(sorted(users, key=lambda u: (u.last_name, u.first_name)))\n\nsort_notcompare()\n\n# 也可以使用operator.attrgetter()方法来代替lambda函数\n# operator.itemgetter()应用于字典类型\nfrom operator import attrgetter\nusers = [User(23, 'c', 'a'), User(3, 'a', 'b'), User(99, 'c', 'c')]\nprint(sorted(users, key=attrgetter('user_id')))\nprint(sorted(users, key=attrgetter('last_name', 'first_name')))\n","sub_path":"chap3/3.14.py","file_name":"3.14.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"234686440","text":"import time\nimport urllib.request\nimport sys\nimport json\nimport csv\nimport sys\nfrom lib.client import Client, get_api_path\n\n\ndef get_data_yunbi(market_id, date_time):\n \"\"\"Get transaction data from yunbi\n Input: market_id = Market identity(btccny etc.)\n date_time = Current time (e.g 2016-09-28_16:17:33)\"\"\"\n # Date Programmer Descrition of change\n # 2017/04/15 KePu Original code\n # 2017/04/16 KePu 1.update to python3\n \n # Authorization\n\n client = Client(access_key='your_access_key', secret_key ='your_secret_key')\n\n # member = (client.get(get_api_path('members')))\n\n #####################\n # get markets and prepare writer\n ######################\n # markets = client.get(get_api_path('markets'))\n len_trades = 1000\n # Get data from yunbi\n # time_stamp_last = time.time() # Get current time\n\n \n # file_time = time.strftime(\"%Y-%m-%d_%H:%M:%S\",time.localtime(trades[0]['at']))\n time_stamp_last = time.mktime(time.strptime(date_time, \"%Y-%m-%d_%H:%M:%S\"))\n while len_trades == 1000:\n file_time = time.strftime(\"%Y-%m-%d_%H:%M:%S\", time.localtime(time_stamp_last))\n csvname = market_id + '_' + file_time + '.csv'\n csvfile = open(csvname, 'w')\n writer = csv.writer(csvfile)\n title = ['date', 'id', 'price', 'volume', 'funds']\n writer.writerow(title)\n\n \n # Write data into file 1000 record at a time 100000 per file\n for ind_file in range(100):\n trades = client.get(get_api_path('trades'), params={'market': market_id, 'limit' : 1000, 'timestamp': time_stamp_last})\n time_stamp_last = trades[-1]['at'] # Set new time_stamp\n len_trades = len(trades)\n for ind in range(len_trades):\n data_got = [time.strftime(\"%Y-%m-%d_%H:%M:%S\",time.localtime(trades[ind]['at'])), trades[ind]['id'], trades[ind]['price'], trades[ind]['volume'], trades[ind]['funds']]\n writer.writerow(data_got)\n csvfile.close()\n\n \ndef main():\n argument=sys.argv\n get_data_yunbi(argument[1], argument[2])\nif __name__ == '__main__':\n main()\n","sub_path":"get_data_yunbi.py","file_name":"get_data_yunbi.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"446499280","text":"#! 
/usr/bin/env python3\nimport h5py as h5\nimport numpy as np\nimport os\nimport sharpy.utils.algebra as algebra\n\ncase_name = 'hale'\nroute = os.path.dirname(os.path.realpath(__file__)) + '/'\n\n# EXECUTION\nflow = ['BeamLoader',\n 'AerogridLoader',\n 'StaticTrim',\n 'DynamicCoupled',\n 'BeamLoads'\n ]\n\n# if free_flight is False, the motion of the centre of the wing is prescribed.\nfree_flight = True\nif not free_flight:\n case_name += '_prescribed'\n amplitude = 0 * np.pi / 180\n period = 3\n case_name += '_amp_' + str(amplitude).replace('.', '') + '_period_' + str(period)\n\n# FLIGHT CONDITIONS\n# the simulation is set such that the aircraft flies at a u_inf velocity while\n# the air is calm.\nu_inf = 10\nrho = 1.225\n\n# trim sigma = 1.5\nalpha = 4.31 * np.pi / 180\nbeta = 0\nroll = 0\ngravity = 'on'\ncs_deflection = -2.08 * np.pi / 180\nrudder_static_deflection = 0.0\nrudder_step = 0.0 * np.pi / 180\nthrust = 6.16\nsigma = 1.5\nlambda_dihedral = 20 * np.pi / 180\n\n# gust settings\ngust_intensity = 0.20\ngust_length = 1 * u_inf\ngust_offset = 0.2 * u_inf\n\n# numerics\nn_step = 5\nstructural_relaxation_factor = 0.6\nrelaxation_factor = 0.35\ntolerance = 1e-6\nfsi_tolerance = 1e-4\n\nnum_cores = 2\n\n# MODEL GEOMETRY\n# beam\nspan_main = 16.0\nlambda_main = 0.25\nea_main = 0.3\n\nea = 1e7\nga = 1e5\ngj = 1e4\neiy = 2e4\neiz = 4e6\nm_bar_main = 0.75\nj_bar_main = 0.075\n\nlength_fuselage = 10\noffset_fuselage = 0\nsigma_fuselage = 10\nm_bar_fuselage = 0.2\nj_bar_fuselage = 0.08\n\nspan_tail = 2.5\nea_tail = 0.5\nfin_height = 2.5\nea_fin = 0.5\nsigma_tail = 100\nm_bar_tail = 0.3\nj_bar_tail = 0.08\n\n# lumped masses\nn_lumped_mass = 1\nlumped_mass_nodes = np.zeros((n_lumped_mass,), dtype=int)\nlumped_mass = np.zeros((n_lumped_mass,))\nlumped_mass[0] = 50\nlumped_mass_inertia = np.zeros((n_lumped_mass, 3, 3))\nlumped_mass_position = np.zeros((n_lumped_mass, 3))\n\n# aero\nchord_main = 1.0\nchord_tail = 0.5\nchord_fin = 0.5\n\n# DISCRETISATION\n# spatial discretisation\n# chordiwse panels\nm = 4\n# spanwise elements\nn_elem_multiplier = 2\nn_elem_main = int(4 * n_elem_multiplier)\nn_elem_tail = int(2 * n_elem_multiplier)\nn_elem_fin = int(2 * n_elem_multiplier)\nn_elem_fuselage = int(2 * n_elem_multiplier)\nn_surfaces = 5\n\n# temporal discretisation\nphysical_time = 30\ntstep_factor = 1.\ndt = 1.0 / m / u_inf * tstep_factor\nn_tstep = 20\n\n# END OF INPUT-----------------------------------------------------------------\n\n# beam processing\nn_node_elem = 3\nspan_main1 = (1.0 - lambda_main) * span_main\nspan_main2 = lambda_main * span_main\n\nn_elem_main1 = round(n_elem_main * (1 - lambda_main))\nn_elem_main2 = n_elem_main - n_elem_main1\n\n# total number of elements\nn_elem = 0\nn_elem += n_elem_main1 + n_elem_main1\nn_elem += n_elem_main2 + n_elem_main2\nn_elem += n_elem_fuselage\nn_elem += n_elem_fin\nn_elem += n_elem_tail + n_elem_tail\n\n# number of nodes per part\nn_node_main1 = n_elem_main1 * (n_node_elem - 1) + 1\nn_node_main2 = n_elem_main2 * (n_node_elem - 1) + 1\nn_node_main = n_node_main1 + n_node_main2 - 1\nn_node_fuselage = n_elem_fuselage * (n_node_elem - 1) + 1\nn_node_fin = n_elem_fin * (n_node_elem - 1) + 1\nn_node_tail = n_elem_tail * (n_node_elem - 1) + 1\n\n# total number of nodes\nn_node = 0\nn_node += n_node_main1 + n_node_main1 - 1\nn_node += n_node_main2 - 1 + n_node_main2 - 1\nn_node += n_node_fuselage - 1\nn_node += n_node_fin - 1\nn_node += n_node_tail - 1\nn_node += n_node_tail - 1\n\n# stiffness and mass matrices\nn_stiffness = 3\nbase_stiffness_main = sigma 
* np.diag([ea, ga, ga, gj, eiy, eiz])\nbase_stiffness_fuselage = base_stiffness_main.copy() * sigma_fuselage\nbase_stiffness_fuselage[4, 4] = base_stiffness_fuselage[5, 5]\nbase_stiffness_tail = base_stiffness_main.copy() * sigma_tail\nbase_stiffness_tail[4, 4] = base_stiffness_tail[5, 5]\n\nn_mass = 3\nbase_mass_main = np.diag([m_bar_main, m_bar_main, m_bar_main, j_bar_main, 0.5 * j_bar_main, 0.5 * j_bar_main])\nbase_mass_fuselage = np.diag([m_bar_fuselage,\n m_bar_fuselage,\n m_bar_fuselage,\n j_bar_fuselage,\n j_bar_fuselage * 0.5,\n j_bar_fuselage * 0.5])\nbase_mass_tail = np.diag([m_bar_tail,\n m_bar_tail,\n m_bar_tail,\n j_bar_tail,\n j_bar_tail * 0.5,\n j_bar_tail * 0.5])\n\n# PLACEHOLDERS\n# beam\nx = np.zeros((n_node,))\ny = np.zeros((n_node,))\nz = np.zeros((n_node,))\nbeam_number = np.zeros((n_elem,), dtype=int)\nframe_of_reference_delta = np.zeros((n_elem, n_node_elem, 3))\nstructural_twist = np.zeros((n_elem, 3))\nconn = np.zeros((n_elem, n_node_elem), dtype=int)\nstiffness = np.zeros((n_stiffness, 6, 6))\nelem_stiffness = np.zeros((n_elem,), dtype=int)\nmass = np.zeros((n_mass, 6, 6))\nelem_mass = np.zeros((n_elem,), dtype=int)\nboundary_conditions = np.zeros((n_node,), dtype=int)\napp_forces = np.zeros((n_node, 6))\n\n# aero\nairfoil_distribution = np.zeros((n_elem, n_node_elem), dtype=int)\nsurface_distribution = np.zeros((n_elem,), dtype=int) - 1\nsurface_m = np.zeros((n_surfaces,), dtype=int)\nm_distribution = 'uniform'\naero_node = np.zeros((n_node,), dtype=bool)\ntwist = np.zeros((n_elem, n_node_elem))\nsweep = np.zeros((n_elem, n_node_elem))\nchord = np.zeros((n_elem, n_node_elem,))\nelastic_axis = np.zeros((n_elem, n_node_elem,))\n\n\n# FUNCTIONS-------------------------------------------------------------\ndef clean_test_files():\n fem_file_name = route + '/' + case_name + '.fem.h5'\n if os.path.isfile(fem_file_name):\n os.remove(fem_file_name)\n\n dyn_file_name = route + '/' + case_name + '.dyn.h5'\n if os.path.isfile(dyn_file_name):\n os.remove(dyn_file_name)\n\n aero_file_name = route + '/' + case_name + '.aero.h5'\n if os.path.isfile(aero_file_name):\n os.remove(aero_file_name)\n\n solver_file_name = route + '/' + case_name + '.sharpy'\n if os.path.isfile(solver_file_name):\n os.remove(solver_file_name)\n\n flightcon_file_name = route + '/' + case_name + '.flightcon.txt'\n if os.path.isfile(flightcon_file_name):\n os.remove(flightcon_file_name)\n\ndef generate_fem():\n stiffness[0, ...] = base_stiffness_main\n stiffness[1, ...] = base_stiffness_fuselage\n stiffness[2, ...] = base_stiffness_tail\n\n mass[0, ...] = base_mass_main\n mass[1, ...] = base_mass_fuselage\n mass[2, ...] 
= base_mass_tail\n\n we = 0\n wn = 0\n # inner right wing\n beam_number[we:we + n_elem_main1] = 0\n y[wn:wn + n_node_main1] = np.linspace(0.0, span_main1, n_node_main1)\n\n for ielem in range(n_elem_main1):\n conn[we + ielem, :] = ((np.ones((3,)) * (we + ielem) * (n_node_elem - 1)) +\n [0, 2, 1])\n for inode in range(n_node_elem):\n frame_of_reference_delta[we + ielem, inode, :] = [-1.0, 0.0, 0.0]\n\n elem_stiffness[we:we + n_elem_main1] = 0\n elem_mass[we:we + n_elem_main1] = 0\n boundary_conditions[0] = 1\n # remember this is in B FoR\n app_forces[0] = [0, thrust, 0, 0, 0, 0]\n we += n_elem_main1\n wn += n_node_main1\n\n # outer right wing\n beam_number[we:we + n_elem_main1] = 0\n y[wn:wn + n_node_main2 - 1] = y[wn - 1] + np.linspace(0.0, np.cos(lambda_dihedral) * span_main2, n_node_main2)[1:]\n z[wn:wn + n_node_main2 - 1] = z[wn - 1] + np.linspace(0.0, np.sin(lambda_dihedral) * span_main2, n_node_main2)[1:]\n for ielem in range(n_elem_main2):\n conn[we + ielem, :] = ((np.ones((3,)) * (we + ielem) * (n_node_elem - 1)) +\n [0, 2, 1])\n for inode in range(n_node_elem):\n frame_of_reference_delta[we + ielem, inode, :] = [-1.0, 0.0, 0.0]\n elem_stiffness[we:we + n_elem_main2] = 0\n elem_mass[we:we + n_elem_main2] = 0\n boundary_conditions[wn + n_node_main2 - 2] = -1\n we += n_elem_main2\n wn += n_node_main2 - 1\n\n # inner left wing\n beam_number[we:we + n_elem_main1 - 1] = 1\n y[wn:wn + n_node_main1 - 1] = np.linspace(0.0, -span_main1, n_node_main1)[1:]\n for ielem in range(n_elem_main1):\n conn[we + ielem, :] = ((np.ones((3,)) * (we + ielem) * (n_node_elem - 1)) +\n [0, 2, 1])\n for inode in range(n_node_elem):\n frame_of_reference_delta[we + ielem, inode, :] = [1.0, 0.0, 0.0]\n conn[we, 0] = 0\n elem_stiffness[we:we + n_elem_main1] = 0\n elem_mass[we:we + n_elem_main1] = 0\n we += n_elem_main1\n wn += n_node_main1 - 1\n\n # outer left wing\n beam_number[we:we + n_elem_main2] = 1\n y[wn:wn + n_node_main2 - 1] = y[wn - 1] + np.linspace(0.0, -np.cos(lambda_dihedral) * span_main2, n_node_main2)[1:]\n z[wn:wn + n_node_main2 - 1] = z[wn - 1] + np.linspace(0.0, np.sin(lambda_dihedral) * span_main2, n_node_main2)[1:]\n for ielem in range(n_elem_main2):\n conn[we + ielem, :] = ((np.ones((3,)) * (we + ielem) * (n_node_elem - 1)) +\n [0, 2, 1])\n for inode in range(n_node_elem):\n frame_of_reference_delta[we + ielem, inode, :] = [1.0, 0.0, 0.0]\n elem_stiffness[we:we + n_elem_main2] = 0\n elem_mass[we:we + n_elem_main2] = 0\n boundary_conditions[wn + n_node_main2 - 2] = -1\n we += n_elem_main2\n wn += n_node_main2 - 1\n\n # fuselage\n beam_number[we:we + n_elem_fuselage] = 2\n x[wn:wn + n_node_fuselage - 1] = np.linspace(0.0, length_fuselage, n_node_fuselage)[1:]\n z[wn:wn + n_node_fuselage - 1] = np.linspace(0.0, offset_fuselage, n_node_fuselage)[1:]\n for ielem in range(n_elem_fuselage):\n conn[we + ielem, :] = ((np.ones((3,)) * (we + ielem) * (n_node_elem - 1)) +\n [0, 2, 1])\n for inode in range(n_node_elem):\n frame_of_reference_delta[we + ielem, inode, :] = [0.0, 1.0, 0.0]\n conn[we, 0] = 0\n elem_stiffness[we:we + n_elem_fuselage] = 1\n elem_mass[we:we + n_elem_fuselage] = 1\n we += n_elem_fuselage\n wn += n_node_fuselage - 1\n global end_of_fuselage_node\n end_of_fuselage_node = wn - 1\n\n # fin\n beam_number[we:we + n_elem_fin] = 3\n x[wn:wn + n_node_fin - 1] = x[end_of_fuselage_node]\n z[wn:wn + n_node_fin - 1] = z[end_of_fuselage_node] + np.linspace(0.0, fin_height, n_node_fin)[1:]\n for ielem in range(n_elem_fin):\n conn[we + ielem, :] = ((np.ones((3,)) * (we + ielem) * (n_node_elem - 
1)) +\n [0, 2, 1])\n for inode in range(n_node_elem):\n frame_of_reference_delta[we + ielem, inode, :] = [-1.0, 0.0, 0.0]\n conn[we, 0] = end_of_fuselage_node\n elem_stiffness[we:we + n_elem_fin] = 2\n elem_mass[we:we + n_elem_fin] = 2\n we += n_elem_fin\n wn += n_node_fin - 1\n end_of_fin_node = wn - 1\n\n # right tail\n beam_number[we:we + n_elem_tail] = 4\n x[wn:wn + n_node_tail - 1] = x[end_of_fin_node]\n y[wn:wn + n_node_tail - 1] = np.linspace(0.0, span_tail, n_node_tail)[1:]\n z[wn:wn + n_node_tail - 1] = z[end_of_fin_node]\n for ielem in range(n_elem_tail):\n conn[we + ielem, :] = ((np.ones((3,)) * (we + ielem) * (n_node_elem - 1)) +\n [0, 2, 1])\n for inode in range(n_node_elem):\n frame_of_reference_delta[we + ielem, inode, :] = [-1.0, 0.0, 0.0]\n conn[we, 0] = end_of_fin_node\n elem_stiffness[we:we + n_elem_tail] = 2\n elem_mass[we:we + n_elem_tail] = 2\n boundary_conditions[wn + n_node_tail - 2] = -1\n we += n_elem_tail\n wn += n_node_tail - 1\n\n # left tail\n beam_number[we:we + n_elem_tail] = 5\n x[wn:wn + n_node_tail - 1] = x[end_of_fin_node]\n y[wn:wn + n_node_tail - 1] = np.linspace(0.0, -span_tail, n_node_tail)[1:]\n z[wn:wn + n_node_tail - 1] = z[end_of_fin_node]\n for ielem in range(n_elem_tail):\n conn[we + ielem, :] = ((np.ones((3,)) * (we + ielem) * (n_node_elem - 1)) +\n [0, 2, 1])\n for inode in range(n_node_elem):\n frame_of_reference_delta[we + ielem, inode, :] = [1.0, 0.0, 0.0]\n conn[we, 0] = end_of_fin_node\n elem_stiffness[we:we + n_elem_tail] = 2\n elem_mass[we:we + n_elem_tail] = 2\n boundary_conditions[wn + n_node_tail - 2] = -1\n we += n_elem_tail\n wn += n_node_tail - 1\n\n with h5.File(route + '/' + case_name + '.fem.h5', 'a') as h5file:\n coordinates = h5file.create_dataset('coordinates', data=np.column_stack((x, y, z)))\n conectivities = h5file.create_dataset('connectivities', data=conn)\n num_nodes_elem_handle = h5file.create_dataset(\n 'num_node_elem', data=n_node_elem)\n num_nodes_handle = h5file.create_dataset(\n 'num_node', data=n_node)\n num_elem_handle = h5file.create_dataset(\n 'num_elem', data=n_elem)\n stiffness_db_handle = h5file.create_dataset(\n 'stiffness_db', data=stiffness)\n stiffness_handle = h5file.create_dataset(\n 'elem_stiffness', data=elem_stiffness)\n mass_db_handle = h5file.create_dataset(\n 'mass_db', data=mass)\n mass_handle = h5file.create_dataset(\n 'elem_mass', data=elem_mass)\n frame_of_reference_delta_handle = h5file.create_dataset(\n 'frame_of_reference_delta', data=frame_of_reference_delta)\n structural_twist_handle = h5file.create_dataset(\n 'structural_twist', data=structural_twist)\n bocos_handle = h5file.create_dataset(\n 'boundary_conditions', data=boundary_conditions)\n beam_handle = h5file.create_dataset(\n 'beam_number', data=beam_number)\n app_forces_handle = h5file.create_dataset(\n 'app_forces', data=app_forces)\n lumped_mass_nodes_handle = h5file.create_dataset(\n 'lumped_mass_nodes', data=lumped_mass_nodes)\n lumped_mass_handle = h5file.create_dataset(\n 'lumped_mass', data=lumped_mass)\n lumped_mass_inertia_handle = h5file.create_dataset(\n 'lumped_mass_inertia', data=lumped_mass_inertia)\n lumped_mass_position_handle = h5file.create_dataset(\n 'lumped_mass_position', data=lumped_mass_position)\n\n\ndef generate_aero_file():\n global x, y, z\n # control surfaces\n n_control_surfaces = 2\n control_surface = np.zeros((n_elem, n_node_elem), dtype=int) - 1\n control_surface_type = np.zeros((n_control_surfaces,), dtype=int)\n control_surface_deflection = np.zeros((n_control_surfaces,))\n 
control_surface_chord = np.zeros((n_control_surfaces,), dtype=int)\n control_surface_hinge_coord = np.zeros((n_control_surfaces,), dtype=float)\n\n # control surface type 0 = static\n # control surface type 1 = dynamic\n control_surface_type[0] = 0\n control_surface_deflection[0] = cs_deflection\n control_surface_chord[0] = m\n control_surface_hinge_coord[0] = -0.25 # nondimensional wrt elastic axis (+ towards the trailing edge)\n\n control_surface_type[1] = 0\n control_surface_deflection[1] = rudder_static_deflection\n control_surface_chord[1] = 1\n control_surface_hinge_coord[1] = -0. # nondimensional wrt elastic axis (+ towards the trailing edge)\n\n we = 0\n wn = 0\n # right wing (surface 0, beam 0)\n i_surf = 0\n airfoil_distribution[we:we + n_elem_main, :] = 0\n surface_distribution[we:we + n_elem_main] = i_surf\n surface_m[i_surf] = m\n aero_node[wn:wn + n_node_main] = True\n temp_chord = np.linspace(chord_main, chord_main, n_node_main)\n temp_sweep = np.linspace(0.0, 0 * np.pi / 180, n_node_main)\n node_counter = 0\n for i_elem in range(we, we + n_elem_main):\n for i_local_node in range(n_node_elem):\n if not i_local_node == 0:\n node_counter += 1\n chord[i_elem, i_local_node] = temp_chord[node_counter]\n elastic_axis[i_elem, i_local_node] = ea_main\n sweep[i_elem, i_local_node] = temp_sweep[node_counter]\n\n we += n_elem_main\n wn += n_node_main\n\n # left wing (surface 1, beam 1)\n i_surf = 1\n airfoil_distribution[we:we + n_elem_main, :] = 0\n # airfoil_distribution[wn:wn + n_node_main - 1] = 0\n surface_distribution[we:we + n_elem_main] = i_surf\n surface_m[i_surf] = m\n aero_node[wn:wn + n_node_main - 1] = True\n # chord[wn:wn + num_node_main - 1] = np.linspace(main_chord, main_tip_chord, num_node_main)[1:]\n # chord[wn:wn + num_node_main - 1] = main_chord\n # elastic_axis[wn:wn + num_node_main - 1] = main_ea\n temp_chord = np.linspace(chord_main, chord_main, n_node_main)\n node_counter = 0\n for i_elem in range(we, we + n_elem_main):\n for i_local_node in range(n_node_elem):\n if not i_local_node == 0:\n node_counter += 1\n chord[i_elem, i_local_node] = temp_chord[node_counter]\n elastic_axis[i_elem, i_local_node] = ea_main\n sweep[i_elem, i_local_node] = -temp_sweep[node_counter]\n\n we += n_elem_main\n wn += n_node_main - 1\n\n we += n_elem_fuselage\n wn += n_node_fuselage - 1 - 1\n #\n # # fin (surface 2, beam 3)\n i_surf = 2\n airfoil_distribution[we:we + n_elem_fin, :] = 1\n # airfoil_distribution[wn:wn + n_node_fin] = 0\n surface_distribution[we:we + n_elem_fin] = i_surf\n surface_m[i_surf] = m\n aero_node[wn:wn + n_node_fin] = True\n # chord[wn:wn + num_node_fin] = fin_chord\n for i_elem in range(we, we + n_elem_fin):\n for i_local_node in range(n_node_elem):\n chord[i_elem, i_local_node] = chord_fin\n elastic_axis[i_elem, i_local_node] = ea_fin\n control_surface[i_elem, i_local_node] = 1\n # twist[end_of_fuselage_node] = 0\n # twist[wn:] = 0\n # elastic_axis[wn:wn + num_node_main] = fin_ea\n we += n_elem_fin\n wn += n_node_fin - 1\n #\n # # # right tail (surface 3, beam 4)\n i_surf = 3\n airfoil_distribution[we:we + n_elem_tail, :] = 2\n # airfoil_distribution[wn:wn + n_node_tail] = 0\n surface_distribution[we:we + n_elem_tail] = i_surf\n surface_m[i_surf] = m\n # XXX not very elegant\n aero_node[wn:] = True\n # chord[wn:wn + num_node_tail] = tail_chord\n # elastic_axis[wn:wn + num_node_main] = tail_ea\n for i_elem in range(we, we + n_elem_tail):\n for i_local_node in range(n_node_elem):\n twist[i_elem, i_local_node] = -0\n for i_elem in range(we, we + n_elem_tail):\n 
for i_local_node in range(n_node_elem):\n chord[i_elem, i_local_node] = chord_tail\n elastic_axis[i_elem, i_local_node] = ea_tail\n control_surface[i_elem, i_local_node] = 0\n\n we += n_elem_tail\n wn += n_node_tail\n #\n # # left tail (surface 4, beam 5)\n i_surf = 4\n airfoil_distribution[we:we + n_elem_tail, :] = 2\n # airfoil_distribution[wn:wn + n_node_tail - 1] = 0\n surface_distribution[we:we + n_elem_tail] = i_surf\n surface_m[i_surf] = m\n aero_node[wn:wn + n_node_tail - 1] = True\n # chord[wn:wn + num_node_tail] = tail_chord\n # elastic_axis[wn:wn + num_node_main] = tail_ea\n # twist[we:we + num_elem_tail] = -tail_twist\n for i_elem in range(we, we + n_elem_tail):\n for i_local_node in range(n_node_elem):\n twist[i_elem, i_local_node] = -0\n for i_elem in range(we, we + n_elem_tail):\n for i_local_node in range(n_node_elem):\n chord[i_elem, i_local_node] = chord_tail\n elastic_axis[i_elem, i_local_node] = ea_tail\n control_surface[i_elem, i_local_node] = 0\n we += n_elem_tail\n wn += n_node_tail\n\n with h5.File(route + '/' + case_name + '.aero.h5', 'a') as h5file:\n airfoils_group = h5file.create_group('airfoils')\n # add one airfoil\n naca_airfoil_main = airfoils_group.create_dataset('0', data=np.column_stack(\n generate_naca_camber(P=0, M=0)))\n naca_airfoil_tail = airfoils_group.create_dataset('1', data=np.column_stack(\n generate_naca_camber(P=0, M=0)))\n naca_airfoil_fin = airfoils_group.create_dataset('2', data=np.column_stack(\n generate_naca_camber(P=0, M=0)))\n\n # chord\n chord_input = h5file.create_dataset('chord', data=chord)\n dim_attr = chord_input.attrs['units'] = 'm'\n\n # twist\n twist_input = h5file.create_dataset('twist', data=twist)\n dim_attr = twist_input.attrs['units'] = 'rad'\n\n # sweep\n sweep_input = h5file.create_dataset('sweep', data=sweep)\n dim_attr = sweep_input.attrs['units'] = 'rad'\n\n # airfoil distribution\n airfoil_distribution_input = h5file.create_dataset('airfoil_distribution', data=airfoil_distribution)\n\n surface_distribution_input = h5file.create_dataset('surface_distribution', data=surface_distribution)\n surface_m_input = h5file.create_dataset('surface_m', data=surface_m)\n m_distribution_input = h5file.create_dataset('m_distribution', data=m_distribution.encode('ascii', 'ignore'))\n\n aero_node_input = h5file.create_dataset('aero_node', data=aero_node)\n elastic_axis_input = h5file.create_dataset('elastic_axis', data=elastic_axis)\n\n control_surface_input = h5file.create_dataset('control_surface', data=control_surface)\n control_surface_deflection_input = h5file.create_dataset('control_surface_deflection',\n data=control_surface_deflection)\n control_surface_chord_input = h5file.create_dataset('control_surface_chord', data=control_surface_chord)\n control_surface_hinge_coord_input = h5file.create_dataset('control_surface_hinge_coord',\n data=control_surface_hinge_coord)\n control_surface_types_input = h5file.create_dataset('control_surface_type', data=control_surface_type)\n\n\ndef generate_naca_camber(M=0, P=0):\n mm = M * 1e-2\n p = P * 1e-1\n\n def naca(x, mm, p):\n if x < 1e-6:\n return 0.0\n elif x < p:\n return mm / (p * p) * (2 * p * x - x * x)\n elif x > p and x < 1 + 1e-6:\n return mm / ((1 - p) * (1 - p)) * (1 - 2 * p + 2 * p * x - x * x)\n\n x_vec = np.linspace(0, 1, 1000)\n y_vec = np.array([naca(x, mm, p) for x in x_vec])\n return x_vec, y_vec\n\n\ndef generate_solver_file():\n file_name = route + '/' + case_name + '.sharpy'\n settings = dict()\n settings['SHARPy'] = {'case': case_name,\n 'route': route,\n 'flow': 
flow,\n 'write_screen': 'off',\n 'write_log': 'off',\n 'log_folder': route + '/output/',\n 'log_file': case_name + '.log'}\n\n settings['NonLinearStatic'] = {'print_info': 'off',\n 'max_iterations': 150,\n 'num_load_steps': 1,\n 'delta_curved': 1e-1,\n 'min_delta': tolerance,\n 'gravity_on': gravity,\n 'gravity': 9.81}\n\n settings['StaticUvlm'] = {'print_info': 'on',\n 'horseshoe': 'off',\n 'num_cores': num_cores,\n 'n_rollup': 0,\n 'rollup_dt': dt,\n 'rollup_aic_refresh': 1,\n 'rollup_tolerance': 1e-4,\n 'velocity_field_generator': 'SteadyVelocityField',\n 'velocity_field_input': {'u_inf': u_inf,\n 'u_inf_direction': [1., 0, 0]},\n 'rho': rho}\n\n settings['StaticCoupled'] = {'print_info': 'off',\n 'structural_solver': 'NonLinearStatic',\n 'structural_solver_settings': settings['NonLinearStatic'],\n 'aero_solver': 'StaticUvlm',\n 'aero_solver_settings': settings['StaticUvlm'],\n 'max_iter': 100,\n 'n_load_steps': n_step,\n 'tolerance': fsi_tolerance,\n 'relaxation_factor': structural_relaxation_factor}\n\n settings['StaticTrim'] = {'solver': 'StaticCoupled',\n 'solver_settings': settings['StaticCoupled'],\n 'initial_alpha': alpha,\n 'initial_deflection': cs_deflection,\n 'initial_thrust': thrust}\n\n settings['NonLinearDynamicCoupledStep'] = {'print_info': 'off',\n 'max_iterations': 950,\n 'delta_curved': 1e-1,\n 'min_delta': tolerance,\n 'newmark_damp': 5e-3,\n 'gravity_on': gravity,\n 'gravity': 9.81,\n 'num_steps': n_tstep,\n 'dt': dt,\n 'initial_velocity': u_inf}\n\n relative_motion = 'off'\n settings['StepUvlm'] = {'print_info': 'off',\n 'num_cores': num_cores,\n 'convection_scheme': 2,\n 'gamma_dot_filtering': 7,\n 'velocity_field_generator': 'GustVelocityField',\n 'velocity_field_input': {'u_inf': int(not free_flight) * u_inf,\n 'u_inf_direction': [1., 0, 0],\n 'gust_shape': '1-cos',\n 'gust_parameters': {'gust_length': gust_length,\n 'gust_intensity': gust_intensity * u_inf},\n 'offset': gust_offset,\n 'relative_motion': relative_motion},\n 'rho': rho,\n 'n_time_steps': n_tstep,\n 'dt': dt}\n settings['BeamLoads'] = {'csv_output': True}\n solver = 'NonLinearDynamicCoupledStep'\n settings['DynamicCoupled'] = {'structural_solver': solver,\n 'structural_solver_settings': settings[solver],\n 'aero_solver': 'StepUvlm',\n 'aero_solver_settings': settings['StepUvlm'],\n 'fsi_substeps': 200,\n 'fsi_tolerance': fsi_tolerance,\n 'relaxation_factor': relaxation_factor,\n 'minimum_steps': 1,\n 'relaxation_steps': 150,\n 'final_relaxation_factor': 0.5,\n 'n_time_steps': n_tstep,\n 'dt': dt,\n 'include_unsteady_force_contribution': 'on',}\n \n settings['BeamLoader'] = {'unsteady': 'on',\n 'orientation': algebra.euler2quat(np.array([roll,\n alpha,\n beta]))}\n\n settings['AerogridLoader'] = {'unsteady': 'on',\n 'aligned_grid': 'on',\n 'mstar': int(20 / tstep_factor),\n 'freestream_dir': ['1', '0', '0'],\n 'wake_shape_generator': 'StraightWake',\n 'wake_shape_generator_input': {'u_inf': u_inf,\n 'u_inf_direction': ['1', '0', '0'],\n 'dt': dt}}\n\n\n import configobj\n config = configobj.ConfigObj()\n config.filename = file_name\n for k, v in settings.items():\n config[k] = v\n config.write()\n\n\nclean_test_files()\ngenerate_fem()\ngenerate_aero_file()\ngenerate_solver_file()\n","sub_path":"tests/coupled/dynamic/hale/generate_hale.py","file_name":"generate_hale.py","file_ext":"py","file_size_in_byte":28263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"39203361","text":"import pandas as pd\nfrom sklearn.decomposition import PCA\nfrom 
sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.preprocessing import StandardScaler\n\ndataset = pd.read_csv('CC.csv')\n\n##Null values\nnulls = pd.DataFrame(dataset.isnull().sum().sort_values(ascending=False)[:25])\nnulls.columns = ['Null Count']\nnulls.index.name = 'Feature'\n# print(nulls)\n\n##handling the missing value\ndata = dataset.select_dtypes(include=[np.number]).interpolate().dropna()\n\nx_train = dataset.iloc[:,[2,-5,-6]]\n# y = dataset.iloc[:,-1]\n\nscaler = StandardScaler()\nscaler.fit(x_train)\n# Apply transform.\nx_scaler = scaler.transform(x_train)\nX_scaled = pd.DataFrame(x_scaler, columns = x_train.columns)\n\n\nfrom sklearn import metrics\nwcss = []\n# ##elbow method to know the number of clusters\nfor i in range(2,5):\n kmeans = KMeans(n_clusters=i,init='k-means++',max_iter=300,n_init=10,random_state=0)\n kmeans.fit(X_scaled)\n wcss.append(kmeans.inertia_)\n score = silhouette_score(X_scaled, kmeans.labels_, metric='euclidean')\n print(\"For n_clusters = {}, silhouette score is {})\".format(i, score))\n\nplt.plot(range(1,4),wcss)\nplt.title('the elbow method')\nplt.xlabel('Number of Clusters')\nplt.ylabel('Wcss')\nplt.show()\n\npca = PCA(2)\nx_pca = pca.fit_transform(X_scaled)\ndf2 = pd.DataFrame(data=x_pca)\n# finaldf = pd.concat([df2,dataset[['TENURE']]],axis=1)\n# print(finaldf)\n\nfrom sklearn import metrics\nwcss = []\n# ##elbow method to know the number of clusters\nfor i in range(2,5):\n kmeans = KMeans(n_clusters=i,init='k-means++',max_iter=300,n_init=10,random_state=0)\n kmeans.fit(df2)\n wcss.append(kmeans.inertia_)\n score = silhouette_score(df2, kmeans.labels_, metric='euclidean')\n print(\"For n_clusters = {}, silhouette score is {})\".format(i, score))\nplt.plot(range(1, 4), wcss)\nplt.title('the elbow method')\nplt.xlabel('Number of Clusters')\nplt.ylabel('Wcss')\nplt.show()","sub_path":"ICP6/sourcecode/ICP6_3.py","file_name":"ICP6_3.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"361458762","text":"from lxml.html import fromstring\nimport requests\n\n\nclass Parser(object):\n def __init__(self):\n self.html_parser = fromstring\n self.elements = []\n self.tags = ['//p', '//h1', '//h2', '//h3', '//h4', '//h5']\n\n def extract(self, funcs):\n for key, func in funcs.items():\n for el in self.elements:\n if func(el.text):\n yield key, el.text\n\n def feed(self, data):\n for tag in self.tags:\n for el in self.html_parser(data).xpath(tag):\n if el is not None and el.text is not None:\n self.elements.append(el)\n return self\n\n\nclass Crawler(object):\n def crawl(self, urls, funcs):\n for url in urls:\n r = requests.get(url)\n yield r.url, Parser().feed(r.text).extract(funcs)\n\n\n\"\"\"\n
    [Google News result sample - original HTML markup lost in extraction]\n    Headline: Obama proposes tougher consumer rules to protect IRA investors\n    Source: Los Angeles Times - 7 hours ago\n    Snippet: President Obama on Monday proposed tougher regulations on investment brokers who handle retirement funds, saying new rules would limit ...
\"\"\"\n\n\nclass Google(object):\n    def news_search(self, search, pages=1):\n        url = 'https://www.google.ca/search?q=%s&tbm=nws' % search\n        urls = [url] + ['%s&start=%d' % (url, p*10) for p in range(pages)]\n        for page, url in enumerate(urls):\n            print('Google news page %s' % page)\n            re = requests.get(url)\n            for u in self.extract_news_urls(re.text):\n                print(u)\n                yield u\n            print('-'*50)\n\n    def extract_news_urls(self, data):\n        elements = []\n        for tag in ['//h3//a']:\n            for el in fromstring(data).xpath(tag):\n                if el is not None and el.text is not None:\n                    elements.append(el)\n        for el in elements:\n            url = el.get('href').replace('/url?q=', '')\n            url = url[:url.find('&sa=U')]\n            yield url\n\nif __name__ == '__main__':\n    subject = 'iran'\n    g = Google()\n    urls = g.news_search(subject, 50)\n    extractors = {\n        subject: lambda x: any(i in x for i in ['iran', 'nuclear', 'Iran', 'Iranian', 'negotiation'])\n        # subject: lambda x: any(i in x for i in ['nootropics', 'Nootropics', 'Piracetam', 'Brain'])\n    }\n    crawler = Crawler()\n    with open(subject, 'w+') as sf:\n        for url, texts in crawler.crawl(urls, extractors):\n            sf.write('%s\\nURL: %s\\n%s\\n' % (('#' * 100), url, ('#' * 100)))\n            for sub, text in texts:\n                sf.write('%s\\n' % (text.strip()))\n","sub_path":"tools/crawler/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"28942481","text":"__author__ = 'ssalka'\n\n\ndef getrequest(dc=None):\n\t\"\"\"\n\tRequests user input for choice of dining hall and meal. Meal selection restricted to\n\tthose available at selected dining hall, e.g. 
\ndef getsummerrequest(dc=None):\n\t\"\"\"\n\tRequests user input for choice of dining hall and meal. Meal selection restricted to\n\tthose available at selected dining hall, e.g. only brunch and dinner at Cafe 3.\n\t\"\"\"\n\n\timport sys\n\timport time\n\n\tdiningcommons = ['Crossroads', 'Foothill', 'Cafe 3', 'Clark Kerr']\n\tday = time.strftime('%a')\n\tflg = 0\n\n\tif dc is None:\n\t\tflg = 1\n\t\tdc = raw_input('Note that only Crossroads is open during the summer. Enter \\'y\\' to continue. ').split(' ')\n\n\t\tif dc[0] == 'q': sys.exit()\n\n\t\t# Reformat input, match char cases\n\t\tfor k in range(len(dc)):\n\t\t\tdc[k] = dc[k][0].upper() + dc[k][1:].lower()\n\t\tdc = ' '.join(dc)\n\n\t\tif dc == 'Y':\n\t\t\tdc = 'Crossroads'\n\t\telif dc not in diningcommons:\n\t\t\tprint('\\nPlease choose one of:\\nCrossroads, Cafe 3, Clark Kerr, Foothill\\n')\n\t\t\treturn getsummerrequest()\n\n\t\tif dc in diningcommons[1:4]:\n\t\t\tprint('\\nSorry, only Crossroads is open during the Summer!\\n')\n\t\t\treturn getsummerrequest()  # returning here also makes the old flg2 flag unnecessary\n\n\t# Determine which meals are being served at each hall, depending on day\n\tbk = all([dc != diningcommons[2], day not in ['Sat','Sun']])\n\tbr = any([dc == diningcommons[2], day in ['Sat','Sun']])\n\tlu = all([dc != diningcommons[3], bk==1])\n\tdn = 1\n\n\tavailable = [bk, br, lu, dn]\n\tmeals = ['breakfast', 'brunch', 'lunch', 'dinner']\n\toptions = [available[i]*meals[i] for i in range(4)]\n\toptions = list(filter(('').__ne__, options))\n\n\twhich = 'Do you want ' + ', '.join(options[:-1]) + ' or ' + options[-1] + '? '\n\tif flg == 1:\n\t\tmeal = raw_input('Nice. ' + which).lower()\n\telse:\n\t\tmeal = raw_input(which).lower()\n\n\tif meal[0] == 'q': sys.exit()\n\n\tif meal not in options:\n\t\tif meal in meals:\n\t\t\tprint('\\nSorry, ' + meal + ' is not being served at ' + dc + ' today.\\n')\n\t\tprint('Please choose one of:\\n' + ', '.join(options) + '\\n')\n\t\treturn getsummerrequest(dc)  # was getrequest(dc): wrong function, and its result was discarded\n\n\tmeal = meal[0].upper() + meal[1:]\n\n\treturn dc, meal\n\ndef remove_tags(text):\n\t\"\"\"\n\tRemoves HTML tags from input string.\n\n\t>>> remove_tags('<b>Bold Text</b>')\n\t'Bold Text'\n\n\t>>> remove_tags('<br/>')\n\t''\n\n\t>>> remove_tags('Line 1<br>Line 2')\n\t'Line 1Line 2'\n\t\"\"\"\n
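\t# TAG_RE strips anything of the form '<...>'; it is recompiled on each call, which is fine at this scale.\n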
\timport re\n\tTAG_RE = re.compile(r'<[^>]+>')\n\treturn TAG_RE.sub('', text)\n\n# Not used in program\ndef gettext(url):\n\timport urllib\n\tfrom bs4 import BeautifulSoup\n\thtml = urllib.urlopen(url)\n\ttext = BeautifulSoup(html.read())\n\thtml.close()\n\treturn text.get_text()\n\ndef createMatrix(h, w):\n\t\"\"\"Create a zeros matrix of size h*w (rows and columns, respectively)\n\n\t >>> createMatrix(0,0)\n\t []\n\t >>> createMatrix(2,2)\n\t [['0', '0'], ['0', '0']]\n\t >>> createMatrix(4,1)\n\t [['0'], ['0'], ['0'], ['0']]\n\t >>> createMatrix(3,5)\n\t [['0', '0', '0', '0', '0'], ['0', '0', '0', '0', '0'], ['0', '0', '0', '0', '0']]\n\t \"\"\"\n\n\tMatrix = []\n\tfor row in range(h):\n\t\tMatrix.append(['0' for col in range(w)])\n\treturn Matrix\n\n\ndef getColor(row, col, mat):\n\t\"\"\"\n\t Determine whether cell [row,col] of the matrix will be colored black (n=0) or white (n=1)\n\t The input mat refers to a matrix. In the context of the project cellauto1.py, the matrix is created using\n\t the above function, createMatrix(h,w).\n\n\t The output is an integer n in 0:7, the digit of ruleBin that the input cell will be assigned.\n\n\t if case: Should return 7-n, where n = int('000',2) = 0\n\t >>> getColor(1,0,[['0', '0'], ['0', '0']])\n\t 7\n\n\t elif case: Should return 7-n, where n = int('111',2) = 7\n\t >>> getColor(2,2,[['0', '0', '1', '0', '0'], ['0', '1', '1', '1', '0'], ['1', '0', '0', '0', '1']])\n\t 0\n\n\t else case: Should return 7-n, where n = int('100',2) = 4\n\t >>> getColor(1,1,[['1', '0'], ['0', '1']])\n\t 3\n\t \"\"\"\n\tif col == 0:\n\t\tbin3 = '0' + ''.join(mat[row - 1][:2])\t\t\t\t# bin3 is the 3-digit binary number above the cell; '0'.join() put the pad in the middle\n\telif col == len(mat[1])-1:\n\t\tbin3 = ''.join(mat[row - 1][-2:]) + '0'\n\telse:\n\t\tbin3 = ''.join(mat[row - 1][col - 1:col + 2])\n\tn = int(bin3, 2)\n\treturn 7-n\n\nif __name__ == \"__main__\":\n\timport doctest\n\tdoctest.testmod()","sub_path":"fn.py","file_name":"fn.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"75934294","text":"from pyramid.config import Configurator\n\n\ndef main(global_config, **settings):\n\n    config = Configurator(settings=settings)\n\n    # Models\n    config.include('.models')\n\n    # Routes\n    config.include('.views.api')\n\n    config.scan()\n\n    return config.make_wsgi_app()\n","sub_path":"lab/lead_crud/lead_crud/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"360759492","text":"\"\"\"\nThis module deals with the configuration file in YAML format\n\"\"\"\nimport copy\nimport os\nimport re\n\nimport yaml\nfrom collections import abc\nfrom typing import Any, Iterable, Mapping, Union\n\n\nclass DataObject:\n    def __repr__(self):\n        return str({k: v for k, v in vars(self).items()})\n\n    def __getitem__(self, item):\n        \"\"\"\n        Overload on [] operator\n        Args:\n            item (str): parameter name\n        Returns:\n            typing.Any: whatever the parameter is holding\n        Raises:\n            AttributeError: if the item does not exist\n        \"\"\"\n        if hasattr(self, item):\n            return getattr(self, item)\n        raise AttributeError(\"No attribute named {} found\".format(item))\n\n\ndef data_to_object(data: Union[Mapping[str, Any], Iterable]) -> object:\n    \"\"\"\n    Gets a generic list/dict type and transforms it into an object\n    Returns:\n        typing.Any: the converted object\n    \"\"\"\n    if 
isinstance(data, abc.Mapping):\n r = DataObject()\n for k, v in data.items():\n if type(v) is dict or type(v) is list:\n setattr(r, k, data_to_object(v))\n else:\n setattr(r, k, v)\n return r\n elif isinstance(data, abc.Iterable):\n return [data_to_object(e) for e in data]\n else:\n return data\n\n\nclass Config(object):\n \"\"\"\n Class to deal with config files. You can access the parameters using the method get or with brackets\n \"\"\"\n def __init__(self, filepath=\"config.yaml\"):\n self.filepath = filepath\n if os.path.exists(filepath):\n with open(filepath, \"r\") as stream:\n self.__merge_object(data_to_object(yaml.safe_load(stream)))\n else:\n raise IOError(\"Config file not found on path \" + os.path.abspath(filepath))\n\n def get(self, param_name):\n \"\"\"\n Gets the parameter on config. Returns none if not found\n Args:\n param_name (str): Desired parameter\n Returns:\n typing.Any: Value if found\n Raises:\n AttributeError: if parameter not found\n \"\"\"\n if hasattr(self, param_name):\n return getattr(self, param_name)\n raise AttributeError(\"Parameter {} does not exist\".format(param_name))\n\n def set(self, param_name, param_value):\n \"\"\"\n Sets a value for the configuration. Creates it if does not exist\n Args:\n param_name (str): Parameter name\n param_value (object): Parameter value\n \"\"\"\n setattr(self, param_name, param_value)\n\n def save(self, filepath=None):\n \"\"\"\n Saves the current configuration values to the file\n \"\"\"\n if not filepath:\n filepath = self.filepath\n with open(filepath, \"w\") as stream:\n dic = copy.deepcopy(self.__dict__)\n del dic[\"filepath\"]\n yaml_str = \"\\n\".join([re.sub(r\" ?!!python/.*$\", \"\", l) for l in yaml.dump(dic).splitlines()])\n stream.write(yaml_str)\n\n def __getitem__(self, item):\n \"\"\"\n Overload on [] operator\n Args:\n item (str): parameter name\n Returns:\n typing.Any: whatever the parameter is holding\n Raises:\n AttributeError: If item does not exist\n \"\"\"\n if hasattr(self, item):\n return getattr(self, item)\n raise AttributeError(\"Parameter {} does not exist\".format(item))\n\n def __setitem__(self, key, value):\n \"\"\"\n Overloads the [] operator for set operation\n Args:\n key (str): param name\n value (typing.Any): param value\n \"\"\"\n setattr(self, key, value)\n\n def __merge_object(self, obj):\n \"\"\"\n Used to copy properties from one object to another if there isn't a naming conflict;\n Args:\n obj (typing.Any): object to be merged\n \"\"\"\n for item in obj.__dict__:\n # Check to make sure it can't be called... 
ie a method.\n # Also make sure the self doesn't have a property of the same name.\n if not callable(obj.__dict__[item]) and not hasattr(self, item):\n setattr(self, item, getattr(obj, item))\n","sub_path":"confyaml/confyaml.py","file_name":"confyaml.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"197855626","text":"import warnings\nimport numpy as np\n\ndef calculate(transition):\n # Correct very small numbers to zero\n transition[np.abs(transition) < 1e-15] = 0\n \n # Calculate eigenvalues and eigenvectors\n eigvalvec = np.linalg.eig(transition.T)\n\n # Get position of eigenvalue, where eigenvalue equals 1\n eigval_pos = int(np.argwhere(np.isclose(eigvalvec[0], 1)))\n\n # Get eigenvectors for chosen eigenvalue and norm it, such that the sum equals 1\n vec = np.abs(eigvalvec[1][:, eigval_pos])\n return vec / np.sum(vec)\n\ndef transition_entropy(transition):\n P = transition\n # Correct very small numbers to zero\n P[np.abs(P) < 1e-15] = 0\n\n # Calculate stationary distribution\n m = calculate(P)\n\n H = 0\n for i in range(np.shape(P)[0]):\n for j in range(np.shape(P)[1]):\n # Only calculate entropy, if P_ij is non zero, otherwise numpy will throw an error\n if P[i,j] != 0:\n H += m[i]*P[i,j]*np.log(P[i,j])\n return -H\n","sub_path":"utils/stationary.py","file_name":"stationary.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"537146735","text":"\"\"\"\nThis problem was asked by Amazon.\n\nImplement a stack that has the following methods:\n\npush(val), which pushes an element onto the stack\npop(), which pops off and returns the topmost element of the stack. If there are no elements in the stack, then it should throw an error or return null.\nmax(), which returns the maximum value in the stack currently. 
If there are no elements in the stack, then it should throw an error or return null.\nEach method should run in constant time.\n\"\"\"\nfrom utils import get_input_array\n\n\nclass Stack:\n    def __init__(self):\n        self.store = []\n        self.max_list = []\n\n\n    def __str__(self):\n        return str(self.store)\n\n\n    def push(self, val):\n        self.store.append(val)\n        if (self.max_list and val >= self.max_list[-1]) or not self.max_list:\n            self.max_list.append(val)\n\n\n    def pop(self):\n        if not self.store:\n            raise IndexError(\"Stack is empty!\")\n        temp = self.store[-1]\n        self.store.pop()\n\n        if temp == self.max_list[-1]:\n            self.max_list.pop()\n\n        return temp\n\n\n    def max(self):\n        if not self.store:\n            raise IndexError(\"Stack is empty!\")\n        return self.max_list[-1]\n\n\nif __name__ == '__main__':\n    array = get_input_array(\"Stack\")\n    stack = Stack()\n    for val in array:\n        stack.push(val)\n\n    print(\"{} is maximum\".format(stack.max()))\n    print(\"Popped {}\".format(stack.pop()))\n    print(\"Popped {}\".format(stack.pop()))\n    print(\"Popped {}\".format(stack.pop()))\n    print(\"{} is maximum\".format(stack.max()))\n","sub_path":"problems/dcp0043.py","file_name":"dcp0043.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"437509899","text":"'''\nSample code for 2016 ChE 101 HW 3 Prob 3.\n\nPrepared by Kevin Yang\n22 Dec 2015\n'''\n\nimport numpy as np\n\nphi1 = 3.0\nphi2 = 30.0\nX = 0.8\n\nprint((1+phi1)*((phi2+2)*np.log(1-X) + X)/((1+phi2)*((phi1+2)*np.log(1-X) + X)))\n\n\n","sub_path":"ps3/prob3_3.py","file_name":"prob3_3.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"336074773","text":"import tensorflow as tf\r\nimport codecs\r\n# X=[[tf,idf,tfidf,tr,intitle,pos]......]\r\n# Y=[0,1,1.....]\r\n\r\n\r\nranks= 6\r\ntopm = 10\r\n\r\ntotalsize=50\r\n\r\ndataname=\"entityOutPut_originCut-pyltp_5006_datacache.txt\"\r\nf=codecs.open(dataname,'r','utf-8')\r\nX=[]\r\nY=[]\r\ndims=ranks+1\r\nlinenum=0\r\nx_news=[]\r\nyy=[]\r\n\r\n\r\nfor line in f.readlines():\r\n    linenum+=1\r\n    xx=[]\r\n    data=line.strip().split(\" \")\r\n    for i in range(ranks):\r\n        xx.append(float(data[i]))\r\n    x_news.append(xx)\r\n    yy.append(float(data[ranks]))\r\n    if linenum%10==0:\r\n        X.append(x_news)\r\n        Y.append([yy])\r\n        x_news=[]\r\n        yy=[]\r\n\r\nprint(X)\r\nprint(Y)\r\n\r\nx = tf.placeholder(tf.float32, shape=(topm, ranks), name=\"x-input\")\r\ny_ = tf.placeholder(tf.float32, shape=(1, 10), name='y-input')\r\n\r\ndef get_weight(shape, lambdaa):\r\n    var = tf.Variable(tf.random_normal(shape),dtype=tf.float32)\r\n    tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(lambdaa)(var))\r\n    return var\r\n\r\n\r\n\r\n# layer_dimension=[2,1]\r\nlayer_dimension=[ranks,128,1024,512,topm]\r\nn_layers=len(layer_dimension)\r\n\r\ncur_layer = x\r\nin_dimension = layer_dimension[0]\r\n\r\nfor i in range(1,n_layers):\r\n    out_dimension = layer_dimension[i]\r\n    weight = get_weight([in_dimension, out_dimension],0.001)\r\n    bias = tf.Variable(tf.constant(0.1, shape=[out_dimension]))\r\n    if i != n_layers-1:\r\n        cur_layer = tf.nn.relu(tf.matmul(cur_layer, weight)+bias)\r\n    else:\r\n        cur_layer = tf.nn.sigmoid(tf.matmul(cur_layer, weight) + bias)\r\n    in_dimension=layer_dimension[i]\r\n\r\ny=cur_layer\r\n\r\nif __name__ == '__main__':\r\n    # the duplicated \"if __name__ == '__main__':\" guard was removed\r\n    # cross_entropy_loss = -tf.reduce_mean(tf.multiply(y_,tf.log(y))+tf.multiply(1-y_,tf.log(1-y)))\r\n
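    # Clip y away from 0 before the log so the loss can never become NaN/inf.\r\n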
    cross_entropy_loss = -tf.reduce_mean(y_*tf.log(tf.clip_by_value(y, 1e-12, 1.0)))  # the tf.log() was missing\r\n    tf.add_to_collection('losses', cross_entropy_loss)\r\n\r\n    loss = tf.add_n(tf.get_collection('losses'))\r\n    train_step=tf.train.AdamOptimizer(0.001).minimize(loss)\r\n\r\n    loss_result=[]\r\n\r\n    print('start training')\r\n    saver = tf.train.Saver()\r\n\r\n    with tf.Session() as sess:\r\n        init_op = tf.global_variables_initializer()\r\n        sess.run(init_op)\r\n        STEPS = 10000\r\n        for i in range(STEPS):\r\n            # start = (i*batch_size) % data_size\r\n            # end = min(start+batch_size,data_size)\r\n            # start=0\r\n            # end=49\r\n            # sess.run(train_step,feed_dict={x: X[start:end], y_:Y[start:end]})\r\n            # if i%1000 == 0 :\r\n            #     cur_loss = sess.run(loss,feed_dict={x: X[start:end], y_:Y[start:end]})\r\n            #     print(\"After %d training steps , current loss is %g .\"%(i,cur_loss))\r\n            for j in range(totalsize):\r\n                sess.run(train_step, feed_dict={x: X[j], y_: Y[j]})\r\n                if i % 1000 == 0:  # was 'STEPS % 1000 == 0', which is always true\r\n                    cur_loss = sess.run(loss, feed_dict={x: X[j], y_: Y[j]})\r\n                    print(\"After %d training steps , current loss is %g .\" % (i, cur_loss))\r\n            if cur_loss<0.01:\r\n                break\r\n        saver.save(sess,\"./saved_model/my_model_v1\")\r\n\r\n\r\n\r\n\r\n\r\n        tf.train.write_graph(sess.graph_def, '../tmp/my-model', 'train.pbtxt')\r\n","sub_path":"code/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"362066710","text":"from base64 import b64encode\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand, CommandError\nfrom oauth2_provider.models import Application\n\n\nclass Command(BaseCommand):\n    help = 'Creates an oauth2-Application that represents a client and is used for its authentication procedure'\n\n    def add_arguments(self, parser):\n        parser.add_argument('-u', '--username', type=str, required=True)\n        parser.add_argument('-a', '--app_name', type=str, required=True)\n        parser.add_argument('-c', '--client_type', type=str, default=Application.CLIENT_PUBLIC)\n        parser.add_argument('-g', '--grant_type', type=str, default=Application.GRANT_PASSWORD)\n\n    def handle(self, *args, **options):\n        username = options['username']\n\n        User = get_user_model()\n\n        try:\n            user = User.objects.get(username=username)\n        except User.DoesNotExist:\n            raise CommandError('User named \"{}\" does not exist'.format(username))\n\n        if not user.is_superuser:\n            raise CommandError('User named \"{}\" is not a superuser'.format(username))\n\n        app_name = options['app_name']\n        client_type = options['client_type']\n        grant_type = options['grant_type']\n\n        try:\n            app = Application.objects.get(user=user, name=app_name)\n            success_message = '\\nApplication named {app_name} already exists.'.format(app_name=app_name)\n            self.stdout.write(self.style.SUCCESS(success_message))\n            self._print_app_info(app=app)\n        except Application.DoesNotExist:\n            app = Application.objects.create(\n                user=user, name=app_name, client_type=client_type, authorization_grant_type=grant_type)\n            success_message = '\\nCreated application named {app_name}'.format(app_name=app_name)\n            self.stdout.write(self.style.SUCCESS(success_message))\n            self._print_app_info(app=app)\n\n    ### PRIVATE ###\n\n    def _print_app_info(self, app: Application):\n        def generate_client_token(client_id: str, client_secret: str):\n            return b64encode((client_id + ':' + client_secret).encode('utf-8'))\n\n        client_id = app.client_id\n        client_secret = app.client_secret\n
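        # b64('client_id:client_secret') is the HTTP Basic credential an OAuth2 client presents to the token endpoint.\n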
        client_token = generate_client_token(client_id, client_secret)\n\n        info_message = '\\n'.join((\n            '\\n##################################',\n            'Application: {}'.format(app),\n            'client_type: {}'.format(app.client_type),\n            'grant_type: {}'.format(app.authorization_grant_type),\n            'client_id: {}'.format(client_id),\n            'client_secret: {}'.format(client_secret),\n            'client_token(b64): {}'.format(client_token),\n            '##################################\\n',\n        ))\n\n        self.stdout.write(self.style.SUCCESS(info_message))\n","sub_path":"app/source/geo_django_rf/restapi/management/commands/createoauth2app.py","file_name":"createoauth2app.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"83199940","text":"from .RandomMaskingActiveLearner import RandomMaskingActiveLearner\nfrom ..Dtos.Enums.ActiveLearnerType import ActiveLearnerType\nfrom ..Utils.Config import Config\nfrom typing import Set\n\nclass RelationFullMaskingLearner(\n    RandomMaskingActiveLearner,\n    functionalityType=ActiveLearnerType.RelationFullMaskingLearner\n):\n    def __init__(self, initDataSet, config: Config) -> None:\n        self.invalidRelationIds: Set[str] = set(\n            config.getSetting('InvalidRelationIds')\n        )\n\n        super().__init__(initDataSet, config)\n\n    def _isRelationValid(self, relation: str) -> bool:\n        return relation not in self.invalidRelationIds\n\n","sub_path":"main/ActiveLearner/RelationFullMaskingLearner.py","file_name":"RelationFullMaskingLearner.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"400770819","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:123456@192.168.33.20:3306/test?charset=utf8'\napp.config['SQLALCHEMY_POOL_SIZE'] = 5\napp.config['SQLALCHEMY_MAX_OVERFLOW'] = 10\n\ndb = SQLAlchemy()\ndb.init_app(app)\n\n\nclass Test(db.Model):\n    __tablename__ = \"test\"\n    id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n    name = db.Column(db.String(10), nullable=True)\n\n    @classmethod\n    def get_test(cls, t_id):\n        t = cls.query.filter(cls.id == t_id).first()\n        return t.name\n\n\n@app.teardown_request\ndef teardown_request(e):\n    db.session.remove()\n\n\n@app.route(\"/\")\ndef hello():\n    name = Test.get_test(t_id=1)\n    print(name)\n    return \"hello world\"\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0')\n","sub_path":"coding/learn_mysql_connector/sqlalchemy_orm_demo.py","file_name":"sqlalchemy_orm_demo.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"516160070","text":"# coop_routes.py\nimport os\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask import request\nfrom flask_pymongo import PyMongo\n\napp = Flask(__name__)\n\n# Get mongodb configurations\napp.config['MONGO_DBNAME'] = os.getenv('MONGO_DBNAME', 'coopdb')\napp.config['MONGO_URI'] = 'mongodb://%s:%s/restdb' % (\n    os.getenv('PYTHON_COOP_STORE_DB_SERVICE_HOST', 'localhost'),\n    os.getenv('PYTHON_COOP_STORE_DB_SERVICE_PORT'))  # was a bare tuple; '%' actually formats the URI\n\nmongo = PyMongo(app)\n\n@app.route('/star', methods=['GET'])\ndef get_all_stars():\n    star = mongo.db.stars\n    output = []\n    for s in star.find():\n        output.append({'name' : s['name'], 'distance' : s['distance']})\n    return jsonify({'result' : output})\n\n@app.route('/star/<name>', methods=['GET'])  # the '<name>' placeholder had been stripped from the route\ndef 
get_one_star(name):\n star = mongo.db.stars\n s = star.find_one({'name' : name})\n if s:\n output = {'name' : s['name'], 'distance' : s['distance']}\n else:\n output = \"No such name\"\n return jsonify({'result' : output})\n\n@app.route('/star', methods=['POST'])\ndef add_star():\n star = mongo.db.stars\n name = request.json['name']\n distance = request.json['distance']\n star_id = star.insert({'name': name, 'distance': distance})\n new_star = star.find_one({'_id': star_id })\n output = {'name' : new_star['name'], 'distance' : new_star['distance']}\n return jsonify({'result' : output})\n\nif __name__ == '__main__':\n app.run(host=os.getenv('PYTHON_COOP_STORE_SERVICE_HOST', '0.0.0.0'),\n port=int(os.getenv('PYTHON_COOP_STORE_SERVICE_PORT_HTTP', 8080)))\n","sub_path":"python-coop-store/coop_routes.py","file_name":"coop_routes.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"418044978","text":"# !/user/bin/env python\n# -*- coding:utf-8 -*- \n\n'''\n项目:bilibili一键签到\n目标网址:https://m.bilibili.com\n'''\n\nimport sys\nimport requests, re\n\n\ndef signin():\n print('*' * 30 + 'bilibili自动签到' + '*' * 30)\n cookie = sys.argv[1]\n #input('请输入您登录百度贴吧后获取的Cookie值:')\n url = 'https://m.bilibili.com/'\n headers = {\n 'Cookie': cookie,\n 'User-Agent':'Mozilla/5.0 (Linux; Android 9; Unspecified Device) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3764.0 Mobile Safari/537.36'\n }\n html = requests.get(url, headers=headers, verify=False)\n print(html)\n\nif __name__ == '__main__':\n signin()\n","sub_path":"Spiders/bs.py","file_name":"bs.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"562283852","text":"import spider.tencent\nimport spider.tencent.error\nimport lxml.html\n\n__all__ = [\"HomePage\", \"PostDetail\"]\n\ndef getElementById(html, id):\n\ttry:\n\t\treturn html.get_element_by_id(id)\n\texcept KeyError:\n\t\treturn None\n\nTOPIC_URL = \"http://\" + spider.tencent.HOSTNAME + \"/k/\"\nTOPIC_URL_LENGTH = len(TOPIC_URL)\n\ndef getPostContent(contentNode):\n\tfor child in contentNode.getchildren():\n\t\tif child.tag == \"a\":\n\t\t\turl = child.get(\"href\")\n\t\t\tif url[0:TOPIC_URL_LENGTH] == TOPIC_URL:\n\t\t\t\tcontinue\n\t\tchild.drop_tree()\n\treturn contentNode.text_content()\n\nclass HomePage:\n\t__HOME_PAGE_URL_LENGTH = len(spider.tencent.HOME_PAGE_URL)\n\t__POST_DETAIL_URL_LENGTH = len(spider.tencent.POST_DETAIL_URL)\n\n\tdef __init__(self, htmlText):\n\t\tself.__html = lxml.html.document_fromstring(htmlText)\n\n\tdef parse(self):\n\t\tuserName = self.__getUserName()\n\t\tpostList, userList = self.__parsePostPanel()\n\t\treturn (userName, postList, userList)\n\n\tdef __getUserName(self):\n\t\ttmp = self.__html.xpath('/html/head/title')\n\t\tif len(tmp) != 1:\n\t\t\traise spider.tencent.error.UnknownTemplate()\n\t\ttext = tmp[0].text.strip()\n\t\tif text[-8:] == '微空间_腾讯微博':\n\t\t\treturn text[:-9]\n\t\telif text[-8:] == '的微博_腾讯微博':\n\t\t\treturn text[:-8]\n\t\telse:\n\t\t\traise spider.tencent.error.UnknownTemplate()\n\n\tdef __parsePostPanel(self):\n\t\tpostList = []\n\t\tuserList = []\n\t\tpanel = getElementById(self.__html, 'talkList')\n\t\tif panel == None:\n\t\t\traise spider.tencent.error.UnknownTemplate()\n\t\tfor node in panel.xpath('./li'):\n\t\t\tpost, user = self.__parseSinglePost(node)\n\t\t\tpostList.extend(post)\n\t\t\tuserList.extend(user)\n\t\treturn (postList, userList)\n\n\tdef 
__getReplyNode(self, node):\n\t\ttmp = node.xpath('./div[@class=\"msgBox\"]/div[@class=\"replyBox\"]')\n\t\tif len(tmp) == 0:\n\t\t\treturn None\n\t\treturn tmp[0]\n\n\tdef __getContentNode(self, node):\n\t\ttmp = node.xpath('./div[@class=\"msgBox\"]/div[@class=\"msgCnt\"]')\n\t\tif len(tmp) == 0:\n\t\t\treturn None\n\t\treturn tmp[0]\n\n\tdef __getMainPostId(self, replyNode):\n\t\tpath = 'div[@class=\"msgBox\"]/div[@class=\"pubInfo\"]/' \\\n\t\t\t\t+ 'span[@class=\"left\"]/a[@class=\"zfNum\"]/@href'\n\t\ttmp = replyNode.xpath(path)\n\t\tif len(tmp) == 0:\n\t\t\treturn None\n\t\treturn int(self.__removePostDetailUrlPrefix(tmp[0]))\n\n\tdef __hasRepostInfo(self, node):\n\t\tpath = './div[@class=\"msgBox\"]/div[@class=\"pubInfo\"]/' \\\n\t\t\t\t+ 'span[@class=\"left\"]/a[@class=\"zfNum\"]'\n\t\ttmp = node.xpath(path)\n\t\treturn len(tmp) == 1\n\n\tdef __parseSinglePost(self, node):\n\t\tpostList = []\n\t\tuserList = []\n\t\tisMainPost = True\n\n\t\treplyNode = self.__getReplyNode(node)\n\t\tif replyNode != None:\n\t\t\tisMainPost = False\n\t\t\tpostId = self.__getMainPostId(replyNode)\n\t\t\tif postId:\n\t\t\t\tpostList.append((postId,))\n\n\t\tcontentNode = self.__getContentNode(node)\n\t\tif contentNode == None:\n\t\t\treturn (postList, userList)\n\n\t\tfor homePageUrl in contentNode.xpath('em/a/@href'):\n\t\t\tuser = self.__removeHomePageUrlPrefix(homePageUrl)\n\t\t\tif user:\n\t\t\t\tuserList.append(user)\n\n\t\tid = int(node.get(\"id\"))\n\n\t\tif isMainPost and self.__hasRepostInfo(node):\n\t\t\tpostList.append((id,))\n\t\t\treturn (postList, userList)\n\n\t\tcontent = getPostContent(contentNode)\n\t\tposition = content.find('||')\n\t\tif position >= 0:\n\t\t\tcontent = content[0:position].strip()\n\t\telse:\n\t\t\tcontent = content.strip()\n\t\tif content:\n\t\t\tpostList.append((id, content))\n\n\t\treturn (postList, userList)\n\n\tdef __removeHomePageUrlPrefix(self, string):\n\t\tif string[0:HomePage.__HOME_PAGE_URL_LENGTH] \\\n\t\t\t\t!= spider.tencent.HOME_PAGE_URL:\n\t\t\treturn None\n\t\treturn string[HomePage.__HOME_PAGE_URL_LENGTH:]\n\n\tdef __removePostDetailUrlPrefix(self, string):\n\t\tif string[0:HomePage.__POST_DETAIL_URL_LENGTH] \\\n\t\t\t\t!= spider.tencent.POST_DETAIL_URL:\n\t\t\treturn None\n\t\treturn string[HomePage.__POST_DETAIL_URL_LENGTH:]\n\nclass PostDetail:\n\tdef __init__(self, htmlText, postId):\n\t\tself.__html = lxml.html.document_fromstring(htmlText)\n\t\tself.__postId = postId\n\n\tdef parse(self, retrieveContent=False):\n\t\tif retrieveContent:\n\t\t\tcontent = self.__getMainPost()\n\t\t\tif content != None:\n\t\t\t\treturn (self.__getUserList(), self.__getNextUrl(),\n\t\t\t\t\tcontent[0], content[1])\n\t\treturn (self.__getUserList(), self.__getNextUrl())\n\n\tdef __getMainPost(self):\n\t\tbox = getElementById(self.__html, str(self.__postId))\n\t\tif box == None:\n\t\t\treturn None\n\t\tcontentNode = box.xpath(\n\t\t\t\t'div[@class=\"msgBox orginMsg1\"]/div[@class=\"msgCnt\"]')\n\t\tuserHome = box.xpath(\n\t\t\t\t'div[@class=\"msgBox orginMsg1\"]/div[@class=\"userName\"]/@rel')\n\t\tif len(contentNode) == 0 or len(userHome) == 0:\n\t\t\treturn None\n\t\treturn (getPostContent(contentNode[0]), userHome[0])\n\t\n\tdef __getUserList(self):\n\t\treplyPanel = getElementById(self.__html, \"replyList\")\n\t\tif replyPanel == None:\n\t\t\treturn []\n\t\tpath = 'li/div[@class=\"msgBox\"]/div[@class=\"userName\"]/@rel'\n\t\treturn replyPanel.xpath(path)\n\n\tdef __getNextUrl(self):\n\t\tnavigator = getElementById(self.__html, \"pageNav\")\n\t\tif navigator == 
None:\n\t\t\treturn None\n\t\tnext = navigator.xpath('a[@class=\"pageBtn\"]')\n\t\tif len(next) == 0:\n\t\t\treturn None\n\t\tnext = next[-1]\n\t\ttext = next.text_content().strip()\n\t\tif text == \"下一页>>\" or text == \"Next>>\":\n\t\t\treturn next.get(\"href\")\n\t\telse:\n\t\t\treturn None","sub_path":"extras/weibo-spider/spider/tencent/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"274353956","text":"import json\nimport sys\n\n#Bring in the params from hmmmodel.txt\nwith open(\"hmmmodel.txt\") as file:\n params = json.load(file)\n\nq0=params['q0']\nqf=params['qf']\ntransition_matrix=params['transition_matrix']\nemmission_matrix_2=params['emmission_matrix'] #{'VB':{'cat':2,'dog':7}, 'NN':['cat':5,..],...}\nvocabulary=params['vocabulary']\nemmission_matrix=params['wordsAskeys_emmission_matrix'] #{'cat':{VB:2, NN:3},'dog':{VB:7, IN:3},....}\ntag_counts = params['tag_counts']\n\n\n\n#read in the test file\ninput_path=sys.argv[1]\n\nfile = open(input_path, 'r')\n\nlines=file.readlines()\nfile.close()\n\n#Create new txt file\ntext_file = open(\"hmmoutput.txt\", \"w\")\n\nsentence_counts=0\n\nfor line in lines:\n sentence_counts +=1\n #print('\\nSTARTING A NEW SENTENCE!!!!!!!!!!!!!!!!!!!!!!\\n')\n #print(sentence_counts)\n #print(line)\n line=line.rstrip() #Get rid of newline characters\n words = line.split(' ') #Put all words in the line into a list\n\n \n counter=1 #so that we know the first word and the last word\n last_word = len(words) #How many words are there? if 8 words in the sentence, this will be 8\n probabilities = dict()\n \n for word in words:\n if word in vocabulary:\n emmissions = emmission_matrix[word] #just the emmissions for the word we are looking at\n else:\n emmissions = None\n \n #######What we do for the first word##############\n if counter == 1: #If we are looking at the first word\n if emmissions is None:\n for a,b in q0.items(): #If the word isnt in the vocab, then just use the initial probabilities\n probabilities[(a,)] = b\n else:\n for a,b in emmissions.items(): #Otherwise multiply the first emmission state by the possible transitions\n try:\n probabilities[(a,)] = b * q0[a]\n except:\n probabilities[(a,)] = 0\n\n \n \n ########What we do for the remaining words########### \n elif counter < last_word+1:\n if emmissions is None:\n new_probabilities = dict()\n for a,b in probabilities.items():\n initial_tag = a[-1]\n possible_transitions = transition_matrix[initial_tag]\n #######################v3 Addition Here#######################\n condense=list()\n for r,s in possible_transitions.items():\n condense.append((s,r))\n condense.sort(reverse=True)\n if len(condense) < 8:\n condensed=condense\n else:\n condensed=condense[:7]\n \n \n \n #######################End of V3 Addition#####################\n \n for j,k in condensed:\n prob = j\n new_probabilities[a+(k,)] = prob * b #Create a new tuple entry in probabilities dictionary which is the prob calculated by b (the probability already there)\n ############# I changed the above a bit toooo###################\n \n #print('\\nPROBABILITIES:',probabilities)\n #Update the dictionary\n trimmed_probabilities = dict()\n possible_enders = list()\n\n\n for seq, pr in new_probabilities.items():\n possible_enders.append(seq[-1])\n\n possible_enders = list(set(possible_enders))\n\n\n for g in possible_enders:\n temp=list()\n for seq, pr in new_probabilities.items():\n if seq[-1] == g:\n 
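# gather (probability, sequence) candidates ending in tag g; the reverse sort below keeps the best one\n                        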
temp.append((pr,seq))\n\n temp.sort(reverse=True)\n best_seq=temp[0][1]\n best_prob=temp[0][0]\n\n\n trimmed_probabilities[best_seq]=best_prob\n \n \n \n \n probabilities = trimmed_probabilities\n \n \n else:\n\n new_probabilities = dict() #New dictionary because we iterate through probabilities dict later and need this\n\n #Create a list of the possible tags for that word\n possible_tags = list() #A list of the tags that the word has emmission probabilities for\n for m,n in emmissions.items():\n possible_tags.append(m)\n \n\n for a,b in probabilities.items():\n initial_tag = a[-1]\n possible_transitions = transition_matrix[initial_tag]\n \n ###############Added to fix the unseen transition issue##############\n checker=list()\n for c,d in possible_transitions.items():\n checker.append(c)\n \n for e in possible_tags:\n if e not in checker:\n possible_transitions[e]=1 / tag_counts[e]\n ###############End of my addition################################### \n\n #Go through the transitions to see if any of the new tags can be applied to the word at hand\n for j,k in possible_transitions.items():\n if j in possible_tags:\n prob = k * emmissions[j]\n new_probabilities[a+(j,)] = prob * b #Create a new tuple entry in probabilities dictionary which is the prob calculated by b (the probability already there)\n \n #print('\\nPROBABILITIES:',probabilities)\n #update the dictionary ###Get rid of the dupicate ends tags with lower probability\n trimmed_probabilities = dict()\n possible_enders = list()\n\n\n for seq, pr in new_probabilities.items():\n possible_enders.append(seq[-1])\n\n possible_enders = list(set(possible_enders))\n\n\n for g in possible_enders:\n temp=list()\n for seq, pr in new_probabilities.items():\n if seq[-1] == g:\n temp.append((pr,seq))\n\n temp.sort(reverse=True)\n best_seq=temp[0][1]\n best_prob=temp[0][0]\n\n\n trimmed_probabilities[best_seq]=best_prob\n \n \n \n \n probabilities = trimmed_probabilities\n\n \n counter += 1 \n \n #print('\\nPROBABILITIES:',probabilities) \n #Calculate qf, the end state\n final_probabilities = dict()\n for a,b in probabilities.items():\n qf_tag = a[-1]\n try:\n final_prob = qf[qf_tag] * b\n except: #If the tag does not exist in our qf dictionary we give the probability to be 0\n final_prob = b * (1/tag_counts[qf_tag]) #This was my v3 change\n \n final_probabilities[a] = final_prob\n \n #find the most likely sequence by sorting the dictionary from probabilities largest to smallest\n sorter=list()\n for a,b in final_probabilities.items():\n sorter.append((b,a))\n\n sorter.sort(reverse=True)\n #print('SORTED:',sorter)\n sequence = sorter[0][1]\n sequence = list(sequence)\n \n \n #Create line to be written to the text file:\n tagged_sentence=''\n for i in range(len(words)):\n tagged = words[i]+'/'+sequence[i]+' '\n tagged_sentence=tagged_sentence + tagged\n \n text_file.write(tagged_sentence+'\\n')\n \n \n \n \n \ntext_file.close() \n \n ","sub_path":"Code/hmmdecode.py","file_name":"hmmdecode.py","file_ext":"py","file_size_in_byte":7978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"443688042","text":"import numpy as np\nimport constants\n\n\nclass EvilProducts:\n def __init__(self, DNA, initalFitness=None):\n self.DNA = DNA\n self.fitness = initalFitness\n\n def setFitness(self, fitness):\n self.fitness = fitness\n\n def getFitness(self):\n return self.fitness\n\n def mutate(self, mutationRate, workstationsJson, workstationTypes):\n if np.random.random() < mutationRate:\n p = 
np.random.randint(0, len(self.DNA))\n            i = np.random.randint(0, constants.PRODUCTS_PATH_LENGTH)\n            workstationTypeIndex = np.random.randint(len(workstationsJson['workStations']))\n            first = self.DNA[p][2][:i]\n            change = workstationsJson['workStations'][workstationTypeIndex]['type']\n            last = self.DNA[p][2][i + 1:]\n            self.DNA[p] = (self.DNA[p][0], self.DNA[p][1], first + change + last)\n\n        return self\n\n    @staticmethod\n    def recombine(ancestor1, ancestor2):\n        new_Evil_Products = EvilProducts(list(ancestor1.DNA))\n        for i in range(len(new_Evil_Products.DNA)):\n            if np.random.random() < 0.5:\n                new_Evil_Products.setFitness(0)\n                new_Evil_Products.DNA[i] = (new_Evil_Products.DNA[i][0], new_Evil_Products.DNA[i][1], ancestor2.DNA[i][2])\n        return new_Evil_Products\n","sub_path":"EvilProducts.py","file_name":"EvilProducts.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"52282729","text":"from django import forms\n\nclass AudioFilesForm(forms.Form):\n    def __init__(self, *args, **kwargs):\n        files = kwargs.pop('files')\n        super(AudioFilesForm, self).__init__(*args, **kwargs)\n        counter = 1\n        for q in files:\n            #self.fields['files-' + str(counter)] = forms.CharField(label='file')\n            self.fields[str(q)] = forms.BooleanField(required=False)\n            counter += 1\n\n\n","sub_path":"app/upload/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"307859542","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom linuxnano.flags import TestingFlags\n\nclass MessageBox(QtWidgets.QMessageBox):\n    def __init__(self, text, *argv):\n\n        app = QtWidgets.QApplication.instance()\n\n        if app is None:\n            print('\\nError: ',text)\n            for arg in argv:\n                print(arg)\n\n        else:\n            super().__init__()\n            self.setText(text)\n            self.setStandardButtons(QtWidgets.QMessageBox.Ok)\n            btn = self.button(QtWidgets.QMessageBox.Ok)\n\n            detailed_text = \"\"\n\n            for arg in argv:\n                try:\n                    detailed_text += str(arg)\n\n                except Exception as e:\n                    print(e)\n\n            if detailed_text != \"\":  # was 'is not \"\"', which compares identity, not equality\n                self.setDetailedText(detailed_text)\n\n\n            if TestingFlags.AUTO_CLOSE_MESSAGE_BOX:\n                QtCore.QTimer.singleShot(0, btn.clicked)\n            self.exec_()\n","sub_path":"linuxnano/message_box.py","file_name":"message_box.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"414366319","text":"from fpdf import FPDF\nfrom PIL import Image\n\n\ndef makePdf(pdfFileName, listPages, dir=''):\n    \"Takes a filename and a list of page images and creates the pdf\"\n    if (dir):\n        dir += \"/\"\n\n    cover = Image.open(dir + str(listPages[0]))\n    width, height = cover.size\n\n    pdf = FPDF(unit=\"pt\", format=[width, height])\n\n    for page in listPages:\n        pdf.add_page()\n        pdf.image(dir + str(page), 0, 0)\n\n    pdf.output(dir + pdfFileName + \".pdf\", \"F\")\n\n\nimport os.path\nx = [f for f in os.listdir() if f.endswith(\".jpg\")]\nprint(x)\ny = len(x)\n\n\nnomefile = input(\"Nomedelfile: \")\n\nmakePdf(nomefile, x)\n","sub_path":"jpgpdf.py","file_name":"jpgpdf.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"19996655","text":"from django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, 
login\nfrom user.forms.profile_form import ProfileForm, UserRegisterForm\nfrom user.models import Profile, SearchHistory\nfrom cart.forms.paymentForm import PaymentForm\nfrom cart.models import Customer, Payment\nfrom products.models import Product\nfrom django.http import JsonResponse\n# Create your views here.\n\n\ndef register(request):\n if request.method != 'POST':\n return render(request, 'user/register.html', {\n 'form': UserRegisterForm()\n })\n if request.method == 'POST':\n form = UserRegisterForm(data=request.POST)\n if form.is_valid():\n task = form.save()\n user = Profile()\n user.user = User.objects.get(id=task.id)\n user.profileImage = 'https://d1nhio0ox7pgb.cloudfront.net/_img/o_collection_png/green_dark_grey/512x512/plain/user.png'\n user.firstName = form.cleaned_data['first_name']\n user.lastName = form.cleaned_data['last_name']\n user.save()\n return redirect('login')\n return render(request, 'user/register.html',{\n 'form': UserRegisterForm()\n })\n\n\ndef profile(request):\n # Edit profile\n profile = Profile.objects.filter(user=request.user).first()\n if request.method == 'POST':\n form = ProfileForm(instance=profile, data=request.POST)\n\n if form.is_valid():\n profile = form.save(commit=False)\n profile.user = request.user\n profile.save()\n user = Profile.objects.get(user=request.user.id)\n user = user.user\n user.first_name = request.POST['firstName']\n user.last_name = request.POST['lastName']\n user.save()\n return redirect('profile')\n return render(request, 'user/profile.html', {\n 'form': ProfileForm(instance=profile),\n 'profile': profile\n })\n\n\ndef history(request):\n idList = []\n newDict = {}\n for x in SearchHistory.objects.filter(user_id=request.user.id).order_by('-time')[:10]:\n y = x.time\n x = x.url\n x = x.split('/')\n x = x[-1]\n idList.append(int(x))\n newDict[Product.objects.filter(id=x)] = y\n # newList.append(y)\n print(newDict)\n context = {'SearchHistory': SearchHistory.objects.filter(user_id=request.user.id).order_by('-time')[:10],\n 'products': newDict\n }\n return render(request, 'user/history.html', context)\n\ndef loginIn(request):\n if request.method == 'GET':\n return render(request, 'user/login.html')\n if request.method == 'POST':\n\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('homeIndex')\n else:\n return render(request, 'user/login.html')\n\n\n\ndef payment(request):\n month = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']\n year = [21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]\n try:\n info = Payment.objects.get(user_id=request.user.customer.id)\n except:\n info = ''\n try:\n customer = request.user.customer\n except:\n device = request.COOKIES['device']\n if request.user.is_active:\n customer, created = Customer.objects.get_or_create(user=request.user, name=request.user.username)\n else:\n customer, created = Customer.objects.get_or_create(device=device)\n if request.method == 'POST':\n try:\n form = PaymentForm(request.POST)\n print(request.user)\n print(request.user.customer.id)\n pay = Payment.objects.get(user_id=request.user.customer.id)\n print(pay)\n print(pay.id)\n form.save(commit=False)\n myPayment = Payment(id=pay.id, user=request.user.customer, cardOwner=form.data['cardOwner'],\n cardNumber=form.data['cardNumber'],\n expirationDateMonth=form.data['expirationDateMonth'],\n 
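# NOTE: persisting raw card numbers and CVCs like this is unsafe outside a demo app\n                                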
expirationDateYear=form.data['expirationDateYear'],\n                                cvc=form.data['cvc'])\n            myPayment.save(force_update=True)\n            return redirect('payment12')\n        except:\n            form = PaymentForm(request.POST)\n            if form.is_valid():\n                paymentform = form.save(commit=False)\n                paymentform.user = customer\n                paymentform.save()\n                return render(request, 'cart/index.html')\n    return render(request, 'user/payment12.html', {\n        'form': PaymentForm(),\n        'year': year,\n        'month': month,\n        'info': info\n    })\n\ndef names(request):\n    username = list(User.objects.all().values('username'))\n    return JsonResponse({'data': username})","sub_path":"user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"642439511","text":"from tkinter import *\nfrom PIL import ImageTk, Image\n\n\n\nclass GUI:\n    def __init__(self, book_service, client_service, rental_service):\n        self.root = Tk()\n        self.book_service = book_service\n        self.client_service = client_service\n        self.rental_service = rental_service\n\n    def create_gui(self):\n\n        my_image = ImageTk.PhotoImage(Image.open('C:\\\\Users\\\\Maria\\\\Desktop\\\\library1.png'))\n        main_label = Label(self.root, image=my_image, height=899, width=989)\n        main_label.pack()\n\n        book_button = Button(main_label, text='Books', command=lambda: self.books_gui(main_label))\n        book_button.place(height=70, width=160, relx=0.1, rely=0.3)\n\n        client_button = Button(main_label, text='Client')\n        client_button.place(height=70, width=160, relx=0.4, rely=0.3)\n\n        rental_button = Button(main_label, text='Rentals')\n        rental_button.place(height=70, width=160, relx=0.7, rely=0.3)\n\n        undo_button = Button(main_label, text='UNDO')\n        undo_button.place(height=90, width=90, relx=0.1, rely=0.52)\n\n        redo_button = Button(main_label, text='REDO')\n        redo_button.place(height=90, width=90, relx=0.4, rely=0.52)\n\n        self.root.mainloop()\n\n    def books_gui(self, parent):\n        book_frame = Frame(parent)\n        book_frame.place(height=899, width=989, relx=0.0001, rely=0.0001)\n        #my_image = ImageTk.PhotoImage(Image.open('C:\\\\Users\\\\Maria\\\\Desktop\\\\library2.png'))\n        #books_label = Label(self.root, image=my_image, height=700, width=675)\n        #books_label.pack()\n\n\n\n        add_button = Button(book_frame, text='add a book')  # was books_label, whose creation is commented out above (NameError)\n        add_button.place(height=40, width=70, relx=0.08, rely=0.1)\n\n        remove_button = Button()\n        remove_button.place()\n\n        update_button = Button()\n        update_button.place()\n\n        list_all = Button()\n        list_all.place()\n\n        exit_button = Button(book_frame, text='exit', bg='red', command=lambda: book_frame.destroy())\n        exit_button.place(height=30, width=50, relx=0.9, rely=0.9)\n\n\n\n\n\n    def clients_gui(self):\n        pass\n\n    def rentals_gui(self):\n        pass\n\n\n\n","sub_path":"HW6789 - Library Data Base/UI/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"231353654","text":"import os\nimport io\nimport uuid\nimport sys\nimport cv2\nimport base64\nimport logging\nimport glob\nimport numpy as np\nfrom PIL import Image\nfrom io import BytesIO\nfrom flask import Flask, render_template, make_response, flash\nimport flask\nimport paddlehub as hub\n\napp = Flask(__name__)\n\n#run_with_ngrok(app) #starts ngrok when the app is run\n\ndef convert_bytes_to_image(img_name,img_bytes):\n    # turn the raw bytes into an in-memory byte stream\n    bytes_stream = BytesIO(img_bytes)\n    # decode the stream into a PIL image\n    roiimg = Image.open(bytes_stream)\n
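    # Decoding through PIL both validates the upload and lets it be re-encoded as PNG below.\n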
    img_path = os.path.join('./input', img_name + \".jpg\")\n    imgByteArr = BytesIO()  # start with an empty in-memory byte buffer\n    roiimg.save(imgByteArr, format='PNG')  # was format('PNG'), a call to the builtin; the keyword is what PIL expects\n    imgByteArr = imgByteArr.getvalue()  # grab the whole buffer as bytes, regardless of the stream position\n    with open(img_path,'wb') as f:\n        f.write(imgByteArr)\n\n    return img_path\n\n\ndef paddl(input_img_path):\n    model = hub.Module(name='animegan_v1_hayao_60', use_gpu=False)\n\n    # model inference\n    result = model.style_transfer(\n        images=None,\n        paths=[input_img_path],\n        batch_size=1,\n        output_dir='output',\n        visualization=True,\n        min_size=32,\n        max_size=512\n    )\n    file = glob.glob(r\"./output/*.jpg\")\n    for i in file:\n        path = i\n    return path\n\n\n\n@app.route('/')\n@app.route('/api', methods=[\"POST\", \"GET\"])\ndef api():\n    try:\n        img = flask.request.files[\"image\"].read()\n        img_name = str(uuid.uuid4())\n        input_img_path = convert_bytes_to_image(img_name,img)\n        image_save = paddl(input_img_path)\n        with open(image_save, 'rb') as f:\n            res = base64.b64encode(f.read())\n        return res\n    except Exception as e:\n        logging.error(e)\n        return \"error: Error occurred, please check the log output!\"\n\n\nif __name__ == \"__main__\":\n    app.run(debug=False, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"14309505","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nePS4M.py\n\nCreated by Paul Fudal on 2014-01-30.\nCopyright (c) 2014 INRIA. All rights reserved.\n\"\"\"\nfrom __future__ import print_function\n\nimport requests\nimport time\nimport subprocess\nimport threading\nfrom threading import Thread\n\nfrom netaddr import IPNetwork\nimport netifaces as ni\n\nHIDDENPAGE_ = \"/hidden.htm?\"\nGLOBALSTATUS = {'On': 0, 'Off' : 1, 'Restart' : 2, 'Rst' : 2}\n\ndef test_ip_mac_address(ip_address, mac_address):\n    \"\"\"Tests whether the host's real MAC address equals the given MAC address\"\"\"\n    ret = subprocess.Popen([\"sudo\", \"nmap\", \"-sP\", \"-n\", ip_address],\n                           stdout=subprocess.PIPE).stdout.read()\n    ret_lines = ret.split('\\n')[2:-2]\n    mac = ret_lines[2].split(' ')[2].lower()\n    return mac == mac_address\n\ndef test_ip(ip_address):\n    \"\"\"Tests an http request at the specified ip on port 80\"\"\"\n    rep = requests.get('http://' + ip_address + HIDDENPAGE_)\n    return rep.status_code == 200\n\ndef update_function(powerswitch):\n    \"\"\"Function called by the thread to update the power switch status\"\"\"\n    while powerswitch.run_updater:\n        powerswitch.update_status()\n        time.sleep(0.5)\n\ndef search_on_network(mac_address):\n    \"\"\"Searches for the ip address on the given MAC address\"\"\"\n    mac_dict = {}\n    ifaces = ni.interfaces()\n    for iface in ifaces:\n        if not (iface.startswith('en') or iface.startswith('eth')):\n            continue\n        ifa = ni.ifaddresses(iface)\n        try:\n            ip_addr = ifa[2][0]['addr']\n            mask = ifa[2][0]['netmask']\n            prlen = IPNetwork(ip_addr + '/' + mask).prefixlen\n            ret = subprocess.Popen([\"sudo\", \"nmap\", \"-sP\", \"-n\",\n                                    str(ip_addr) + \"/\" + str(prlen)],\n                                   stdout=subprocess.PIPE).stdout.read()\n            ret_lines = ret.split('\\n')[2:-4]\n            count = 0\n            while count < len(ret_lines):\n                ip_s = ''\n                mac = ''\n                if ret_lines[count].startswith('Nmap'):\n                    ip_s = ret_lines[count].split(' ')[4]\n                    count = count + 1\n                if ret_lines[count].startswith('Host'):\n                    count = count + 1\n                if ret_lines[count].startswith('MAC'):\n                    mac = ret_lines[count].split(' ')[2].lower()\n                    count = count + 1\n                mac_dict[mac] = ip_s\n
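            # KeyError here means the interface has no IPv4 entry (index 2 == AF_INET in netifaces)\n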
        except KeyError:\n            continue\n    return mac_dict[mac_address]\n\nclass Eps4m(object):\n    \"\"\"Class defining a power switch\"\"\"\n    def __init__(self, mac_address=None, ip_address=None):\n        self.status = {}\n        self.lock = threading.Lock()\n        if ip_address is None and mac_address is None:\n            raise ValueError(\"MAC address and IP are both equals to none.\")\n        elif ip_address is None and mac_address is not None:\n            self.addr = search_on_network(mac_address)\n        elif ip_address is not None and mac_address is None:\n            if test_ip(ip_address):\n                self.addr = ip_address\n            else:\n                raise ValueError(\"IP address doesn't seem to be a http server.\")\n        else:\n            if test_ip_mac_address(ip_address, mac_address):\n                self.addr = ip_address\n            else:\n                self.addr = search_on_network(mac_address)\n        self.update_status()\n        self.updater = Thread(target=update_function, args=(self,))  # args must be a tuple, not a set\n        self.run_updater = True\n        self.updater.daemon = True\n        self.updater.start()\n\n    # def __exit__(self, type, value, traceback):\n    #     self.run_updater = False\n    #     self.updater.join()\n\n    def update_status(self):\n        \"\"\"Updates the current status of the power switch\"\"\"\n        self.lock.acquire()\n        self._get_current_status()\n        self.lock.release()\n\n    def print_status(self):\n        \"\"\"Prints the current status of the power switch\"\"\"\n        self.lock.acquire()\n        print(self.status)\n        self.lock.release()\n\n    def set_on(self, port):\n        \"\"\"Puts the given port of the power switch on\"\"\"\n        self._request(port, '=On')\n\n    def is_on(self, port):\n        \"\"\"Returns true if given port is on\"\"\"\n        self.update_status()\n        return self.status[port] == 0\n\n    def is_off(self, port):\n        \"\"\"Returns true if given port is off\"\"\"\n        self.update_status()\n        return self.status[port] == 1\n\n    def is_restarting(self, port):\n        \"\"\"Returns true if given port is restarting\"\"\"\n        self.update_status()\n        return self.status[port] == 2\n\n    def set_off(self, port):\n        \"\"\"Puts the given port of the power switch off\"\"\"\n        self._request(port, '=Off')\n\n    def restart(self, port):\n        \"\"\"Restarts the given port of the power switch\"\"\"\n        self._request(port, '=Restart')\n\n    def restart_in(self, port, time_s):\n        \"\"\"Restarts the given port of the power switch with a specified time\"\"\"\n        assert time_s > 0  # was 'assert time < 0', which compared the time module itself\n        self.set_off(port)\n        time.sleep(time_s)\n        self.set_on(port)\n\n    def set_all_on(self):\n        \"\"\"Puts all ports of the power switch on\"\"\"\n        for i in range(4):\n            self.set_on(i)\n\n    def set_all_off(self):\n        \"\"\"Puts all ports of the power switch off\"\"\"\n        for i in range(4):\n            self.set_off(i)\n\n    def all_restart(self):\n        \"\"\"Restarts all ports of the power switch\"\"\"\n        for i in range(4):\n            self.restart(i)\n\n    def all_restart_in(self, time_s):\n        \"\"\"Restarts all ports of the power switch with the specified time\"\"\"\n        for i in range(4):\n            self.restart_in(i, time_s)\n\n    def _get_current_status(self):\n        \"\"\"Retrieves the current status of the power switch\"\"\"\n        rep = requests.get('http://' + self.addr + HIDDENPAGE_)\n        rep.encoding = 'ISO-8859-1'\n        status = str(rep.text).split('\\n')[9:-3]\n        for line in status:\n            num_port = int(line[4:5])\n            port_sta = line[6:-1]\n            self.status[num_port - 1] = GLOBALSTATUS[port_sta]\n\n    def _request(self, port, action):\n        \"\"\"Performs a request on the given port with the specified action\"\"\"\n        assert 0 <= port < 4, \\\n            \"port number are from 0 to 3; you asked for port {}\".format(port)\n        self.status[port] = GLOBALSTATUS[action[1:]]\n        requests.get('http://' + self.addr + HIDDENPAGE_\n                     + 'M0:O' + str(port+1) + 
action)\n","sub_path":"dovecot/ext/powerswitch/powerswitch.py","file_name":"powerswitch.py","file_ext":"py","file_size_in_byte":6495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"544015065","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom create_pdf_documents.models import Naliczenie_cala_wspolnota\n\n\n@login_required\ndef usun_naliczenie_wspolnoty_po_wyborze_i_po_sortowaniu(request, my_id, wybieracz, sorter):\n naliczenie = get_object_or_404(Naliczenie_cala_wspolnota, pk=my_id)\n adres = f'/naliczenia/wspolnota/select/{wybieracz}/'\n if request.method == \"POST\":\n naliczenie.delete()\n return redirect(f'/naliczenia/wspolnota/select/{wybieracz}/')\n return render(request, 'usun_naliczenie_wspolnoty.html', {'naliczenie': naliczenie, 'adres': adres})","sub_path":"view_s/deleting/usun_naliczenie_wspolnoty_po_wyborze_i_po_sortowaniu.py","file_name":"usun_naliczenie_wspolnoty_po_wyborze_i_po_sortowaniu.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"240464671","text":"# -*- coding: utf-8 -*-\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom registro.models import UserProfile\nfrom cafeteria_puebla.forms import productosForm, ArticuloForm, Cambiar_precio_form, borrar_producto_form\nfrom datetime import date\nfrom cafeteria.models import productos, Articulo\nfrom django.contrib.auth.decorators import login_required\nimport json\n\n@login_required\ndef inicio(request):\n dict_context = {}\n profileuser = UserProfile.objects.get(user = request.user)\n dict_context['profileuser'] = profileuser\n return render(request,'cafeteria_puebla/cafeteria_inicio.html' ,dict_context)\n@login_required\ndef agregar_producto(request):\n dict_context = {}\n if request.method == 'POST':\n agregar_form = productosForm(data= request.POST)\n if agregar_form.is_valid():\n i = 0\n try:\n j = productos.objects.all().order_by('-id')[0]\n j = j.id+1\n except:\n j = 0\n while(i< int(request.POST.get('numero_productos'))):\n producto = agregar_form.save(commit = False)\n dia = date.today()\n producto.agregado_el_anio = dia.year\n producto.agregado_el_mes = dia.month\n producto.agregado_el_dia = dia.day\n producto.codigo_barras = request.POST.get('codigo_barras')\n producto.precio = request.POST.get('precio')\n producto.id = j\n producto.save()\n i = i+1\n j = j+1\n dict_context['articulo_form'] = ArticuloForm()\n dict_context['agregar_form'] = productosForm()\n dict_context['validador'] = 1\n dict_context['usuario'] = UserProfile.objects.get(user = request.user)\n return render(request, 'cafeteria_puebla/agregarproducto.html', dict_context)\n else:\n dict_context['articulo_form'] = ArticuloForm()\n dict_context['validador'] = 0\n dict_context['agregar_form'] = agregar_form\n dict_context['usuario'] = UserProfile.objects.get(user = request.user)\n return render(request, 'cafeteria_puebla/agregarproducto.html', dict_context)\n agregar_form = productosForm()\n dict_context['usuario'] = UserProfile.objects.get(user = request.user)\n dict_context['articulo_form'] = ArticuloForm()\n dict_context['agregar_form'] = agregar_form\n dict_context['validador'] = 0\n return render(request, 'cafeteria_puebla/agregarproducto.html', dict_context)\n\n@login_required\ndef agregar_articulo(request):\n context_dict = {}\n if request.method == 'POST':\n articulo_form = 
ArticuloForm(request.POST)\n        if articulo_form.is_valid():\n            articulo_nuevo = articulo_form.save(commit=False)\n            articulo_nuevo.sucursal = 1\n            articulo_nuevo.save()\n            context_dict['articulo_form'] = ArticuloForm()\n            context_dict['validador'] = 1\n            context_dict['agregar_form'] = productosForm()\n            context_dict['usuario'] = UserProfile.objects.get(user = request.user)\n            return render(request, 'cafeteria_puebla/agregarproducto.html', context_dict)\n        else:\n            context_dict['articulo_form'] = articulo_form\n            context_dict['validador'] = 0\n            context_dict['agregar_form'] = productosForm()\n            context_dict['usuario'] = UserProfile.objects.get(user = request.user)\n            return render(request, 'cafeteria_puebla/agregarproducto.html', context_dict)\n    context_dict['articulo_form'] = ArticuloForm()\n    context_dict['validador'] = 0\n    context_dict['agregar_form'] = productosForm()\n    context_dict['usuario'] = UserProfile.objects.get(user = request.user)\n    return render(request, 'cafeteria_puebla/agregarproducto.html', context_dict)\n\n@login_required\ndef busqueda(request):\n    if request.method == 'GET':\n        codigo = request.GET['codigo']\n        producto = productos.objects.all().filter(codigo_barras=codigo,sucursal=1)\n        if producto:\n            return HttpResponse(1)\n        return HttpResponse(0)  # previously the view fell through and returned None\n@login_required\ndef caja_registradora(request):\n    context_dict = {}\n    context_dict['validador'] = 0\n    context_dict['usuario'] = UserProfile.objects.get(user = request.user)\n    return render(request,'cafeteria_puebla/caja_registradora.html', context_dict)\n@login_required\ndef muestro_producto(request, producto_slug):\n    context_dict = {}\n    try:\n        producto = Articulo.objects.get(slug = producto_slug)\n        context_dict['titulo'] = producto\n        dict = []\n        pro = productos.objects.all().filter(nombre_producto = producto)\n        if (len(pro) != 0):\n            dict_dict = {\n                'nombre': producto.nombre,\n                'vendidos':str(productos.objects.all().filter(nombre_producto_id = int(producto.id), vendido = 1).count()),\n                'no_vendidos': str(productos.objects.all().filter(nombre_producto_id = int(producto.id),vendido = 0).count()),\n                'precio': str(pro[0].precio),\n                'costo': str(pro[0].costo),\n                'codigo_barras': str(pro[0].codigo_barras)\n            }\n            dict.append(dict_dict)\n        context_dict['productos'] = dict\n        context_dict['usuario'] = UserProfile.objects.get(user = request.user)\n    except Articulo.DoesNotExist:  # was 'producto.DoesNotExist', a NameError when the lookup fails\n        context_dict['usuario'] = UserProfile.objects.get(user = request.user)\n    return render(request,'cafeteria_puebla/busqueda_producto.html',context_dict)\n\n@login_required\ndef ventas_caja(request):\n    if request.method == 'GET':\n        id_producto = request.GET['codigo']\n        validador = request.GET['cancelado']\n        salvar = productos.objects.get(id = id_producto)\n        nombre = Articulo.objects.get(id = salvar.nombre_producto_id)\n        if int(validador) == 0:\n            salvar.vendido = 1\n            salvar.vendido_por = request.user\n            dia = date.today()\n            salvar.vendido_el_anio = dia.year\n            salvar.vendido_el_mes = dia.month\n            salvar.vendido_el_dia = dia.day\n        elif int(validador) == 1:\n            salvar.vendido = 0\n            salvar.en_caja = 0\n        salvar.save()\n        return HttpResponse(json.dumps({'nombre':nombre.nombre}),content_type = 'application/json')\n@login_required\ndef cambiar_precio_producto(request):\n    context_dict = {}\n    if request.method == 'POST':\n        precio_form = Cambiar_precio_form(request.POST)\n        if precio_form.is_valid():\n            busqueda = productos.objects.all().filter(nombre_producto_id = request.POST.get('producto')).exclude(vendido = 1)\n            for producto in busqueda:\n                producto.precio = request.POST.get('precio')\n                producto.save()\n
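            # only unsold stock is repriced; rows already sold keep their historical price\n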
context_dict['validador'] = 2\n context_dict['precio_form'] = Cambiar_precio_form()\n context_dict['cafeteria_val'] = 1\n context_dict['usuario'] = UserProfile.objects.get(user = request.user)\n return render(request,'libreria_puebla/cambiar_precio_libro.html', context_dict)\n else:\n context_dict['cafeteria_val'] = 1\n context_dict['validador'] = 0\n context_dict['usuario'] = UserProfile.objects.get(user = request.user)\n context_dict['precio_form'] = precio_form\n return render(request, 'libreria_puebla/cambiar_precio_libro.html', context_dict)\n context_dict['cafeteria_val'] = 1\n context_dict['validador'] = 0\n context_dict['precio_form'] = Cambiar_precio_form()\n context_dict['usuario'] = UserProfile.objects.get(user = request.user)\n return render(request, 'libreria_puebla/cambiar_precio_libro.html', context_dict)\n@login_required\ndef borrar_producto(request):\n context_dict = {}\n if request.method == 'POST':\n borrar_form = borrar_producto_form(request.POST)\n if borrar_form.is_valid():\n borrar = productos.objects.all().filter(nombre_producto_id = int(request.POST.get('producto')), vendido = 0)\n for b in borrar:\n b.delete()\n context_dict['usuario'] = UserProfile.objects.get(user = request.user)\n context_dict['validador'] = 4\n context_dict['borrar_form'] = borrar_producto_form()\n else:\n context_dict['usuario'] = UserProfile.objects.get(user = request.user)\n context_dict['validador'] = 0\n context_dict['borrar_form'] = borrar_producto_form()\n return render(request,'cafeteria_puebla/borrar_producto.html', context_dict)\n\ndef imprimir(request):\n return render(request, 'cafeteria_puebla/probando_imprimir.html')\n","sub_path":"cafeteria_puebla/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"418413365","text":"\"\"\"\n题目:两个乒乓球队进行比赛,各出三人。甲队为a,b,c三人,乙队为x,y,z三人。已抽签决定比赛名单。有人向队员打听比赛的名单。a说他不和x比,c说他不和x,z比,请编程序找出三队赛手的名单。\n\"\"\"\n\nta = ['a','b','c']\ntb = ['x','y','z']\nd = {}\nfor i in range(0,3):\n ua = ta[i]\n for j in range(0,3):\n ub = tb[j]\n if (ua == 'a' and ub == 'x') or (ua=='c' and (ub=='x' or ub=='z')):\n continue\n print( ua ,ub)\n d[ua] = ub\n\n\nprint(d)","sub_path":"test22.py","file_name":"test22.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"407619609","text":"from .Modules import *\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport re\nimport errno\n\nfrom .General import *\n\nclass AndroidProject:\n def __init__(self, env):\n #templates path\n path = os.path.dirname(os.path.realpath(__file__))\n self.templates_dir = os.path.join(path, 'templates/Android')\n\n self.package_name = \"com.example\"\n self.project_name = \"Untitiled\"\n self.version_code = \"1\"\n self.version_name = \"1.00\"\n self.custom_library = None\n self.target_sdk = \"18\"\n self.min_sdk = \"9\"\n self.label = \"@string/app_name\"\n self.icon = \"@drawable/icon\"\n self.main_activity_definition = self.getMainActivityDeclaration()\n self.debuggable = True\n self.release_build = True\n self.__modules = set()\n self.__manifest_permisions = set()\n self.__manifest_permisions_manual = []\n self.__manifest_additionals = []\n self.working_directory = 'project_dir'\n self.output_name = 'tmp'\n self.__output_dir = ''\n self.__cpp_files = []\n self.__cpp_dirs = []\n self.__look_up_dirs = []\n self.env = env\n self.signature = None\n self.res_dir = 'res'\n 
self.__java_libs = []\n self.__java_files = []\n self.__res_files = []\n self.__external = []\n self.resources_compile_task = None\n self.modules = None\n\n def addCppFile(self, path):\n self.__cpp_files.append(path)\n\n def addCppDir(self, dir):\n self.__cpp_dirs.append(dir)\n\n def addLookUpDir(self, dir):\n self.__look_up_dirs.append(dir)\n\n def addModule(self, name):\n self.__modules.add(name)\n\n def findDir(self, dir_to_find):\n directory = None\n for dir in self.__look_up_dirs:\n path = os.path.join(self.working_directory,\n dir,\n dir_to_find)\n if os.path.isdir(path):\n directory = dir\n break\n\n if directory is None:\n raise Exception('Dir not found: {0}'.format(dir_to_find))\n\n return os.path.join(directory, dir_to_find)\n\n def findFile(self, file):\n directory = None\n for dir in self.__look_up_dirs:\n path = os.path.join(self.working_directory,\n dir,\n file)\n if os.path.exists(path):\n directory = dir\n break\n\n if directory is None:\n raise Exception('File not found: {0}'.format(file))\n\n return os.path.join(directory, file)\n\n def gatherPermissionsAndAdditionals(self):\n #system = Modules.Modules()\n\n self.__modules = self.modules.getRequiredModules(list(self.__modules))\n\n for m in self.__modules:\n module = self.modules.getModule(m)\n self.__manifest_permisions = self.__manifest_permisions.union(set(module.getPermissions()))\n self.__manifest_additionals += module.getManifestAdditional()\n self.__manifest_permisions_manual += module.getPermissionsManual()\n cpp_files = module.getCppFiles()\n\n for file in cpp_files:\n self.__cpp_files.append(self.findFile(file))\n\n java_libs = module.getJavaLibs()\n for file in java_libs:\n self.__java_libs.append(self.findFile(file))\n\n self.__java_files += module.getJavaFiles()\n self.__res_files += module.getResFiles()\n self.__external += module.getExternalProjects();\n\n\n def getMainActivityDeclaration(self,\n screenOrientation=\"sensorLandscape\",\n label=\"@string/app_name\",\n theme=\"@android:style/Theme.NoTitleBar.Fullscreen\",\n configChanges=\"orientation\"):\n main_activity_definition = self.getTemplate('AndroidManifest_main_activity.xml')\n return main_activity_definition.format(\n screenOrientation=screenOrientation,\n label=label,\n theme=theme,\n configChanges=configChanges)\n\n def processCppDir(self, d):\n self.__cpp_files.append(os.path.join(d, \"*.cpp\"))\n for x in os.listdir(os.path.join(self.working_directory, d)):\n if(os.path.isdir(os.path.join(self.working_directory, d, x))):\n self.processCppDir(os.path.join(d,x))\n\n\n def buildJNIFolder(self):\n jni_dir = os.path.join(self.__output_dir, 'jni')\n preparePath(jni_dir)\n\n #Create Application.mk\n app_mk_tpl = self.getTemplate('jni/Application.mk')\n saveToFileIfNeeded(os.path.join(jni_dir, 'Application.mk'),\n app_mk_tpl)\n\n main_cpp = self.getTemplate('jni/main.cpp')\n saveToFileIfNeeded(os.path.join(jni_dir, 'main.cpp'),\n main_cpp)\n\n activity_result_cpp = self.getTemplate('jni/OnActivityResult.cpp')\n activity_result_cpp = activity_result_cpp.replace('{package}', self.package_name.replace('.','_'))\n saveToFileIfNeeded(os.path.join(jni_dir, 'OnActivityResult.cpp'),\n activity_result_cpp)\n\n for d in self.__cpp_dirs:\n self.processCppDir(d)\n\n android_mk_tpl = self.getTemplate('jni/Android.mk')\n cpp_file = sorted(map(unixSlashes, self.__cpp_files))\n\n cocos2dx_dir = self.env.cocos2dx_dir\n cocos2dx_external = os.path.join(cocos2dx_dir, 'cocos2dx/platform/third_party/android/prebuilt')\n\n static_libraries = ''\n custom_import_path = ''\n 
custom_module = ''\n \n if self.custom_library is not None:\n static_libraries = 'LOCAL_STATIC_LIBRARIES = ' + self.custom_library + '_static'\n folder = os.path.join(self.working_directory, self.findFile(self.custom_library));\n folder = folder + '/..';\n folder = folder.replace('\\\\', '/');\n \n #LOCAL_WHOLE_STATIC = mavka_static\n custom_module = '$(call import-module,' + self.custom_library + ')';\n custom_import_path = '$(call import-add-path, '+ folder +')'\n \n\n file_list = '\\n'.join(\n map(\n lambda x:\n 'FILE_LIST += $(wildcard $(LOCAL_PATH)/../../{0})'.format(x),\n cpp_file)\n )\n\n look_up = sorted(map(unixSlashes, self.__look_up_dirs))\n includes = '\\n'.join(\n map(\n lambda x:\n 'LOCAL_C_INCLUDES += $(LOCAL_PATH)/../../{0}'.format(x),\n look_up)\n )\n android_mk = android_mk_tpl.format(\n file_list=file_list,\n includes=includes,\n cocos2dx_path=cocos2dx_dir,\n cocos2dx_external=cocos2dx_external,\n static_libraries=static_libraries,\n custom_module=custom_module,\n custom_import_path=custom_import_path\n )\n saveToFileIfNeeded(os.path.join(jni_dir, 'Android.mk'),\n android_mk)\n\n def prepareRes(self):\n res_dir = os.path.join(self.__output_dir, 'res')\n if os.path.isdir(res_dir):\n shutil.rmtree(res_dir)\n\n original_res = os.path.join(self.working_directory, self.res_dir)\n if not os.path.isdir(original_res):\n raise Exception('Dir not found {0}'.format(self.res_dir))\n\n shutil.copytree(original_res, res_dir)\n\n for res_file in self.__res_files:\n file = os.path.join(self.working_directory, self.findFile(res_file))\n target = os.path.join(res_dir, res_file)\n\n target_dir = os.path.dirname(target)\n if not os.path.isdir(target_dir):\n os.makedirs(target_dir)\n\n shutil.copy(file, target)\n\n def prepareSrc(self):\n src_dir = os.path.join(self.__output_dir, 'src')\n if os.path.isdir(src_dir):\n shutil.rmtree(src_dir)\n\n os.mkdir(src_dir)\n\n main_java = os.path.join(src_dir, self.package_name.replace('.','/'), 'Main.java')\n main_tpl = self.getTemplate('src/Main.java')\n dir = os.path.dirname(main_java)\n if not os.path.isdir(dir):\n os.makedirs(dir)\n\n file_put_contents(main_java, main_tpl.replace(\n '{packageName}',self.package_name\n ))\n\n for file in self.__java_files:\n target = os.path.join(src_dir, file)\n source = os.path.join(self.working_directory, self.findFile(file))\n\n target_dir = os.path.dirname(target)\n if not os.path.isdir(target_dir):\n os.makedirs(target_dir)\n\n shutil.copy(source, target)\n\n def showError(self, error, file='build_android.py', line='0', code='B0000'):\n showOutputError(error, file=file, line=line, code=code)\n\n def findLinkErrors(self, text):\n err = re.findall(r\"([^\\d][^:]+[^\\d]):([\\d]+):([\\d]+[:]){0,1} (fatal )*error: (.*)\", text)\n for e in err:\n self.showError(e[4], line=e[1], file=e[0], code='L0001')\n\n def getApkPath(self):\n apk_name = '{name}-{type}.apk'\n\n type = 'debug'\n if self.release_build:\n type = 'release'\n\n apk_name = apk_name.format(name=self.project_name,\n type=type)\n\n apk_path = os.path.join(self.__output_dir, 'bin', apk_name)\n return apk_path\n\n def installOnDevice(self):\n adb_command = os.path.join(self.env.android_sdk, 'platform-tools/adb')\n\n apk_path = self.getApkPath()\n\n command = [adb_command,\n 'install',\n '-r',\n apk_path]\n\n print('Installing on device...')\n sys.stdout.flush()\n\n res = subprocess.Popen(command).wait(timeout=300)\n if res != 0:\n raise Exception('Install to device failed')\n\n launch_command = [adb_command,\n 'shell',\n 'am',\n 'start',\n '-n',\n 
'{0}/{0}.Main'.format(self.package_name)]\n\n print('Running on device...')\n sys.stdout.flush()\n res = subprocess.Popen(launch_command).wait(timeout=10)\n if res != 0:\n raise Exception('Launch on device failed')\n\n\n def buildApk(self):\n ant_dir = self.env.ant_dir\n ant_cmd = os.path.join(ant_dir, 'bin/ant.bat')\n\n build_type = 'debug'\n if self.release_build:\n build_type = 'release'\n\n cur_dir = os.getcwd()\n\n try:\n os.chdir(self.__output_dir)\n command = [ant_cmd,\n build_type,\n '-Dsdk.dir={0}/'.format(self.env.android_sdk)]\n\n sp = subprocess.Popen(command, stderr=subprocess.PIPE)\n out, err = sp.communicate()\n\n error_text = ''\n try:\n error_text = err.decode('utf-8')\n except:\n error_text = err\n print(error_text, file=sys.stderr)\n\n res = sp.returncode\n if res != 0:\n raise Exception('Build .apk Failed')\n\n os.chdir(cur_dir)\n except:\n os.chdir(cur_dir)\n raise\n\n\n\n def compileNativePart(self):\n ndk_build = os.path.join(self.env.android_ndk, 'ndk-build.cmd')\n\n cocos2dx_root = self.env.cocos2dx_dir\n app_android_root = self.__output_dir\n\n command = [ndk_build,\n '-C',\n app_android_root]\n\n sp = subprocess.Popen(command, stderr=subprocess.PIPE)\n out, err = sp.communicate()\n\n error_text = err.decode('utf-8')\n print(error_text, file=sys.stderr)\n self.findLinkErrors(error_text)\n\n res = sp.returncode\n if res != 0:\n raise Exception('Build Native Part Failed')\n\n def prepareAssets(self):\n assets_dir = os.path.join(self.__output_dir, 'assets')\n if not os.path.isdir(assets_dir):\n os.mkdir(assets_dir)\n\n if self.resources_compile_task is not None:\n compileResources(self.env, assets_dir, self.resources_compile_task)\n\n\n def prepareLib(self):\n lib_dir = os.path.join(self.__output_dir, 'libs')\n\n if not os.path.isdir(lib_dir):\n os.mkdir(lib_dir)\n\n libs_hashes = {}\n libs_in_place = set()\n\n for p in os.listdir(lib_dir):\n path = os.path.join(lib_dir, p)\n if not os.path.isdir(path):\n libs_hashes[p] = sha1_file(path)\n libs_in_place.add(p)\n\n for lib in self.__java_libs:\n copy_from = os.path.join(self.working_directory, lib)\n\n lib_name = os.path.basename(lib)\n copy_to = os.path.join(lib_dir, lib_name)\n\n if lib_name not in libs_in_place:\n print(\"Lib added: {0}\".format(lib_name))\n shutil.copy(copy_from, copy_to)\n else:\n hash = sha1_file(copy_from)\n if hash != libs_hashes[lib_name]:\n print(\"Lib updated: {0}\".format(lib_name))\n shutil.copy(copy_from, copy_to)\n libs_in_place.remove(lib_name)\n\n for lib in libs_in_place:\n print(\"Lib deleted: {0}\".format(lib))\n path = os.path.join(lib_dir, lib)\n os.remove(path)\n\n def getExternalDir(self, dirname):\n first = self.__output_dir\n second = os.path.join(self.working_directory,self.findDir(dirname))\n\n return unixSlashes(os.path.relpath(second,first))\n\n def buildManifest(self):\n manifest_name = os.path.join(self.__output_dir, 'AndroidManifest.xml')\n manifest = self.createManifest()\n saveToFileIfNeeded(manifest_name, manifest)\n\n project_properties_tpl = self.getTemplate('project.properties')\n one_external_tpl = 'android.library.reference.{id}={path}\\n'\n\n externals = ''\n\n id = 2\n for dir in self.__external:\n externals += one_external_tpl.format(\n id=id,\n path=self.getExternalDir(dir)\n )\n id += 1\n\n project_properties = project_properties_tpl.format(\n sdkNum=self.target_sdk,\n external=externals\n )\n saveToFileIfNeeded(os.path.join(self.__output_dir, 'project.properties'),\n project_properties)\n\n local_properties_tpl = self.getTemplate('local.properties')\n 
local_properties = local_properties_tpl.format(\n sdkDir=self.env.android_sdk\n )\n saveToFileIfNeeded(os.path.join(self.__output_dir, 'local.properties'),\n local_properties)\n\n ndk_stack_tpl = self.getTemplate('ndk_stack.bat')\n ndk_stack = ndk_stack_tpl.format(\n sdkDir=self.env.android_sdk,\n ndkDir=self.env.android_ndk,\n projectDir=self.__output_dir\n )\n saveToFileIfNeeded(os.path.join(self.__output_dir, 'ndk_stack.bat'),\n ndk_stack)\n\n build_xml_tpl = self.getTemplate('build.xml')\n build_xml = build_xml_tpl.replace('{projectName}', self.project_name)\n saveToFileIfNeeded(os.path.join(self.__output_dir, 'build.xml'),\n build_xml)\n\n if self.signature is not None:\n ant_properties_tpl = self.getTemplate('ant.properties')\n ant_properties = ant_properties_tpl.format(\n file=unixSlashes(os.path.join('..', self.signature[\"key_file\"])),\n alias=self.signature[\"key_alias\"],\n file_password=self.signature[\"file_password\"],\n alias_password=self.signature[\"alias_password\"]\n )\n saveToFileIfNeeded(os.path.join(self.__output_dir, 'ant.properties'),\n ant_properties)\n\n\n def getTemplate(self, name):\n file = os.path.join(self.templates_dir, name)\n if not os.path.exists(file):\n raise Exception('Template not found {0}'.format(name))\n return file_get_contents(file)\n\n def stepMessage(self, step_number, step_name):\n showStepMessage(step_number, step_name)\n\n def runProject(self):\n try:\n self.__output_dir = os.path.join(self.working_directory, self.output_name)\n\n print('Running project: {0}'.format(self.__output_dir))\n\n self.stepMessage(1, 'Install to device')\n self.installOnDevice()\n\n except Exception as e:\n self.showError(e.__str__())\n sys.exit(errno.ESHUTDOWN)\n\n def buildProject(self):\n\n try:\n self.__output_dir = os.path.join(self.working_directory, self.output_name)\n\n print('Building project: {0}'.format(self.__output_dir))\n\n self.stepMessage(1, 'Prepare project data')\n preparePath(self.__output_dir)\n\n self.gatherPermissionsAndAdditionals()\n self.buildManifest()\n self.buildJNIFolder()\n self.prepareRes()\n self.prepareLib()\n self.prepareSrc()\n\n self.stepMessage(2, 'Build resources')\n self.prepareAssets()\n\n self.stepMessage(3, 'Compile native part')\n self.compileNativePart()\n\n self.stepMessage(4, 'Build .apk')\n self.buildApk()\n\n print(\"BUILD SUCCESSFULL\")\n print(\"Apk: {0}\".format(self.getApkPath()))\n\n except Exception as e:\n self.showError(e.__str__())\n sys.exit(errno.ESHUTDOWN)\n\n def putPackageName(self):\n return lambda x: x.replace('{PACKAGE}', self.package_name)\n \n def createManifest(self):\n\n perm = '\\n'.join(\n list(\n map(\n lambda x:\n '\\t'.format(x),\n sorted(self.__manifest_permisions))\n )\n )\n\n custom_perm = '\\n'.join(\n list(\n map(self.putPackageName(),\n self.__manifest_permisions_manual)\n )\n )\n\n additional = '\\n\\n'.join(\n list(\n map(self.putPackageName(),\n self.__manifest_additionals)\n )\n )\n\n perm += custom_perm;\n\n debuggable_str = ''\n if self.debuggable:\n debuggable_str = 'android:debuggable=\"true\"'\n\n manifest_template = self.getTemplate('AndroidManifest.xml')\n manifest = manifest_template.format(\n package=self.package_name,\n versionCode=self.version_code,\n versionName=self.version_name,\n minSdk=self.min_sdk,\n targetSdk=self.target_sdk,\n permissions=perm,\n label=self.label,\n icon=self.icon,\n debuggable=debuggable_str,\n additional=additional,\n main_activity=self.main_activity_definition\n )\n\n return 
manifest\n\n\n\n\n","sub_path":"buildutil/AndroidProject.py","file_name":"AndroidProject.py","file_ext":"py","file_size_in_byte":19146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"202905272","text":"from django.urls import path, include\nfrom services import views\nfrom rest_framework import routers\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\nnamespace = 'services'\nurlpatterns = [\n path('index/', views.indexView.as_view(), name='index'),\n path('register/', views.registerView.as_view(), name='form_register'),\n path('register/add', views.addUser.as_view(), name='add'),\n path('register/delete/', views.deleteUser.as_view(), name='delete'),\n path('', include(router.urls)),\n\n]\n\n","sub_path":"services/urls_services.py","file_name":"urls_services.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"453884697","text":"import heapq\n\n\nclass Solution:\n def nthUglyNumber(self, n: int) -> int:\n nums = []\n visited = set()\n queue = [1]\n \n while queue:\n node = heapq.heappop(queue)\n if len(nums) == n:\n break\n nums.append(node)\n for d in [2, 3, 5]:\n idx = node * d\n if idx not in visited:\n heapq.heappush(queue, idx)\n visited.add(idx)\n return nums[-1]\n\n","sub_path":"264_Ugly_Number_II/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"292802402","text":"import nltk\nimport argparse\nimport torch\nimport numpy as np\nfrom torch import nn, optim\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom model import Model\nfrom dataset import Dataset\n\ndef train(dataset, model, args):\n model.train()\n\n dataloader = DataLoader(dataset, batch_size=args.batch_size)\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=0.001)\n\n for epoch in range(args.max_epochs):\n state_h, state_c = model.init_state(args.sequence_length)\n\n for batch, (x, y) in enumerate(dataloader):\n optimizer.zero_grad()\n\n y_pred, (state_h, state_c) = model(x, (state_h, state_c))\n loss = criterion(y_pred.transpose(1, 2), y)\n\n state_h = state_h.detach()\n state_c = state_c.detach()\n\n loss.backward()\n optimizer.step()\n\n print({ 'epoch': epoch, 'batch': batch, 'loss': loss.item() })\n\ndef predict(dataset, model, text, next_words=100):\n model.eval()\n\n words = text.split(' ')\n state_h, state_c = model.init_state(len(words))\n\n for i in range(0, next_words):\n x = torch.tensor([[dataset.word_to_index[w] for w in words[i:]]])\n y_pred, (state_h, state_c) = model(x, (state_h, state_c))\n\n last_word_logits = y_pred[0][-1]\n p = torch.nn.functional.softmax(last_word_logits, dim=0).detach().numpy()\n word_index = np.random.choice(len(last_word_logits), p=p)\n words.append(dataset.index_to_word[word_index])\n\n return words\n\nwith open('actual_both_dialogues.txt', 'r', encoding='utf8') as doc:\n text_tokenized = []\n for line in doc.readlines():\n try:\n speaker, dialogue = line.split(': ', 1)\n dialogue = nltk.word_tokenize(dialogue.lower())\n text_tokenized.append(speaker.lower()+\":\") \n text_tokenized += dialogue\n except ValueError:\n print(line)\n\n\nword_to_ix = {key:val for val,key in enumerate(set(text_tokenized))}\nix_to_word = {val:key for key,val in word_to_ix.items()}\n\nint_script = [word_to_ix[word] for word in text_tokenized]\n\n\ndef 
batch_data(int_script, seq_length, batch_size):\n f, t = [], []\n\n # build sliding windows of seq_length tokens and their next-token targets\n for i in range(len(int_script) - seq_length):\n f.append(int_script[i : i+seq_length])\n t.append(int_script[i+seq_length])\n\n data_set = TensorDataset(torch.tensor(f, dtype=torch.long), torch.tensor(t, dtype=torch.long))\n \n return DataLoader(data_set, shuffle=True, batch_size=batch_size)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--max-epochs', type=int, default=10)\nparser.add_argument('--batch-size', type=int, default=256)\nparser.add_argument('--sequence-length', type=int, default=4)\nargs = parser.parse_args()\n\ndataset = Dataset(args)\nmodel = Model(dataset)\n\ntrain(dataset, model, args)\nprint(predict(dataset, model, text='Spongebob Squarepants'))","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"415279509","text":"\"\"\" Scraping \"\"\"\nfrom urllib.request import urlopen\nimport re\n\ndef grab_html():\n url = input(\"Paste your URL here.\")\n page = urlopen(url)\n html_bytes = page.read()\n global html \n html = html_bytes.decode(\"utf-8\")\n\n\ngrab_html()\n\npattern = \"<title.*?>.*?</title.*?>\"\nmatches = re.search(pattern, html, re.IGNORECASE)\ntitle = matches.group()\ntitle = re.sub(\"<.*?>\", \"\", title)\nprint(title)\n\n","sub_path":"scraps/regex_html_fun.py","file_name":"regex_html_fun.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"495044343","text":"import fileinput\nimport re\nimport os\nimport logging\nfrom shutil import copy2\nfrom glob import glob\n\nRE_H = r\"#+ (.*)\"\nRE_ENDH = r\"^##\"\nlogging.basicConfig(format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s', datefmt='%m-%d:%H:%M:%S', level=logging.DEBUG)\n\ndef inject_problem(file_name):\n new_file = f\"{file_name}_with_problem.md\"\n with open(new_file, \"w\") as f:\n with open(f\"{file_name}.md\", \"r\") as g:\n lines = g.read().split(\"\\n\")\n i = -1\n cur_h = None\n while i + 1 < len(lines):\n i += 1\n line = lines[i]\n\n match_h = re.match(RE_H, line)\n cur_h = match_h.group(1).lower() if match_h else cur_h\n\n f.write(line + \"\\n\")\n if i + 1 == len(lines) or re.match(RE_ENDH, lines[i + 1]):\n for image in glob(f\"problem/{cur_h}/images/*\"):\n logging.debug(f\"{image}\")\n copy2(image, \"book/images\")\n for problem in glob(f\"problem/{cur_h}/*\"):\n logging.debug(f\"{problem}\")\n if os.path.isdir(problem):\n continue\n f.write(f\"* [Edit](https://github.com/SeanHwangG/class/edit/main/{problem})\\n\")\n f.write(open(problem, 'r').read())\n f.write(f\"\\n\")\n logging.info(f\"{new_file} has been generated\")\n\n\nif __name__ == \"__main__\":\n files = [\"book/sql\", \"book/syntax\", \"book/algorithm\"]\n for file in files:\n inject_problem(file)\n","sub_path":"class/update_md.py","file_name":"update_md.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"17460724","text":"# This file provides functions for routing to ordering pages.\n\nfrom flask import session\nfrom . 
import orders\nfrom .utils import *\nfrom time import *\nfrom flask_login import login_required\n\norders_creation_page = redirect('/test/page')\n\ncreate_complete_return = jsonify([0, 'Done.'])\ncreate_invalid_value_return = jsonify([1, 'Value cannot be less than 0.'])\ncreate_house_not_found_return = jsonify([2, 'House does not exist.'])\ncreate_self_dealing_return = jsonify([3, 'Dealing with yourself is prevented.'])\ncreate_conflict_return = jsonify([4, 'An order using this house at this time is already confirmed.'])\n\n# for creating new orders.\n@orders.route('/orders/new')\ndef get_orders_creating_page():\n return orders_creation_page\n\ndef create_id():\n \"\"\"\n Check the max id from orders table, and select a bigger integer.\n \"\"\"\n db = database.getdb()\n # Notice there is a global write lock to the database file,\n # this command should run properly with reading access.\n cmd = 'select MAX(id) from orders'\n max_id = db.execute(cmd).fetchall()\n # MAX(id) yields a single row containing NULL when the table is empty.\n if len(max_id) == 0 or max_id[0][0] is None:\n return '1'\n return str(int(max_id[0][0]) + 1)\n \n# test url: http://127.0.0.1:5000/orders/create?user=947426443@qq.com&house=1017002344&value=1000&time=2018-9-26\n@orders.route('/orders/create', methods=['GET', 'POST'])\n@login_required\ndef create_order():\n \"\"\"\n The frontend submits a form used to populate the database.\n Parameter structure:\n user : string\n house : string\n value : string\n time : string 'yyyy-mm-dd'\n use argument form=true to enable form data delivery.\n \"\"\"\n \n data = select_data_source()\n user = data['user']\n house = data['house']\n value = data['value']\n time = get_std_time_str(data['time'])\n \n db = database.getdb()\n \n ### User permission check.\n \n if check_user_permission(user) : return permission_denied_return\n \n ### Value validation check.\n \n if int(value) < 0 :\n return create_invalid_value_return\n \n ### House existence check.\n \n cmd = 'select * from houses where id==\"{0}\"'.format(house)\n house_info = db.execute(cmd).fetchall()\n if len(house_info) == 0 :\n return create_house_not_found_return\n \n ### House master and user comparison check.\n \n cmd = 'select master from houses where id==\"{0}\"'.format(house)\n house_master = db.execute(cmd).fetchall()[0][0]\n if user == house_master :\n return create_self_dealing_return\n \n ### Conflict time check.\n \n cmd = 'select * from orders where house==\"{0}\" AND time==\"{1}\" AND passed==1'.format(house, time)\n print('conflict: ', cmd)\n orders_conflict = db.execute(cmd).fetchall()\n if len(orders_conflict) != 0 :\n return create_conflict_return\n \n ### Check finished.\n ### Do the operation.\n \n cmd = 'insert into orders values (' + ','.join([\n '\"{0}\"'.format(create_id()), # id\n '\"{0}\"'.format(time), # time\n '{0}'.format(value), # value\n '\"{0}\"'.format(user), # customer\n '\"{0}\"'.format(house_master), # owner\n '0',\n '0',\n '0',\n '\"{0}\"'.format(house), # house\n '0', # passed\n '0' # done\n ]) + ')'\n db.execute(cmd)\n db.commit()\n \n print('order confirmed: customer: {} owner: {} time: {}'.format(user, house_master, time))\n \n return create_complete_return\n","sub_path":"app/orders/orders_new.py","file_name":"orders_new.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"413017800","text":"import tensorflow as tf\nimport numpy as np\nimport sys\nimport os\n\nfrom data.synth_graphs.parse import parse_and_stats\nfrom data.graph_dataset_preprocessing import to_dict, 
to_dict_batch\n\nfrom genmodel.htmm.bottom_up import BottomUpHTMM\nfrom genmodel.htmm.top_down import TopDownHTMM\nfrom graph_htmn.generative_inference import generative_inference\nfrom graph_htmn.recurrent_rdn import RecurrentRDN\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n    # Hide all GPUs so TensorFlow runs on the CPU\n    try:\n        tf.config.experimental.set_visible_devices([], 'GPU')\n        logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n        print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPU\")\n    except RuntimeError as e:\n        # Visible devices must be set before GPUs have been initialized\n        print(e)\n\ndataset, n_bu, n_td, C, batch_size = sys.argv[1], int(sys.argv[2]), int(sys.argv[3]), \\\n                                     int(sys.argv[4]), int(sys.argv[5])\n\ntrain_data, eval_data, test_data, max_trees, L = parse_and_stats(dataset)\n\ntrain_feat, train_lab = to_dict(train_data['adj'], train_data['nodes'], L), train_data['lab']\ntrain_feat = to_dict_batch(train_feat, max_trees)\n\neval_feat, eval_lab = to_dict(eval_data['adj'], eval_data['nodes'], L), eval_data['lab']\neval_feat = to_dict_batch(eval_feat, max_trees)\n\nbu_model = BottomUpHTMM(n_bu, C, L, 5)\ntd_model = TopDownHTMM(n_td, C, L, 5)\nrdn = RecurrentRDN('C', 3, n_bu, n_td, max_trees)\n\nadam_opt = tf.keras.optimizers.Adam(learning_rate=1e-3)\n\ncce = tf.keras.losses.CategoricalCrossentropy(from_logits=True)\nloss_mean = tf.keras.metrics.Mean()\n\naccuracy_mean = tf.keras.metrics.Mean()\naccuracy = tf.keras.metrics.Accuracy()\n\ntrain_dataset = tf.data.Dataset.from_tensor_slices((train_feat, train_lab))\ntrain_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)\neval_dataset = tf.data.Dataset.from_tensor_slices((eval_feat, eval_lab)).batch(batch_size)\n\n\ndef train_step(batch_features, batch_labels, bu_model, td_model, rdn, adam_opt):\n    with tf.GradientTape() as bu_tape:\n        bu_likelihood = generative_inference(batch_features, bu_model, max_trees)\n        to_div = tf.expand_dims(tf.cast(batch_features['n_trees'], dtype=tf.float32), axis=-1)\n        aux_bu_likelihood = tf.reduce_sum(bu_likelihood, axis=1)/to_div\n        neg_bu_likelihood = -1 * tf.reduce_mean(aux_bu_likelihood, axis=0)\n\n    with tf.GradientTape() as td_tape:\n        td_likelihood = generative_inference(batch_features, td_model, max_trees)\n        to_div = tf.expand_dims(tf.cast(batch_features['n_trees'], dtype=tf.float32), axis=-1)\n        aux_td_likelihood = tf.reduce_sum(td_likelihood, axis=1)/to_div\n        neg_td_likelihood = -1 * tf.reduce_mean(aux_td_likelihood, axis=0)\n\n    with tf.GradientTape() as rdn_tape:\n        logits = rdn(bu_likelihood, td_likelihood)\n        one_hot = tf.one_hot(batch_labels, 3)\n        loss = cce(one_hot, logits)\n\n    bu_grads = bu_tape.gradient(neg_bu_likelihood, bu_model.trainable_weights)\n    td_grads = td_tape.gradient(neg_td_likelihood, td_model.trainable_weights)\n    rdn_grads = rdn_tape.gradient(loss, rdn.trainable_weights)\n\n    adam_opt.apply_gradients(zip(bu_grads, bu_model.trainable_weights))\n    adam_opt.apply_gradients(zip(td_grads, td_model.trainable_weights))\n    adam_opt.apply_gradients(zip(rdn_grads, rdn.trainable_weights))\n\n    return loss\n\n\ndef eval_step(batch_features, batch_labels, bu_model, td_model, rdn):\n    bu_likelihood = generative_inference(batch_features, bu_model, max_trees)\n    td_likelihood = generative_inference(batch_features, td_model, max_trees)\n\n    logits = rdn(bu_likelihood, td_likelihood)\n    loss = cce(tf.one_hot(batch_labels, 3), logits)\n\n    predictions = tf.argmax(logits, 
axis=1)\n    acc = accuracy(batch_labels, predictions)\n    return loss, acc\n\n\nfor epoch in range(100):\n    tf.print('Start of epoch %d' % (epoch,))\n    for step, (batch_features, batch_labels) in enumerate(train_dataset):\n        tf.print(\"Step\", step)\n        loss = train_step(batch_features, batch_labels, bu_model, td_model, rdn, adam_opt)\n\n        weight = [batch_labels.shape[0]/batch_size]\n        loss_mean.update_state(loss, [weight])\n\n        if step % 10 == 0:\n            print(\"    Loss during step\", step, \"=\", loss_mean.result().numpy())\n            loss_mean.reset_states()\n\n    loss_mean.reset_states()\n    print('Starting evaluation %d' % (epoch, ))\n    for batch_features, batch_labels in eval_dataset:\n        loss, acc = eval_step(batch_features, batch_labels, bu_model, td_model, rdn)\n\n        weight = [batch_labels.shape[0]/batch_size]\n        accuracy_mean.update_state(acc, [weight])\n        loss_mean.update_state(loss, [weight])\n\n    print('Evaluation result:')\n    print('    Loss = ', loss_mean.result().numpy())\n    print('    Accuracy = ', accuracy_mean.result().numpy())\n    accuracy_mean.reset_states()\n    loss_mean.reset_states()\n","sub_path":"graph_htmn_main.py","file_name":"graph_htmn_main.py","file_ext":"py","file_size_in_byte":4996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"64026206","text":"import pandas as pd\nimport datetime as dt\n\n### This script takes the actions data and adds 'inactive' actions when appropriate. ###\n\n# EDIT to point to the folder that contains the allactions.csv file\ndataanalysis_path='/Users/jim/Dropbox/Projects/smartCAD/data_analysis/CharlottesvilleHS2016'\n\n# EDIT to change criteria for when to add 'inactive'\n# When the time gap is less than 'low' OR more than 'high', do nothing\n# When the time gap is between 'low' and 'high' then add 'inactive'\nlow = dt.timedelta(minutes=2) # if students stop work for a few moments, don't count as inactive\nhigh = dt.timedelta(hours=2) # if students stop work for the day, don't count as inactive\n\n#create list of inactives\ndf = pd.DataFrame.from_csv(dataanalysis_path+'/allactions.csv') # Gets the actions data\nallStudentIDs = df.StudentID.unique().tolist()\nallinactives=[]\nfor StudentID in allStudentIDs:\n    thisStudent = df.loc[df['StudentID'] == StudentID].sort_values('Timestamp') # data for one student, sorted chronologically\n    format = '%Y-%m-%d %X'\n    prior_timestamp = dt.datetime.strptime('2000-01-01 01:01:01', format) # some time a long time before study\n    for index, row in thisStudent.iterrows():\n        this_timestamp = dt.datetime.strptime(row['Timestamp'], format)\n        delta = this_timestamp - prior_timestamp\n        while delta>low and delta<high: # add 'inactive' every <low> minutes until time gap is filled\n            prior_timestamp+=low\n            allinactives.append([StudentID, prior_timestamp.strftime(format), 'inactive']) # record an 'inactive' action at this point in the gap\n            delta = this_timestamp - prior_timestamp\n        prior_timestamp = this_timestamp\n\n# add inactives to original actions, resort and output as csv\ndf_allinactives = pd.DataFrame(allinactives, columns=['StudentID', 'Timestamp', 'Action'])\ndf_combined = df.append(df_allinactives, ignore_index=True)\ndf_combined.sort_values(['StudentID','Timestamp'], inplace=True)\ndf_combined.reset_index(inplace=True, drop=True)\ndf_combined.to_csv(dataanalysis_path+'/allactions_withinactives.csv', encoding='utf-8')\nprint(len(df), 'original actions +', len(df_allinactives), 'inactives = ',len(df_combined), ' total actions')\n\n\n","sub_path":"processlogscripts/add inactive to actions.py","file_name":"add inactive to 
actions.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"138344107","text":"import main_lab1_task2 as task2\nimport random as rand\n\n\n# mstr = '015013222301023111110333512'\n\n# print(mstr[0 + 5 * 1:5 + 5 * 2:1])\n# print(mstr[1:5 + 10])\n\nmstr = ''\n#\nfor i in range(0, 10 **3):\n mstr += str(rand.randint(0, 6))\n#\n# print(mstr)\n#\nt = 5\nm = 3\nv = task2.str_splitter(mstr, t)\n#\n# print(\"r = {0}\".format(len(v)))\n#\np = []\nd = task2.d_m(m)\n\np = task2.p(d, t, v)\n\nprint(p)\n#\nprint(task2.chi_square(v, p, len(mstr), t))\n#\n#\n#\n#\n#\n#\n#\n","sub_path":"fulllabs/test_lab1_task2.py","file_name":"test_lab1_task2.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"324184444","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport subprocess\r\nfrom NicePrinter import title, bold, red, box, yellow\r\nfrom PyInquirer import prompt\r\nfrom examples import custom_style_1\r\nfrom progress.bar import Bar\r\n\r\nos.system('clear')\r\nprint(title(bold(red(\"EXIT\")), 75, '-'))\r\nprint()\r\n\r\nquestions = [\r\n {\r\n 'type': 'confirm',\r\n 'message': 'Do you want to exit?',\r\n 'name': 'exit',\r\n 'default': False,\r\n },\r\n]\r\n\r\nanswers = prompt(questions, style=custom_style_1)\r\nif answers['exit']:\r\n with Bar('Exiting', max=50000) as bar:\r\n for i in range(50000):\r\n bar.next()\r\n print(box(yellow(\"You left mailbox successfully.\")))\r\n subprocess.call(['cowsay', 'Thank you for using mailbox. Come back as soon as possible.'])\r\n","sub_path":"python_scripts/exit.py","file_name":"exit.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"167588048","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport cv2\nimport numpy as np\n\nTEMPLATE_SIZE = 25\nMOVING_THRESHOLD = 3\nOFFSET = 10\n\nclass CameraResult(object):\n\tdef __init__(self, moving, caps, areas, centers, rects):\n\t\tself.moving = moving\n\t\tself.images = caps\n\t\tself.areas = areas\n\t\tself.centers = centers\n\t\tself.rects = rects\n\nclass TableCamera(object):\n\t__instance = None\n\n\tdef __init__(self, disp_size, dev=0):\n\t\tcam = cv2.VideoCapture(dev)\n\t\tcam.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)\n\t\tcam.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)\n\t\tself._cam = cam\n\t\tself._mask = None\n\t\tself._frame = None\n\t\tself._matrix = None\n\t\tself._capture_size = None\n\t\tself._num_of_nochange = 0\n\t\tself._centers_buff = []\n\t\tself._disp_size = disp_size\n\n\tdef __new__(cls, *args, **keys):\n\t\tif cls.__instance is None:\n\t\t\tcls.__instance = object.__new__(cls)\n\t\treturn cls.__instance\n\n\tdef get_capture_size(self):\n\t\treturn self._capture_size\n\n\tdef is_calibrated(self):\n\t\treturn self._matrix is not None and self._capture_size[0] > 500 and self._capture_size[1] > 300\n\n\tdef calibration(self):\n\t\t_, frame = self._cam.read()\n\t\tframe = cv2.flip(frame, -1)\n\n\t\ttl = self._patternMatch(frame, \"marker/marker_left_top.jpg\")\n\t\ttr = self._patternMatch(frame, \"marker/marker_right_top.jpg\")\n\t\tbl = self._patternMatch(frame, \"marker/marker_left_bottom.jpg\")\n\t\tbr = self._patternMatch(frame, \"marker/marker_right_bottom.jpg\")\n\n\t\ttl = tl\n\t\ttr = (tr[0] + 
TEMPLATE_SIZE, tr[1])\n\t\tbl = (bl[0], bl[1] + TEMPLATE_SIZE)\n\t\tbr = (br[0] + TEMPLATE_SIZE, br[1] + TEMPLATE_SIZE)\n\n\t\tframe_width = ((tr[0] - tl[0]) + (br[0] - bl[0])) // 2\n\t\tframe_height = ((bl[1] - tl[1]) + (br[1] - tr[1])) // 2\n\t\tself._capture_size = (frame_width, frame_height)\n\n\t\tself._matrix = self._getPerspectiveTransform([tl, tr, br, bl], self._capture_size)\n\t\tframe = cv2.warpPerspective(frame, self._matrix, self._capture_size)\n\n\t\tcv2.imwrite(\"camera_log/calib_result.jpg\", frame)\n\n\tdef _patternMatch(self, image, pattern):\n\t\ttemplate = cv2.imread(pattern, 0)\n\t\ttemplate = cv2.resize(template, (TEMPLATE_SIZE, TEMPLATE_SIZE))\n\t\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\t\t_, gray = cv2.threshold(gray, 120, 255, cv2.THRESH_BINARY)\n\t\tcv2.imwrite(\"camera_log/ptnMatch.jpg\", gray)\n\t\tres = cv2.matchTemplate(gray, template, cv2.TM_CCOEFF_NORMED)\n\t\tmin_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\t\t#return max_loc if max_val > 0.5 else None\n\t\treturn max_loc\n\n\tdef _getPerspectiveTransform(self, src, size):\n\t\tsrc = np.float32(src)\n\t\tdst = np.float32([(0,0), (size[0],0), (size[0], size[1]), (0, size[1])])\n\t\treturn cv2.getPerspectiveTransform(src, dst)\n\n\tdef capture(self):\n\t\tassert self._matrix is not None\n\t\t_, frame = self._cam.read()\n\t\tframe = cv2.flip(frame, -1)\n\t\tframe = cv2.warpPerspective(frame, self._matrix, self._capture_size)\n\n\t\tmask = self._getMaskFromThreshold(frame)\n\t\tmasked = cv2.bitwise_and(frame, frame, mask=mask)\n\n\t\tcontours, areas = self._getContours(mask)\n\n\t\tobjects = self._getContourImage(frame, contours)\n\n\t\tcenters = []\n\t\tfor c,_,_ in contours:\n\t\t\tcenters.append((int(c[0]), int(c[1])))\n\t\tif self._is_moving(centers):\n\t\t\tself._num_of_nochange = 0\n\t\telse:\n\t\t\tself._num_of_nochange += 1\n\n\t\trects = self._get_display_rects(contours)\n\t\tcenters = self._get_display_centers(centers)\n\t\tresult = CameraResult((self._num_of_nochange < MOVING_THRESHOLD),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tobjects, areas, centers, rects)\n\t\treturn result\n\t\n\tdef _getContourImage(self, img, contours):\n\t\timages = []\n\t\tfor center, size, angle in contours:\n\t\t\tmatrix = cv2.getRotationMatrix2D(center, angle, 1.0)\n\t\t\trotate = cv2.warpAffine(img, matrix, (img.shape[1], img.shape[0]), flags=cv2.INTER_CUBIC)\n\t\t\tcrop = rotate[int(center[1] - size[1]/2):int(center[1] + size[1]/2),int(center[0] - size[0]/2):int(center[0] + size[0]/2),:]\n\t\t\timages.append(crop)\n\t\treturn images\n\n\tdef _getMaskFromThreshold(self, img):\n\t\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\t#_, mask = cv2.threshold(gray, 120, 255, cv2.THRESH_BINARY)\n\t\t_, mask = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY)\n\t\tmask = cv2.bitwise_not(mask)\n\t\tcv2.imwrite(\"camera_log/mask.jpg\", mask)\n\t\treturn mask\n\t\t\n\tdef _getContours(self, mask):\n\t\tcontours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n\t\tarea_rects = []\n\t\tareas = []\n\t\tfor i in xrange(len(contours)):\n\t\t\tarea = cv2.contourArea(contours[i])\n\t\t\tif area < 2000:\n\t\t\t\tcontinue\n\t\t\tcenter, size, angle = cv2.minAreaRect(contours[i])\n\t\t\tif angle < -45:\n\t\t\t\tsize = tuple(reversed(size))\n\t\t\t\tangle = angle + 90\n\t\t\tarea_rects.append((center, size, angle))\n\t\t\tareas.append(area)\n\t\treturn area_rects, areas\n\n\tdef _get_display_rects(self, contours):\n\t\tcw,ch = self.get_capture_size()\n\t\trects = []\n\t\tfor cnt in contours:\n\t\t\trect 
= []\n\t\t\tfor p in cv2.cv.BoxPoints(cnt):\n\t\t\t\trect.append((p[0] / cw) * self._disp_size[0])\n\t\t\t\trect.append((1.0 - p[1] / ch) * self._disp_size[1])\n\t\t\trects.append(rect)\n\t\treturn rects\n\n\tdef _get_display_centers(self, centers):\n\t\tcw,ch = self.get_capture_size()\n\t\tret = []\n\t\tfor pos in centers:\n\t\t\tx = int((pos[0] / cw) * self._disp_size[0])\n\t\t\ty = int((pos[1] / ch) * self._disp_size[1])\n\t\t\tret.append((x,y))\n\t\treturn ret\n\t\t\t\n\tdef _is_moving(self, centers):\n\t\tret = True \n\t\tif len(self._centers_buff) == len(centers):\n\t\t\tret = False\n\t\t\tfor i in xrange(len(centers)):\n\t\t\t\tx1, y1 = self._centers_buff[i]\n\t\t\t\tx2, y2 = centers[i]\n\t\t\t\tif abs(x1 - x2) > OFFSET or abs(y1 - y2) > OFFSET :\n\t\t\t\t\tret = True\n\t\t\t\t\tbreak\n\t\tself._centers_buff = centers\n\t\treturn ret\n\n\tdef release(self):\n\t\tself._cam.release()\n\nif __name__=='__main__':\n\tcam = TableCamera()\n\tcam.calibration()\n\n\tfor i in xrange(200):\n\t\tresult = cam.capture()\n\t\tprint(result.moving)\n\n","sub_path":"cicrops/table_camera.py","file_name":"table_camera.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"534294548","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 22 16:23:13 2021\r\n\r\n@author: Dodo_Shahm\r\n\"\"\"\r\n\r\nimport cv2 # computer vision library\r\nimport helper as h\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nimport random\r\n\r\n# Image data directories\r\nimage_dir_training = \"day_night_images/training/\"\r\nimage_dir_test = \"day_night_images/test/\"\r\n\r\n# Using the load_dataset function in helpers.py\r\n# Load training data\r\nIMAGE_LIST = h.load_dataset(image_dir_training)\r\n\r\n# Using the load_dataset function in helpers.py\r\n# Load test data\r\nTEST_IMAGE_LIST = h.load_dataset(image_dir_test)\r\n\r\n\r\n# Standardize all training images\r\nSTANDARDIZED_LIST = h.standardize(IMAGE_LIST)\r\n\r\n# Standardize the test data\r\nSTANDARDIZED_TEST_LIST = h.standardize(TEST_IMAGE_LIST)\r\n\r\n# Testing average brightness levels\r\n# Look at a number of different day and night images and think about \r\n# what average brightness value separates the two types of images\r\n\r\n# Find the average of the averages from all day and night images\r\n\r\nnight_brightness = []\r\nday_brightness = []\r\n\r\nfor image in STANDARDIZED_LIST:\r\n \r\n if image[1] == 0:\r\n night_brightness.append(h.avg_brightness(image[0]))\r\n elif image[1] == 1:\r\n day_brightness.append(h.avg_brightness(image[0]))\r\n\r\navg_day_brightness = np.mean(day_brightness)\r\navg_night_brightness = np.mean(night_brightness)\r\n\r\n\r\n\r\n\r\n\r\n# Shuffle the standardized test data\r\nrandom.shuffle(STANDARDIZED_TEST_LIST)\r\n\r\n# Find all misclassified images in a given test set\r\nMISCLASSIFIED = h.get_misclassified_images(STANDARDIZED_TEST_LIST, avg_day_brightness, avg_night_brightness)\r\n\r\n# Accuracy calculations\r\ntotal = len(STANDARDIZED_TEST_LIST)\r\nnum_correct = total - len(MISCLASSIFIED)\r\naccuracy = num_correct/total\r\n\r\nprint('Accuracy: ' + str(accuracy))\r\nprint(\"Number of misclassified images = \" + str(len(MISCLASSIFIED)) +' out of '+ str(total))\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"588518399","text":"# 
Copyright 2015 Google Inc. All Rights Reserved.\n\n\"\"\"'functions list' command.\"\"\"\n\nimport sys\n\nfrom googlecloudsdk.core import properties\n\nfrom apitools.base import py as apitools_base\nfrom googlecloudsdk.calliope import arg_parsers\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.core.util import list_printer\n\n\nclass List(base.Command):\n \"\"\"Lists all the functions in a given region.\"\"\"\n\n @staticmethod\n def Args(parser):\n \"\"\"Register flags for this command.\"\"\"\n parser.add_argument(\n '--limit', default=None,\n help='If greater than zero, the maximum number of results.',\n type=arg_parsers.BoundedInt(1, sys.maxint))\n\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command.\n\n Args:\n args: an argparse namespace. All the arguments that were provided to this\n command invocation.\n\n Returns:\n A list object representing user functions.\n \"\"\"\n client = self.context['functions_client']\n return apitools_base.YieldFromList(\n service=client.projects_regions_functions,\n request=self.BuildRequest(args),\n limit=args.limit, field='functions',\n batch_size_attribute='pageSize')\n\n def BuildRequest(self, args):\n \"\"\"This method creates a ListRequest message to be send to GCF.\n\n Args:\n args: an argparse namespace. All the arguments that were provided to this\n command invocation.\n\n Returns:\n A ListRequest message.\n \"\"\"\n messages = self.context['functions_messages']\n project = properties.VALUES.core.project.Get(required=True)\n location = 'projects/{0}/regions/{1}'.format(\n project, args.region)\n return messages.CloudfunctionsProjectsRegionsFunctionsListRequest(\n location=location)\n\n def Display(self, unused_args, result):\n \"\"\"This method is called to print the result of the Run() method.\n\n Args:\n unused_args: The arguments that command was run with.\n result: The value returned from the Run() method.\n \"\"\"\n list_printer.PrintResourceList('functions.projects.regions.functions',\n result)\n","sub_path":"googsdk/google-cloud-sdk/lib/googlecloudsdk/functions/commands/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"153828199","text":"class Parameters():\r\n color_space='RGB'\r\n spatial_size=(32, 32)\r\n hist_bins=8\r\n orient=9\r\n pix_per_cell=8\r\n cell_per_block=2\r\n hog_channel=0\r\n hist_range = (0, 256)\r\n spatial_feat=True\r\n hist_feat=True\r\n hog_feat=True\r\n def __init__(self, color_space='RGB', spatial_size=(32, 32),\r\n hist_bins=8, orient=9, \r\n pix_per_cell=8, cell_per_block=2, hog_channel=0, scale = 1.5,hist_range = (0, 256),\r\n spatial_feat=True, hist_feat=True, hog_feat=True):\r\n # HOG parameters\r\n self.color_space = color_space\r\n self.spatial_size = spatial_size\r\n self.hist_bins = hist_bins\r\n self.orient = orient\r\n self.pix_per_cell = pix_per_cell\r\n self.cell_per_block = cell_per_block\r\n self.hog_channel = hog_channel\r\n self.scale = scale\r\n self.spatial_feat = spatial_feat\r\n self.hist_feat = hist_feat\r\n self.hog_feat = hog_feat\r\n self.hist_range = hist_range","sub_path":"parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"131213346","text":"'''7. 
Write a program which contains one function that accepts one number from the user and returns\r\ntrue if the number is divisible by 5, otherwise returns false.'''\r\n\r\ndef DivisibleByFive(num):\r\n    if (num%5)==0:\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef main():\r\n    num = input(\"Enter a number: \")\r\n    print(DivisibleByFive(int(num)))\r\n    \r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"Assignments/Assignment1/Assignment1_6.py","file_name":"Assignment1_6.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"214501786","text":"import functools\nimport logging\nimport typing\n\nimport collections\nimport pyproj\nimport shapely\nimport shapely.ops\nimport tqdm\nfrom rtree import index\nfrom shapely.geometry import Point, Polygon, LineString\n\nimport utils\n\n__multipliers = {\n    'node' : lambda x: x*3,\n    'way'  : lambda x: x*3+1,\n    'relation': lambda x: x*3+2,\n}\n\n\ndef _get_id(soup):\n    \"\"\"Converts overlapping identifiers for nodes, ways and relations into a single integer space\"\"\"\n    return __multipliers[soup['type']](int(soup['id']))\n\n\ndef get_soup_position(soup):\n    \"\"\"Extracts position for way/node as bounding box\"\"\"\n    if soup['type'] == 'node':\n        return (float(soup['lat']), float(soup['lon'])) * 2\n\n    if soup['type'] in ('way', 'relation'):\n        b = soup.get('bounds')\n        if b:\n            return tuple(float(x) for x in (b['minlat'], b['minlon'], b['maxlat'], b['maxlon']))\n        else:\n            raise TypeError(\"OSM Data doesn't contain bounds for ways and relations!\")\n    raise TypeError(\"%s not supported\" % (soup['type'],))\n\n\ndef get_soup_center(soup):\n    # lat, lon\n    pos = get_soup_position(soup)\n    return (pos[0] + pos[2])/2, (pos[1] + pos[3])/2\n\n\n__geod = pyproj.Geod(ellps=\"WGS84\")\n\n_epsg_2180_to_4326 = functools.partial(pyproj.transform, pyproj.Proj(init='epsg:2180'), pyproj.Proj(init='epsg:4326'))\n_epsg_4326_to_2180 = functools.partial(pyproj.transform, pyproj.Proj(init='epsg:4326'), pyproj.Proj(init='epsg:2180'))\n\n\ndef distance(a, b):\n    \"\"\"returns the distance between points a and b in meters\"\"\"\n    if isinstance(a, shapely.geometry.base.BaseGeometry):\n        point_a = a.centroid\n        a = (point_a.y, point_a.x)\n    if isinstance(b, shapely.geometry.base.BaseGeometry):\n        point_b = b.centroid\n        b = (point_b.y, point_b.x)\n    return __geod.inv(a[1], a[0], b[1], b[0])[2]\n\n\ndef buffered_shape_poland(shape: shapely.geometry.base.BaseGeometry, buffer: int) -> shapely.geometry.base.BaseGeometry:\n    \"\"\"\n    :param shape: shape to extend\n    :param buffer: buffer in meters -\n    :return: object extended in each direction by buffer\n\n    Uses EPSG:2180 (PUWG) to get estimated 1 m = 1 unit, so buffer will actually extend objects by one meter\n    Warning: This will work only in Poland\n    \"\"\"\n    ret = shapely.ops.transform(_epsg_4326_to_2180, shape).buffer(buffer)\n    return shapely.ops.transform(_epsg_2180_to_4326, ret)\n\n\nclass OsmDbEntry(object):\n    def __init__(self, entry, raw, osmdb):\n        self._entry = entry\n        self._raw = raw\n        self._osmdb = osmdb\n\n    @property\n    def entry(self):\n        return self._entry\n\n    @property\n    def shape(self):\n        return self._osmdb.get_shape(self._raw)\n    \n    @property\n    def center(self):\n        return self.shape.centroid\n\n    def __getattr__(self, attr):\n        return getattr(self.entry, attr)\n\n    def __getitem__(self, attr):\n        return self._entry[attr]\n\n    def within(self, other):\n        return self.shape.within(other)\n\n    def contains(self, other):\n        return self.shape.contains(other)\n\n    def 
buffered_shape(self, buffer: int) -> shapely.geometry.base.BaseGeometry:\n \"\"\"\n :param buffer: buffer in meters -\n :return: object extended in each direction by buffer\n\n Uses EPSG:2180 (PUWG) to get estimated 1 m = 1 unit, so buffer will actually extend objects by one meter\n Warning: This will work only in Poland\n \"\"\"\n return buffered_shape_poland(self.shape, buffer)\n\n\nclass OsmDb(object):\n __log = logging.getLogger(__name__).getChild('OsmDb')\n\n def __init__(self, osmdata, valuefunc=lambda x: x, indexes=None, index_filter=lambda x: True):\n # assume osmdata is a BeautifulSoup object already\n # do it an assert\n if not indexes:\n indexes = {}\n self._osmdata = osmdata\n self.__custom_indexes = dict((x, {}) for x in indexes.keys())\n self._valuefunc = valuefunc\n self.__custom_indexes_conf = indexes\n self.__cached_shapes = {}\n self.__index = index.Index()\n self.__index_entries = {}\n self.__index_filter = index_filter\n\n def makegetfromindex(index_name):\n def getfromindex(key):\n return self.__custom_indexes[index_name].get(key, [])\n return getfromindex\n\n def makegetallindexed(index_name):\n def getallindexed():\n return tuple(self.__custom_indexes[index_name].keys())\n return getallindexed\n\n for i in indexes.keys():\n setattr(self, 'getby' + i, makegetfromindex(i))\n setattr(self, 'getall' + i, makegetallindexed(i))\n\n self.__osm_obj: typing.Dict[typing.Tuple[str, int], OsmDbEntry] = dict(\n (\n (x['type'], int(x['id'])),\n OsmDbEntry(self._valuefunc(x), x, self)\n ) for x in self._osmdata['elements']\n )\n self.update_index(\"[1/14]\")\n\n def update_index(self, message=\"\"):\n self.__log.debug(\"Recreating index\")\n\n self.__index = index.Index()\n self.__index_entries = {}\n self.__custom_indexes = dict((x, collections.defaultdict(list)) for x in self.__custom_indexes_conf.keys())\n\n for val in tqdm.tqdm(\n [value for value in self.__osm_obj.values() if self.__index_filter(value)],\n desc=\"{} Creating index\".format(message)\n ):\n try:\n pos = self.get_shape(val._raw).centroid\n except KeyError:\n raise KeyError(\"Problem with getting shape of {}:{}\".format(val.entry['type'], val.entry['id']))\n pos = (pos.y, pos.x)\n if pos:\n _id = _get_id(val._raw)\n self.__index.insert(_id, pos)\n\n self.__index_entries[_id] = val\n\n for custom_index_name, custom_index_func in self.__custom_indexes_conf.items():\n self.__custom_indexes[custom_index_name][custom_index_func(val)].append(val)\n\n def add_new(self, new):\n self._osmdata['elements'].append(new)\n ret = OsmDbEntry(self._valuefunc(new), new, self)\n self.__osm_obj[(new['type'], int(new['id']))] = ret\n return ret\n\n def get_by_id(self, typ: str, id_: int) -> OsmDbEntry:\n return self.__osm_obj[(typ, int(id_))]\n\n def get_all_values(self):\n return self.__osm_obj.values()\n\n def nearest(self, point, num_results=1):\n if isinstance(point, Point):\n point = (point.y, point.x)\n return map(self.__index_entries.get,\n self.__index.nearest(point * 2, num_results)\n )\n\n def intersects(self, point):\n if isinstance(point, Point):\n point = (point.y, point.x)\n return (self.__index_entries.get(x) for x in self.__index.intersection(point * 2))\n\n def get_shape(self, soup):\n id_ = soup['id']\n ret = self.__cached_shapes.get(id_)\n if not ret:\n ret = self.get_shape_cached(soup)\n self.__cached_shapes[id_] = ret\n return ret\n\n def get_shape_cached(self, soup):\n if soup['type'] == 'node':\n return Point(float(soup['lon']), float(soup['lat']))\n\n if soup['type'] == 'way':\n nodes = 
tuple(self.get_by_id('node', y) for y in soup['nodes'])\n if len(nodes) < 3:\n self.__log.warning(\"Way has less than 3 nodes. Check geometry. way:%s\" % (soup['id'],))\n self.__log.warning(\"Returning geometry as a point\")\n return Point(sum(x.center.x for x in nodes)/len(nodes), sum(x.center.y for x in nodes)/len(nodes))\n return Polygon((x.center.x, x.center.y) for x in nodes)\n\n if soup['type'] == 'relation':\n if soup['tags'].get('type') in ('network', 'level'):\n # shortcut for stupid relations with addresses\n return LineString(\n map(\n lambda x: x.center,\n (self.get_by_id(x['type'], x['ref']) for x in soup['members'])\n )\n ).centroid\n\n # handle relation type 'building' properly for 3D buildings\n if soup['tags'].get('type') == 'building':\n outline_members = [x for x in soup['members'] if x['role'] == 'outline']\n if len(outline_members) != 1:\n raise ValueError(\"Broken geometry for relation: %s. Missing outline role\" % (soup['id'],))\n return self.get_by_id('way', outline_members[0]['ref']).shape\n\n # returns only outer ways, no exclusion for inner ways\n # multiple outer: terc=1019042\n # inner ways: terc=1014082\n outer = []\n inner = []\n if 'members' not in soup:\n raise ValueError(\"Broken geometry for relation: %s. Relation without members.\" % (soup['id'],))\n for member in filter(lambda x: x['type'] == 'way', soup['members']):\n obj = self.get_by_id(member['type'], member['ref'])\n if member['role'] == 'outer' or not member.get('role'):\n outer.append(obj)\n if member['role'] == 'inner':\n inner.append(obj)\n\n if not outer and not inner:\n # handle broken relations without inner / outer\n outer = [\n self.get_by_id(x['type'], x['ref']) for x in soup['members'] if x['role'] in ('building', 'house')\n ]\n try:\n inner = self.get_closed_ways(inner)\n outer = self.get_closed_ways(outer)\n except ValueError:\n raise ValueError(\"Broken geometry for relation: %s\" % (soup['id'],))\n ret = None\n for out in outer:\n val = out\n for inn in filter(out.contains, inner):\n val = val.difference(inn)\n if not ret:\n ret = val\n else:\n ret = ret.union(val)\n # handle broken (only inner members) relations\n if not ret and len(outer) == 0 and len(inner) > 0:\n for val in inner:\n if not ret:\n ret = val\n else:\n ret = ret.union(val)\n if not ret:\n # TODO: maybe use bounds of relation instead?\n raise ValueError(\"Broken geometry for relation: %s\" % (soup['id'],))\n return ret\n\n def get_closed_ways(self, ways):\n if not ways:\n return []\n ways = list(ways)\n way_by_first_node = utils.groupby(ways, lambda x: x._raw['nodes'][0])\n way_by_last_node = utils.groupby(ways, lambda x: x._raw['nodes'][-1])\n ret = []\n cur_elem = ways[0]\n node_ids = []\n\n def _get_ids(elem):\n return elem['nodes']\n\n def _get_way(id_, dct):\n if id_ in dct:\n rv = tuple(filter(lambda x: x in ways, dct[id_]))\n if rv:\n return rv[0]\n return None\n\n ids = _get_ids(cur_elem)\n while ways:\n node_ids.extend(ids)\n ways.remove(cur_elem)\n if node_ids[0] == node_ids[-1]:\n # full circle, append to Polygons in ret\n ret.append(\n Polygon(\n (x.center.x, x.center.y) for x in (self.get_by_id('node', y) for y in node_ids)\n )\n )\n if ways:\n cur_elem = ways[0]\n node_ids = []\n ids = _get_ids(cur_elem)\n else:\n # not full circle\n if ways: # check if there is something to work on\n last_id = node_ids[-1]\n first_id = node_ids[0]\n if _get_way(last_id, way_by_first_node):\n cur_elem = _get_way(last_id, way_by_first_node)\n ids = _get_ids(cur_elem)\n\n elif _get_way(last_id, way_by_last_node):\n 
cur_elem = _get_way(last_id, way_by_last_node)\n                        ids = list(reversed(_get_ids(cur_elem)))\n\n                    elif _get_way(first_id, way_by_first_node):\n                        cur_elem = _get_way(first_id, way_by_first_node)\n                        node_ids = list(reversed(node_ids))\n                        ids = _get_ids(cur_elem)\n\n                    elif _get_way(first_id, way_by_last_node):\n                        cur_elem = _get_way(first_id, way_by_last_node)\n                        node_ids = list(reversed(node_ids))\n                        ids = list(reversed(_get_ids(cur_elem)))\n                    else:\n                        raise ValueError\n                else: # if ways\n                    raise ValueError\n        # end while\n        return ret\n\n    \ndef main():\n    odb = OsmDb(open(\"adresy.osm\").read())\n    print(list(odb.nearest((53.5880600, 19.5555200), 10)))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"osmdb.py","file_name":"osmdb.py","file_ext":"py","file_size_in_byte":13127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"384608215","text":"# Author: Filippo Pisello\nfrom chance import COMMUNITY_CHEST_LIST, CHANCE_LIST\nimport random\n\nchance_list2, community_chest_list2 = CHANCE_LIST.copy(), COMMUNITY_CHEST_LIST.copy()\n\nclass Space:\n    \"\"\"\n    Class to capture the different spaces making up the monopoly board game\n    \"\"\"\n    def __init__(self, position: int, type_:str, color=None):\n        self.position = position\n        self.type = type_\n        self.color = color\n        self.stops = 0\n\n    def apply_effect(self, player_obj):\n        if self.type == \"To jail\":\n            player_obj.to_jail()\n        if self.type == \"Chance\":\n            self.cards(player_obj, chance_list2, CHANCE_LIST)\n        if self.type == \"Community Chest\":\n            self.cards(player_obj, community_chest_list2, COMMUNITY_CHEST_LIST)\n        return\n\n    @staticmethod\n    def cards(player_obj, cards_list, original_card_list):\n        \"\"\"\n        Draws a random card (chance/community chest) from a set and applies its\n        effect to player\n        \"\"\"\n        if not cards_list:\n            cards_list = original_card_list.copy()\n        card = random.choice(cards_list)\n        cards_list.remove(card)\n        card.apply_effect(player_obj)\n        return\n\n# Create a list of the cells in the board\nSPACE_LIST = open(\"Cells.txt\", \"r\").read()\nSPACE_LIST = SPACE_LIST.split(\"||\")[1:]\nSPACE_LIST = [Space(int(c.split(\",\")[0]), c.split(\",\")[1], c.split(\",\")[2])\n              for c in SPACE_LIST]\n","sub_path":"cell.py","file_name":"cell.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"341404881","text":"import tensorflow as tf\n\nv1=tf.Variable(tf.constant(1.0,shape=[1]),name=\"v1\")\nv2=tf.Variable(tf.constant(2.0,shape=[1]),name=\"v2\")\n\nresult=v1+v2\n\ninit_op=tf.initialize_all_variables()\n\nsaver=tf.train.Saver()\nsaver.export_meta_graph(\"path/to/model.ckpt.meta.json\",as_text=True)\n\nwith tf.Session() as sess:\n    sess.run(init_op)\n    saver.save(sess,\"path/model.ckpt\")\n    # saver.restore(sess,\"path/model.ckpt\")\n    # print sess.run(result)","sub_path":"Test/Saver.py","file_name":"Saver.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"406295539","text":"# [G2]2266 금고 테스트\n# https://www.acmicpc.net/problem/2266\n\nimport sys\ninput = sys.stdin.readline\nINF = int(1e9)\n\nn,k = map(int,input().split())\ndp = [[0] * (n+1) for _ in range(k+1)]\nfor i in range(1, k+1):\n    dp[i][1] = 1\n\nfor i in range(1, n+1):\n    dp[1][i] = i\n\nfor i in range(2, k+1):\n    for j in range(2, n+1):\n        dp[i][j] = INF\n        for f in range(1, j+1):\n            res = 1 + max(dp[i-1][f-1], dp[i][j-f])\n            dp[i][j] = min(dp[i][j], 
res)\n\nprint(dp[k][n])\n","sub_path":"BOJ/[G2]2266 금고 테스트.py","file_name":"[G2]2266 금고 테스트.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"123927206","text":"# -*- coding: utf-8 -*-\n\narray = []\nwith open('file1', 'rb') as f:\n    while True:\n        value = f.read(1)\n        # binary reads return b'' at EOF, so test for emptiness rather than comparing to ''\n        if not value: break\n        array.append(value)\nfile1 = b''.join(reversed(array))\n\nwith open('file2', 'rb') as f:\n    file2 = f.read()\n\nwith open('flag.png', 'wb') as f:\n    f.write(file1 + file2)\n","sub_path":"write-ups/BackdoorCTF/solver/ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"139447748","text":"import sys\nimport os\n# from survrevclas import *\n# from survrevregr import *\nfrom survrev import *\nfrom wsdm17nsr import *\nfrom aaai19drsa import *\nfrom tensorflow2_0.params import FLAGS\nimport multiprocessing\n\n\"\"\"\n* Filename: main.py\n* Implemented by Sundong Kim (sundong.kim@kaist.ac.kr)\n\nIncluded methods for performance evaluation.\n\"\"\"\n\ndef ss(ids):\n    sid, gpuid = ids\n    return sid+'-'+gpuid\n\ndef multirun(ids):\n    sid, gpuid = ids\n    survrevk = SurvRevK(store_id=sid, GPU_id=gpuid)\n    survrevk.run()\n\ndef main():\n    \"\"\"Main command of our survival-revisit method.\n\n    Note: This implementation is for ZOYI survival dataset.\n\n    Parameters (example - to fill for other methods like this)\n    ----------\n    y_true : array, shape = [n_samples] or [n_samples, n_classes]\n        True binary labels or binary label indicators.\n\n    y_score : array, shape = [n_samples] or [n_samples, n_classes]\n        Target scores, can either be probability estimates of the positive\n        class, co\n\n    Returns (ex)\n    -------\n    auc : float\n\n    Examples (ex)\n    --------\n    > y_scores = np.array([0.1, 0.4, 0.35, 0.8])\n    > roc_auc_score(y_true, y_scores)\n    0.75\n    \"\"\"\n\n    print('FLAGS.multiprocessing: {}'.format(FLAGS.multiprocessing))\n    print('FLAGS.all_data: {}'.format(FLAGS.all_data))\n    print('FLAGS.previous_visits: {}'.format(FLAGS.previous_visits))\n    print('FLAGS.train_epochs: {}'.format(FLAGS.train_epochs))\n\n\n    if FLAGS.multiprocessing:\n        p = multiprocessing.Pool(5)\n        store_ids = ['store_A', 'store_B', 'store_C', 'store_D', 'store_E']\n        GPU_ids = [\"5\", \"1\", \"2\", \"3\", \"0\"]\n\n        print(p.map(ss, zip(store_ids, GPU_ids))) # check multiprocessing is working\n        p.map(multirun, zip(store_ids, GPU_ids))\n\n    else:\n        survrevk = SurvRevK(store_id=FLAGS.store_id, GPU_id=\"3\")\n        survrevk.run()\n\n    # wsdm = WSDM(store_id=FLAGS.store_id, GPU_id=\"3\")\n    # wsdm.run()\n\n    # aaai19 = AAAI19(store_id=FLAGS.store_id, GPU_id=\"3\")\n    # aaai19.run()\n\nif __name__ == '__main__':\n    # print(device_lib.list_local_devices())\n    main()","sub_path":"survival-revisit-code/keras/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"575148315","text":"__author__ = 'Dinesh'\n''' gps data filtering using day name '''\n\nfor day in range(1, 4):\n    read_file_path = 'C:\\\\Users\\\\Dinesh\\\\Desktop\\\\Files\\\\bmcl0'+ str(day).zfill(2)+'-2008-01-' + str(day).zfill(2)+'.Std';\n    new_file_path='C:\\\\Users\\\\Dinesh\\\\Desktop\\\\NewFiles\\\\bmcl001'+ str(day).zfill(2)+'-2008-01-' + str(day).zfill(2)+'.txt';\n    new_file=open(new_file_path, 'w')\n    with open(read_file_path, 'r') as f:\n        lines = f.readlines()\n        for x in 
range(0, 1440, 60):\n new_file.write(lines[x])\n \n","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"575148315","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\nRoomba Space Reading Ritual consist of\n0) Preliminary: compute the 2D embeddings of his self words.\n1) Listen to Arduino, Receive in real time coordinates of new point or ending signals while the Roomba is doing the ritual through the space\n2) If point: Check if new point +/- aligned with previous 2 points of trajectory. \n If not, it means there has been a turn so it:\n - look for closer concept in his self, save concept & distance & point\n - say it aloud\n If it is:\n - replace previous point in trajectory\n3) If ending signal: trigger the full reading, i.e.:\n - clean trajectory\n - keep 3 closer concepts\n - generate Haiku from it & say it aloud\n\n\"\"\"\n#NOTE: self is world and world is self...for VA\n\n######## NOW:\n#TODO: Use satellite data or other to trigger this ? or to trigger arduino?\n\n######## SOON should do: \n#TODO: Improve 2D embeddng projection\n#TODO: What to do with words having 2 components or 3 even... torch.Size([2, 768])\n#TODO: Need CLEAN self graph? Stricter criteria to add self graph. As see there are letters like c\n#TODO: Modify back gpt2 model embeddings & test effect in generation\n#TODO: Visualisation better\n#TODO: Other tunings, parameters etc.\n\n#----------------------IMPORTS------------------------------------------\nfrom mycroft_bus_client import MessageBusClient, Message\nfrom string import punctuation\nimport random\nimport json\nimport re\nimport numpy as np\nfrom utils import pick_template, read, visualize_event_chart, update_event_data, generate_haiku, nearest_concept, initialize, approximately_colinear,redefine_embeddings\nimport time\nimport bluetooth\nfrom time import sleep\nimport sys\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nfrom datetime import datetime\n\nfrom gingerit.gingerit import GingerIt\ngingerParser= GingerIt() #for grammar\n\n# =============================================================================\n# PARAMETERS to Update or tune\n# =============================================================================\n#------------------PATHS----------\n# #NOTE: Update Paths\nGRAPH_PATH = \"/home/unfamiliarconvenient/.mycroft/fallback-associative/graph.json\"# This path is temporary, it should refer to the fallbackassociative skill folder: /home/unfamiliarconvenient/.mycroft/fallback-associative/graph.json\"\nWORDS_PATH=\"./data/\" #Modify when...\nEMBEDDINGS_PATH=\"./custom_embeddings.json\" #where save words embeddings\nEMBEDDINGS2D_PATH=\"./custom_embeddings2D.npy\" #where save words embeddings\nREADING_EVENT_FOLDER=\"./outputs/\"\n\n#str(pathlib.Path(__file__).parent.absolute()) #may use path lib...\n\n#---------------CONSTANTS MAY TUNE------------------------------\n#to decide length trajectory roomba:\nMAX_FRAMES=80 \nMIN_FRAMES=30\n#interval where listen to roomba\nINTERVAL_LISTEN=752\n#threshold to judge if 3 points are almost aligned; sensitivity may be tuned\nCOLINEARITY_THRESHOLD=0.05 \n#NOTE: change the scale embeddings depending size room!\nEMBEDDINGS_SCALE=1\n\n\n# =============================================================================\n# INITIALISATION\n# 
=============================================================================\n\nprint(\"=============================================================================\")\nprint(\"*** INITIALISATION ***\")\nprint(\"=============================================================================\")\n\n#--init constants\nFILENAMES=[\"A\", \"Ad1\", \"Ad2\", \"Ad3\", \"V\", \"Vt\", \"V2\", \"V+\", \"P\", \"Pf\", \"P0\", \"PR0\", \"PR0a\", \"PR1a\", \"PR1\", \"N\", \"N2\", \"Nf\",\"Nfa\", \"Na\", \"Aa\", \"Va\", \"Nfa\", \"ism\", \"Duo\", \"Nf\", \"Ma\", \"S\", \"Sc\", \"ESS\", \"ASA\", \"ABL\", \"QU\", \"Tion\", \"Duoa\"]\n\n# use Roomba to trigger graph drawing \nglobal trigger\ntrigger = False\n\n# --import Message Bus client to communicate with Mycroft's guts\nprint(\"Setting up connection to Mycroft client...\")\nclient = MessageBusClient()\nclient.run_in_thread()\n\n#--initialize Self etc\nprint(\"Initializing Self...\")\nself_graph, dico, templates, custom_embeddings, embeddings2D=initialize(FILENAMES, GRAPH_PATH, WORDS_PATH, EMBEDDINGS_PATH, EMBEDDINGS2D_PATH)\n\n#---rescale 2D embeddings if needed, depending space\nembeddings2D=EMBEDDINGS_SCALE*embeddings2D\n\n#--to save all points trajectory\nglobal x_vals\nglobal y_vals\nx_vals = [] \ny_vals = []\n#--to save MAIN points trajectory (only \"turns\")\nglobal trajectory\ntrajectory=[]\n#--to save concepts related to the space reading, with their associated distance & closer points in the trajectory\nglobal event_data\nevent_data=dict()\n\n#set num frames\nglobal num_frames\nnum_frames=random.randint(MIN_FRAMES, MAX_FRAMES)\n#--time tracker \n#start_time = time.time()\n\n#set event id\nglobal event_id\n#NOTE: event id for now is hours:min:seconds, but could be based on satellite data rather triggering it?\nnow = datetime.now()\nevent_id=now.strftime(\"%H:%M:%S\")\n\n# Bluetooth parameters\n# Module address\nroo_addr = \"98:D3:31:F3:F6:97\"\n# Connection port\nport = 1\n# incoming data cluster size\nsize = 1\n\n\nprint(\"Ready to start the Ritual !\")\n\n# =============================================================================\n# Connect to Arduino\n# =============================================================================\n\ndef roomba_connect():\n connected = False\n while not connected:\n try:\n print(\"Connecting to Roo\")\n sock.connect((roo_addr, port))\n connected = True\n print(\"Connected to Roomba. Awaiting Data.\")\n except Exception as e:\n print(e)\n print(\"Connection failed, retrying in 5 seconds...\")\n sock.close()\n sleep(5)\n\ndef roomba_listen():\n message = ''\n while \";\" not in message:\n try:\n data = sock.recv(size).decode()\n if not data.isspace():\n message += data\n except:\n print(\"Socket disconnected. 
Attempting to reconnect\")\n roomba_connect()\n message = message[:-1]\n print(message)\n return message\n\n\n# =============================================================================\n# Reinit\n# =============================================================================\n\ndef reinit():\n #--init some variables:\n #--to save points trajectory\n global x_vals\n global y_vals\n x_vals = [] \n y_vals = []\n global trajectory\n trajectory=[]#for MAIN points\n global event_data\n event_data=dict()\n\n# =============================================================================\n# Spatial Ritual (& Arduino Listener)\n# =============================================================================\n\ndef spatial_ritual(i):\n \"\"\"\n Spatial Ritual:\n - Listen to coordinate Sent by Arduino\n - If new interesting point (ie turn), would look up closer Self concept and say it aloud\n - Save the event data for future use \n\n Input: int, step of the trajectory\n \"\"\"\n\n global sent\n global x_vals\n global y_vals\n global trajectory\n global event_data\n global num_frames\n global trigger\n \n print(\"Frame {}\".format(i))\n \n if i==num_frames-1: #NOTE: currently last frame save & close the plot\n #plt.savefig('./outputs/full_trajectory_event_'+ event_id+ '.png')\n print(\"Ending Spatial Dance!\") \n # Send signal to arduino to stop roomba trajectory\n sock.send('d')\n trigger = False\n plt.close()\n else:\n #---listen to Arduino\n message = roomba_listen()\n\n # if message contains coordinates\n if message and message != 'clearning' and message != 'docking':\n\n x, y = (message.split(',', 1))\n x = float(x)\n y = float(y)\n \n #--save data trajectory\n x_vals.append(x)\n y_vals.append(y)\n \n #--save plot frame\n plt.cla()\n plt.plot(x_vals, y_vals, color=\"mediumblue\", marker=\"2\",markevery=1, markersize=5, markeredgecolor=\"orangered\")\n\n #----Check if new point +/- aligned with previous 2 points of trajectory (if trajectory length >2...)\n new_point=[x,y]\n \n \n #check if new point aligned with 2 previous point if nb point >=2\n if len(trajectory)>=2:\n aligned=approximately_colinear(trajectory[-2],trajectory[-1],new_point, threshold=COLINEARITY_THRESHOLD)\n if aligned:\n #new point aligned with last 2, so replace last point with new point:\n trajectory[-1]=new_point\n #NOTE: This is a way to clean the trajectory, in the sense it removes intermediary points on the same line, \n\n else: \n #means a turn happened, so will read aloud closer previous point (beware, a lil delay as look at previous point!)\n #get idx and distance nearest concept of this point\n idx, dist=nearest_concept(embeddings2D, trajectory[-1])\n #get word attached to that idx\n new_closer_concept=list(custom_embeddings.keys())[idx]\n #NOTE: Refer to the trajectory points values to adjust EMBEDDINGS_BOUND, else would always output same concept\n print(\"--looking at trajectory point {}. 
Here is {}\".format(trajectory[-1], new_closer_concept))\n #say it aloud \n client.emit(Message('speak', data={'utterance': new_closer_concept}))\n \n #--update event data\n # save data of close concepts and distance\n #NOTE: beware this concept may be already in registered concept, in which case, \n # update the idx of the trajectory point only if closer than last time registered\n event_data=update_event_data(new_closer_concept, dist, len(trajectory)-1, event_data)\n\n #add new point to trajectory (at least temporarily)\n trajectory.append(new_point)\n\n else: #second point in traj\n trajectory.append(new_point)\n\n\n# =============================================================================\n# Reading event\n# =============================================================================\n\n\ndef reading_event(trajectory, custom_embeddings, embeddings2D, event_data):\n \"\"\"\n Reading of the trajectory\n Inputs:\n trajectory: list of points in 2D space send by roomba\n custom_embeddings: embedding dictionary of self concepts\n Output:\n trinity: 3 closer self concepts selected\n custom_embeddings: redefined embedding dictionary\n \n \"\"\"\n num_points=len(trajectory)\n print(\"Reading Event of a trajectory of length {}\".format(num_points))\n #NOTE: may have to work with sub trajectory if too big?\n\n # =============================================================================\n #--1-- Extract 3 Closer concepts\n # =============================================================================\n print(\"-step 1--Extract 3 closer concepts\")\n keys=list(event_data.keys())\n values=list(event_data.values())\n distances=[val[0] for val in values]\n indices=np.argsort(distances)[:3]\n trinity=[keys[i] for i in indices]\n trinity_idx=[values[i][1] for i in indices]\n trinity_trajectory=[trajectory[idx] for idx in trinity_idx]\n print(\"Event trinity Core: {}\".format(trinity))\n print(\"In correspondance with the 3 domesticoCosmic points n° {}\".format(trinity_idx))\n\n # =============================================================================\n #--2-- Haiku generation and Reading\n # =============================================================================\n print(\"-step 2---Generate Haiku\")\n haiku=generate_haiku(trinity, templates, dico, gingerParser)\n client.emit(Message('speak', data={'utterance': haiku}))\n #save it\n with open(READING_EVENT_FOLDER+ \"haiku_event_\"+ event_id+ '.txt', 'w+') as f:\n f.writelines(haiku.split(\";\"))\n\n # =============================================================================\n #--3-- Redefine embeddings of these 3 concepts\n # =============================================================================\n print(\"-step 3---Redefine embeddings of these 3 concepts\")\n custom_embeddings=redefine_embeddings(custom_embeddings, trinity)\n #save it:\n with open(EMBEDDINGS_PATH, 'w') as fp:\n json.dump(custom_embeddings, fp)\n\n return trinity, trinity_trajectory, custom_embeddings, haiku\n\n\n# =============================================================================\n# Connect to Roomba\n# =============================================================================\n\nsock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\nroomba_connect()\n\n# =============================================================================\n# Actual Script running in loop\n# =============================================================================\n\nwhile True:\n try:\n cleaning = roomba_listen()\n if cleaning == 'cleaning':\n 
print(\"Trigger!\")\n trigger = True\n\n if trigger:\n print(\"=============================================================================\")\n print(\"****** Launching a new RITUAL ******+\")\n print(\"=============================================================================\")\n\n print(\"=============================================================================\")\n print(\"****** SPATIAL DANCE ******+\")\n print(\"=============================================================================\")\n # listen to Arduino trajectory in real time, save coordinates and draw graph\n #NOTE: currently stop listening after a certain number of frames. Could also be related to an ending signal (if arduino sends it...)\n plt.figure(figsize=(10,5))\n #compute for how many frames fo the ritual\n num_frames=random.randint(MIN_FRAMES, MAX_FRAMES)\n print(\"Performing spatial ritual for {} frames\".format(num_frames))\n ani = FuncAnimation(plt.gcf(), spatial_ritual, frames=num_frames, interval=INTERVAL_LISTEN, repeat=False) \n plt.show(block=True)\n trajectory = trajectory[:-1] #because the trajectory had one more point than when wee looked for concepts...\n print(\"Trajectory of length {}\".format(len(trajectory)))\n\n print(\"=============================================================================\")\n print(\"****** SPIRITUAL READING ****** \")\n print(\"=============================================================================\")\n trinity, trinity_trajectory, custom_embeddings, haiku=reading_event(trajectory, custom_embeddings, embeddings2D, event_data)\n\n print(\"=============================================================================\")\n print(\"****** ENDING ******+\")\n print(\"=============================================================================\")\n print(\"Save new Event Chart\")\n #--visualise Event Chart\n visualize_event_chart(trajectory, trinity_trajectory, haiku, event_id=event_id, output_folder=READING_EVENT_FOLDER)\n print(\"Saved new Event Chart!\")\n\n #--reinit some variables before next ritual\n reinit()\n print(\"reinitialized\")\n \n except KeyboardInterrupt:\n print(\"closing\")\n sock.close()\n sys.exit()\n\n\n\n\n\n\n#-------------------------------------------------\n#---------OLD CODE TEMPORARY KEEP \n\n# laod JSON structure\n# with open('sensordata.json') as jf:\n# data_archive = json.load(jf)\n\n# basically runs this script in a loop ? Need?\n#client.run_forever()\n\n # #-3---find closer words to each of these points\n # print(\"***Interpreting Trajectory; extracting closer concepts***\")\n # close_concepts, distances, trajectory_points=[], [], []\n # for i, point in enumerate(extracted_trajectory):\n # if (not (i == 0)) and (not (i == max_num_points-1)):\n # idx, dist=nearest_concept(embeddings2D, point)\n # key=list(words_embeddings.keys())[idx] #get corresponding concept\n # print(i, point, idx, key)\n # if key not in close_concepts:\n # close_concepts.append(key)\n # distances.append(dist)\n # trajectory_points.append(point)#point from traj is closer to\n # else:#NOTE: Currently too often same concept closer to all>>> change this! 
Rather ok if same?\n # j=close_concepts.index(key)\n # if distmax_num_points:\n # extracted_trajectory=trajectory\n # num_points=max_num_points\n # start=random.randint(0,num_points-max_num_points)\n # extracted_trajectory=trajectory[start:start+max_num_points]\n # print(\"Extracted a trajectory of length {}\".format(max_num_points))\n # else:\n # extracted_trajectory=trajectory","sub_path":"Roo/oikomancy.py","file_name":"oikomancy.py","file_ext":"py","file_size_in_byte":17694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"483952933","text":"def safe_pawns(pawns):\n is_safe = 0\n for i in pawns:\n if i[1] > '1':\n if i[0] != 'a' and i[0] != 'h':\n if chr(ord(i[0])-1)+chr(ord(i[1])-1) in pawns or chr(ord(i[0])+1)+chr(ord(i[1])-1) in pawns:\n is_safe += 1\n\n if i[0] == 'a':\n if chr(ord(i[0]) + 1) + chr(ord(i[1]) - 1) in pawns:\n is_safe += 1\n\n if i[0] == 'h':\n if chr(ord(i[0]) - 1) + chr(ord(i[1]) - 1) in pawns:\n is_safe += 1\n\n return is_safe\n\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert safe_pawns({\"b4\", \"d4\", \"f4\", \"c3\", \"e3\", \"g5\", \"d2\"}) == 6\n assert safe_pawns({\"b4\", \"c4\", \"d4\", \"e4\", \"f4\", \"g4\", \"e5\"}) == 1\n","sub_path":"lesson1/task1/pawn-brotherhood.py","file_name":"pawn-brotherhood.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"526361195","text":"from wsgiref.simple_server import make_server\nimport re\nimport honeypotConfig\nimport os\nimport payloadHandler\n\nclass HTTPServer(object):\n\n def __init__(self, logger):\n self.config = honeypotConfig.Config()\n self.logger=logger\n self.startServer()\n\n def loadResponse(self, environ, requestBody):\n URLSufix = environ['PATH_INFO']\n requestAnalyze = payloadHandler.Analyze(requestBody)\n \n if requestAnalyze.isMalicious():\n self.logger.warn('Recognized malicious request. Matched to \"%s\" command. 
Sending proper response' % requestAnalyze.getResult()['type'])\n            return requestAnalyze.getResult()['response'].encode('UTF-8')\n\n        if URLSufix.count('_search'):\n            responseFile = '_search.json'\n        else:\n            responseFile = 'index.json'\n        esVersion = self.config.json['general']['elasticsearchVersion']\n        try:\n            fileStream = open('./rest/responses/' + esVersion + '/' + responseFile)\n        except IOError:\n            self.logger.error('Version ' + esVersion + ' not supported')\n            os._exit(1)\n        response = fileStream.read()\n        return response.encode('UTF-8')\n    \n    def startServer(self):\n        httpd = make_server(self.config.json['server']['host'], self.config.json['server']['port'], self.elasticsearchPot)\n        self.logger.info('Serving on port %s' % self.config.json['server']['port'])\n        httpd.serve_forever()\n\n    def elasticsearchPot(self, environ, start_response):\n        status = '200 OK'\n        headers = [('Content-type', 'application/json; charset=utf-8')]\n        headers.append(('X-Powered-By', 'Express'))\n\n        requestBody = self.readRequestBody(environ)\n        \n        start_response(status, headers)\n        self.logger.info(self.captureHTTPRequest(environ, requestBody))\n\n        # WSGI expects an iterable of byte strings\n        return [self.loadResponse(environ, requestBody)]\n    \n    def captureHTTPRequest(self,\n                           environ,\n                           requestBody,\n                           appendRemoteHost=True,\n                           appendMainHeader=True,\n                           appendRequestHeaders=True,\n                           appendRequestBody=True\n                           ):\n        remoteHost = '\\n%s ' % environ['REMOTE_ADDR']\n        mainHeader = '%s %s %s' % (environ['REQUEST_METHOD'],\n                                   environ['PATH_INFO'],\n                                   environ['SERVER_PROTOCOL'])\n        \n        normHeaderName = lambda header: header[5:]\n        requestHeaders=''\n        for val in environ:\n            if not re.match('^HTTP_', val):\n                continue\n            requestHeaders += '\\n%s: %s' % (normHeaderName(val), environ[val])\n\n        result=''\n        if appendRemoteHost:\n            result += remoteHost\n        if appendMainHeader:\n            result += mainHeader\n        if appendRequestHeaders:\n            result += requestHeaders\n        result += '\\n\\n'\n        if appendRequestBody:\n            result += requestBody\n        return result\n    \n\n    def readRequestBody(self, environ):\n        try:\n            inputLength = int(environ['CONTENT_LENGTH'])\n        except (KeyError, ValueError):\n            requestBody = ''\n        else:\n            requestBody = environ['wsgi.input'].read(inputLength).decode('utf-8', 'replace')\n        return requestBody\n\n\n\n\nif __name__ == '__main__':\n    import logging\n    logging.basicConfig(level=logging.INFO)\n    h = HTTPServer(logging.getLogger(__name__))\n    \n","sub_path":"lib/HTTPServer.py","file_name":"HTTPServer.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"242390475","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('homework', '0004_assignment_deliverable'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Course',\n            fields=[\n                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n                ('title', models.CharField(max_length=64)),\n                ('description', models.TextField()),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Path',\n            fields=[\n                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n                ('title', models.CharField(max_length=64)),\n                ('description', models.TextField()),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Topic',\n            fields=[\n                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n                ('title', models.CharField(max_length=64)),\n                ('description', models.TextField()),\n                ('path', models.ForeignKey(to='homework.Path', related_name='tracks')),\n            ],\n        ),\n        migrations.RenameField(\n            
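# descriptive note (added): the old 'point_max' field on the recipe model becomes 'points'\n            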
model_name='recipe',\n old_name='point_max',\n new_name='points',\n ),\n migrations.RemoveField(\n model_name='recipe',\n name='point_min',\n ),\n migrations.AddField(\n model_name='recipe',\n name='required',\n field=models.BooleanField(default=True),\n ),\n migrations.AddField(\n model_name='course',\n name='topic',\n field=models.ForeignKey(to='homework.Topic', related_name='topics'),\n ),\n migrations.AddField(\n model_name='recipe',\n name='course',\n field=models.ForeignKey(related_name='recipes', default=1, to='homework.Course'),\n preserve_default=False,\n ),\n ]\n","sub_path":"threestrandcode/apps/homework/migrations/0005_auto_20160130_0912.py","file_name":"0005_auto_20160130_0912.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"174314332","text":"# Check Array Formation Through Concatenation\n# You are given an array of distinct integers arr and an array of integer arrays pieces, \n# where the integers in pieces are distinct. Your goal is to form arr by concatenating the arrays in pieces in any order. \n# However, you are not allowed to reorder the integers in each array pieces[i].\n# Return true if it is possible to form the array arr from pieces. Otherwise, return false.\n# Question: https://leetcode.com/problems/check-array-formation-through-concatenation/\n\n# Question: https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/\n\nclass Solution:\n def canFormArray(self, arr: List[int], pieces: List[List[int]]) -> bool:\n helper_dict = { each[0]: each for each in pieces }\n \n temp_array = []\n i = 0\n while i < len(arr):\n each = arr[i]\n temp = helper_dict.get(each)\n if not temp:\n return False\n i += len(temp)\n temp_array = temp_array + temp\n \n if len(temp_array) != len(arr):\n return False\n \n for temp, each in zip(temp_array, arr):\n if temp != each:\n return False\n \n return True\n ","sub_path":"checkArrayFormation.py","file_name":"checkArrayFormation.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"433187141","text":"import numpy as np\nfrom functools import partial\nfrom overrides import overrides\nfrom typing import Iterator, Tuple, List\nfrom ..batched_dataset_reader import H5BatchedDatasetReader\nfrom ..batched_dataset_reader.utils import batchIndexFromBatchSizes\nfrom ...utilities import toCategorical\n\nclass MNISTReader(H5BatchedDatasetReader):\n\tdef __init__(self, datasetPath:str, normalization:str = \"min_max_0_1\"):\n\t\tassert normalization in (\"none\", \"min_max_0_1\")\n\n\t\trgbTransform = {\n\t\t\t\"min_max_0_1\" : (lambda x : np.float32(x) / 255),\n\t\t\t\"none\" : (lambda x : x)\n\t\t}[normalization]\n\n\t\tsuper().__init__(datasetPath,\n\t\t\tdataBuckets = {\"data\" : [\"images\"], \"labels\" : [\"labels\"]},\n\t\t\tdimTransform = {\n\t\t\t\t\"data\" : {\"images\" : rgbTransform},\n\t\t\t\t\"labels\" : {\"labels\" : lambda x : toCategorical(x, numClasses=10)}\n\t\t\t}\n\t\t)\n\n\t@overrides\n\tdef __len__(self) -> int:\n\t\treturn len(self.getDataset()[\"images\"])\n\n\t@overrides\n\tdef __getitem__(self, index):\n\t\titem, B = super().__getitem__(index)\n\t\treturn (item[\"data\"], item[\"labels\"][\"labels\"]), 
B\n","sub_path":"neural_wrappers/readers/datasets/mnist_reader.py","file_name":"mnist_reader.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"555140477","text":"'''\nCreated on 5 Aug 2011\n\n@author: wuji\n'''\nimport math\ndef a_star_2d(start, goal, map):\n ''' The set of nodes already evaluated '''\n closedset = set()\n ''' The set of tentative nodes to be evaluated '''\n openset = set()\n openset.add(start.coordinates)\n ''' The map of navigated nodes. '''\n came_from = dict()\n f_score = dict()\n g_score = dict()\n h_score = dict()\n \n ''' Cost from start along best known path. '''\n g_score[start.coordinates] = 0 \n h_score[start.coordinates] = heuristic_cost_estimate(start.coordinates, goal.coordinates)\n ''' Estimated total cost from start to goal through y. '''\n f_score[start.coordinates] = h_score[start.coordinates] \n \n while not len(openset) == 0:\n x = get_node_with_lowest_score(openset, f_score)\n if x == goal.coordinates:\n return reconstruct_path(came_from, came_from[goal.coordinates])\n openset.remove(x)\n closedset.add(x)\n for neighbor in neighbor_nodes(x, map):\n y = neighbor.coordinates\n if not y in closedset:\n tentative_g_score = g_score[x] + dist_between(x, y)\n \n if not y in openset:\n openset.add(y)\n tentative_is_better = True\n elif tentative_g_score < g_score[y]:\n tentative_is_better = True\n else:\n tentative_is_better = False\n if tentative_is_better:\n came_from[y] = x\n g_score[y] = tentative_g_score\n h_score[y] = heuristic_cost_estimate(y, goal.coordinates)\n f_score[y] = g_score[y] + h_score[y]\n return None\n\ndef reconstruct_path(came_from, current_node):\n if current_node in came_from:\n p = reconstruct_path(came_from, came_from[current_node])\n p.append(current_node)\n return p\n else:\n p = []\n p.append(current_node)\n return p\ndef heuristic_cost_estimate(start, goal):\n return dist_between(start, goal)\n\ndef get_node_with_lowest_score(tile_set, score):\n lowest = tile_set.pop()\n tile_set.add(lowest)\n for tile in tile_set:\n if score[tile] < score[lowest]:\n lowest = tile\n \n return lowest\n\ndef neighbor_nodes(tile, map):\n return map.get_neighbours(tile[0] ,tile[1])\n\ndef dist_between(coordinate1, coordinate2):\n x = coordinate1[0] - coordinate2[0]\n y = coordinate1[1] - coordinate2[1]\n return math.hypot(x, y)\n","sub_path":"src/game/swarm/ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"338727499","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport os\nimport pandas as pd\n\n\n#beta step 2.5 deg 0 - 355\n#alpha step --- 0 - 46\n\n\nfilename = '/Users/ivanov/Yandex.Disk.localized/DESY_2018/Ti/-70_45_110_cr80ep.txt'\netalonname = '/Users/ivanov/Yandex.Disk.localized/DESY_2018/Ti/-70_45_110_etalon__I1.txt'\n\ndata = np.loadtxt(filename)\netalon = np.loadtxt(etalonname)\n\ntexture_map = []\nfor i in range(46):\n texture_map.append(data[i*143:i*143+143,1])\n\netalon_map = []\nfor i in range(46):\n etalon_map.append(etalon[i*143:i*143+143,1])\n\n\ntexture_map = np.array(texture_map)/np.max(texture_map)\netalon_map = np.array(etalon_map)/np.max(etalon_map)\n\n#texture_map.reshape(143,46)\n\nplt.subplot(211)\nplt.imshow(texture_map, cmap=cm.jet)\nplt.subplot(212)\nplt.imshow(etalon_map, cmap=cm.jet)\n#plt.show()\nplt.clf()\n\nsubstract_map = texture_map - 
etalon_map\nsubstract_map[substract_map<0]=0\n\nbeta = np.radians(np.linspace(0, 355, 143))\nalpha = np.radians(np.linspace(0, 46, 46))\nB, A = np.meshgrid(beta, alpha)\n\nax = plt.subplot(111, polar=True)\nax.set_yticklabels([])\nax.set_xticklabels([])\n\nctf = ax.contourf(B, A, substract_map, 300, cmap=cm.jet)\nplt.colorbar(ctf)\nplt.grid(False)\nplt.show()\n\n#savefile = os.path.join(savedir, \"polar_fig_%s\"%peaks[i] + \".png\")\n#plt.savefig(savefile, dpi=300)\nplt.clf()","sub_path":"DESY_2018/Ti/texture_xrd.py","file_name":"texture_xrd.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"539206029","text":"# import sys\n# sys.stdin = open('input.txt')\n\n# Function that takes an index and completes words of length L\ndef sol(idx):\n    stack = []\n    stack.append([lst[idx]])\n    while stack:\n        word = stack.pop()\n        # If the word has length L\n        if len(word) == L:\n            # Check that it contains at least one vowel and at least two consonants\n            if len([w for w in word if w in ['a','e','i','o','u']])>=1 and len([w for w in word if w not in ['a','e','i','o','u']])>=2:\n                result.append(''.join(word))\n        # If not length L yet, push letters from the remaining list that are greater than the previous letter\n        for x in lst[idx+1:]:\n            if x>word[-1] and len(word) m):\n        Sigma = 1.0 / n * np.dot(np.transpose(A), A)\n        U, s, V = np.linalg.svd(Sigma, full_matrices=True)\n        U_reduced = U[:, : k]\n        Z = np.transpose(U_reduced)\n        return Z\n    else:\n        Sigma = 1.0 / n * np.dot(A, np.transpose(A))\n        U, s, V = np.linalg.svd(Sigma, full_matrices=True)\n        U_reduced = np.dot(np.transpose(A), U)\n        # U_red=np.linalg(U_reduced)\n        U_red = U_reduced[:, : k]\n        Z = np.transpose(U_red)\n        return Z\n\n\ndef calculate_score(data, label, w, threshold, cl):\n    pred = np.zeros((data.shape[0], 1), np.float32)\n    for i in range(data.shape[0]):\n        if (np.dot(w, data[i, :].reshape(data.shape[1], 1)) > threshold):\n            pred[i, 0] = cl\n        else:\n            if (cl == 0):\n                pred[i, 0] = 1\n            else:\n                pred[i, 0] = 0\n    # print(pred)\n    error = 0.00\n    for i in range(data.shape[0]):\n        if (pred[i, 0] != label[i]):\n            error = error + 1\n    return error / data.shape[0]\n\n\nZ = np.zeros((20, 101, 101))\nlabel_Z = np.zeros((20, 1))\ni = 0\npath='/home/rohitk/Desktop/MLSP/a3/Data/emotion_classification/train'\n\nfor image in os.listdir('/home/rohitk/Desktop/MLSP/a3/Data/emotion_classification/train'):\n    Z[i]=plt.imread(os.path.join(path, image))\n    if (image.split(\".\")[1]==\"happy\"):\n        label_Z[i,:]=1\n    else:\n        label_Z[i,:]=0\n\n    i = i+1\n\n# print(Z.shape)\nZ = Z.reshape((20, 101 * 101))\n# print(Z.shape)\n\nO = np.zeros((10, 101, 101))\nlabel_O = np.zeros((10, 1))\ni = 0\n\npath='/home/rohitk/Desktop/MLSP/a3/Data/emotion_classification/test'\n\nfor image in os.listdir('/home/rohitk/Desktop/MLSP/a3/Data/emotion_classification/test'):\n    O[i]=plt.imread(os.path.join(path, image))\n\n    if (image.split(\".\")[1]==\"happy\"):\n        label_O[i,:]=1\n    else:\n        label_O[i,:]=0\n\n    i = i+1\n\n# print(O.shape)\nO = O.reshape((10, 101 * 101))\n# print(O.shape)\ny = np.ones((20, 1))\n#######################################################################################################################\n\n####################varying the value of C\nmaxq = 0\nvarying_C_18 = []\nfor k in range(18, 19):\n    Z_after_pca = np.transpose(np.dot(PCA_transform(Z, k), np.transpose(Z)))\n    print(np.shape(Z_after_pca))\n    O_after_pca = np.transpose(np.dot(PCA_transform(Z, k), np.transpose(O)))\n    C = range(1, 101, 1)\n    for c in C:\n        clf_polynomial = svm.SVC(C=c, cache_size=200, class_weight=None, coef0=0.0,\n                                 decision_function_shape='ovo', degree=5, 
gamma='auto', kernel='poly',\n max_iter=100, probability=False, random_state=None, shrinking=True,\n tol=0.001, verbose=False)\n clf_polynomial.fit(Z_after_pca, label_Z)\n accuracy = clf_polynomial.score(O_after_pca, label_O) * 100\n varying_C_18 = varying_C_18 + [accuracy]\n if (accuracy > maxq):\n best_c = c\n maxq = accuracy\n print(\"1\", k, c, accuracy)\n#######################################################################################################################\n#######################varying the value of K\nmaxq = 0\nvarying_k_67 = []\nfor k in range(1, 101, 1):\n Z_after_pca = np.transpose(np.dot(PCA_transform(Z, k), np.transpose(Z)))\n O_after_pca = np.transpose(np.dot(PCA_transform(Z, k), np.transpose(O)))\n c = best_c\n clf_polynomial = svm.SVC(C=c, cache_size=200, class_weight=None, coef0=0.0,\n decision_function_shape='ovo', degree=5, gamma='auto', kernel='poly',\n max_iter=100, probability=False, random_state=None, shrinking=True,\n tol=0.001, verbose=False)\n clf_polynomial.fit(Z_after_pca, label_Z)\n accuracy = clf_polynomial.score(O_after_pca, label_O) * 100\n varying_k_67 = varying_k_67 + [accuracy]\n if (accuracy > maxq):\n best_k = k\n maxq = accuracy\n print(\"2\", k, c, accuracy)\n####################################################################################\n#######################varying the degree\nvarying_degree = []\nfor deg in range(1, 101, 1):\n k = best_k\n Z_after_pca = np.transpose(np.dot(PCA_transform(Z, k), np.transpose(Z)))\n O_after_pca = np.transpose(np.dot(PCA_transform(Z, k), np.transpose(O)))\n c = best_c\n clf_polynomial = svm.SVC(C=c, cache_size=200, class_weight=None, coef0=0.0,\n decision_function_shape='ovo', degree=deg, gamma='auto', kernel='poly',\n max_iter=100, probability=False, random_state=None, shrinking=True,\n tol=0.001, verbose=False)\n clf_polynomial.fit(Z_after_pca, label_Z)\n accuracy = clf_polynomial.score(O_after_pca, label_O) * 100\n varying_degree = varying_degree + [accuracy]\n\n print(\"2\", deg, accuracy)\n\nplt.figure()\nplt.plot(range(1, 101), varying_C_18, label='PCA dim=18', marker='.',\n markersize=5, color='r')\nplt.ylabel('Test accuracy of polynomial SVM classifier with varying C')\nplt.xlabel('range of Cost C')\nplt.legend()\nplt.title('Test accuracy of polynomial SVM classifier with varying C')\nplt.savefig('/home/rohitk/Desktop/MLSP/a3/' + 'SVM_polynomial_C_vary' + '.png') ###################### point to output directory\n\nplt.figure()\nplt.plot(range(1, 101), varying_k_67, label=\"fixed cost C=\" + str(best_c), marker='.',\n markersize=5, color='r')\nplt.ylabel('Test accuracy of polynomial SVM classifier with varying k(dim) in PCA')\nplt.xlabel('range of k(dim) in PCA')\nplt.legend()\nplt.title('Test Accuracy of polynomial SVM classifier with varying k in pca')\nplt.savefig('/home/rohitk/Desktop/MLSP/a3/' + 'SVM_polynomial_k_vary' + '.png') ###################### point to output directory\n\nplt.figure()\nplt.plot(range(1, 101), varying_degree, label=\"fixed cost C=\" + str(best_c) + \" fixed dim k=\" + str(best_k), marker='.',\n markersize=5, color='r')\nplt.ylabel('Test accuracy of polynomial SVM classifier with varying degree in poly')\nplt.xlabel('range of degree in Polynomial')\nplt.legend()\nplt.title('Test Accuracy of polynomial SVM classifier with varying degree in poly')\nplt.savefig('/home/rohitk/Desktop/MLSP/a3/' + 'SVM_degree_vary' + '.png') ###################### point to output 
directory\n\n\n\n\n","sub_path":"Assignment_3/Question4_poly_svm.py","file_name":"Question4_poly_svm.py","file_ext":"py","file_size_in_byte":6809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"111496794","text":"from json import dumps, loads\nfrom logging import getLogger\nfrom threading import Thread\n\nfrom .api import aggregate\nfrom .protocol import *\n\n__all__ = [\"Connection\"]\n\nclass Connection(object):\n \"\"\"Handles an open connection between the server and the JS client.\"\"\"\n\n def __init__(self, socket, database):\n self._socket = socket\n self._client = \"%04d\" % (id(self._socket) % 10000)\n self._state = STATE_WAITING\n self._database = database\n self._document = None\n self._processed = []\n\n self._logger = getLogger(\"gunicorn.error\")\n self._log(\"INFO\", \"Connection opened.\")\n\n def _log(self, event, data):\n \"\"\"Send a debug message to the terminal.\"\"\"\n events = {\n \"INFO\": (self._logger.info, u\"\\x1b[33m{0} \\x1b[36m!!\\x1b[0m {1}\"),\n \"SEND\": (self._logger.debug, u\"\\x1b[33m{0} \\x1b[31m<-\\x1b[0m {1}\"),\n \"RECV\": (self._logger.debug, u\"\\x1b[33m{0} \\x1b[32m->\\x1b[0m {1}\")\n }\n func, template = events[event]\n func(template.format(self._client, data).encode(\"utf8\"))\n\n def _send(self, verb, payload=None):\n \"\"\"Send data to the client.\"\"\"\n data = (verb + \" \" + payload) if payload else verb\n self._log(\"SEND\", data)\n self._socket.send(data)\n\n def _error(self, reply=REPLY_INVALID):\n \"\"\"Client has sent bad data; close the connection with an error.\"\"\"\n self._state = STATE_CLOSING\n self._send(SVERB_INVALID, reply)\n\n def _handle_keywords(self, keywords):\n \"\"\"Handle a keyword update in the document. Maybe reply with stuff.\"\"\"\n def inner():\n for keyword in keywords:\n if keyword in self._processed:\n continue\n self._processed.append(keyword)\n for box in aggregate(keyword):\n if self._state != STATE_READY:\n return\n self._send(SVERB_UPDATE, dumps(box))\n\n thread = Thread(target=inner)\n thread.daemon = True\n thread.start()\n\n def _handle_state_waiting(self, verb, data):\n \"\"\"Handle input from the client when in the \"waiting\" state.\"\"\"\n if verb == CVERB_OPEN:\n self._state = STATE_READY\n self._document = doc = self._database.get_document(data)\n if not doc:\n self._error(REPLY_NODOC)\n return\n payload = {\"title\": doc.title, \"text\": doc.text}\n self._send(SVERB_READY, dumps(payload))\n self._handle_keywords(doc.keywords)\n if not self._database.lock_document(doc.docid):\n self._error(REPLY_LOCKED)\n del self._document\n return\n else:\n self._error()\n\n def _handle_state_ready(self, verb, data):\n \"\"\"Handle input from the client when in the \"ready\" state.\"\"\"\n if verb == CVERB_UPDATE:\n try:\n data = loads(data)\n except ValueError:\n self._error()\n return\n if \"title\" in data:\n self._document.title = data[\"title\"]\n if \"text\" in data:\n self._document.text = data[\"text\"]\n if \"keywords\" in data:\n self._handle_keywords(data[\"keywords\"])\n self._database.save_document(self._document)\n elif verb == CVERB_CLOSE:\n self._state = STATE_CLOSING\n self._send(SVERB_BYE)\n else:\n self._error()\n\n def handle(self):\n \"\"\"Handle the main server/client connection loop.\"\"\"\n while self._state != STATE_CLOSING:\n data = self._socket.receive()\n if data is None:\n self._state = STATE_CLOSING\n break\n data = data.strip()\n self._log(\"RECV\", data)\n if not data:\n self._error()\n break\n try:\n verb, data = 
data.split(\" \", 1)\n except ValueError:\n verb, data = data, None\n if self._state == STATE_WAITING:\n self._handle_state_waiting(verb, data)\n elif self._state == STATE_READY:\n self._handle_state_ready(verb, data)\n\n def finish(self):\n \"\"\"Close the connection and save all data.\"\"\"\n if self._document:\n self._database.save_document(self._document)\n self._database.unlock_document(self._document.docid)\n self._log(\"INFO\", \"Connection closed.\")\n","sub_path":"omnithinker/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"624536363","text":"import os.path\n\nimport conans\n\n\nclass Lp3Main(conans.ConanFile):\n name = \"acme-c\"\n version = \"1.0.0\"\n license = \"Zlib\"\n author = \"Tim Simpson\"\n\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"shared\": [True, False]}\n default_options = {\"shared\": False}\n\n requires = [\n \"acme-a/1.0.0@TimSimpson/testing\",\n \"acme-b/1.0.0@TimSimpson/testing\",\n ]\n\n generators = \"cmake_find_package\"\n\n exports_sources = (\n \"src/*\", \"include/*\", \"demos/*\", \"tests/*\", \"CMakeLists.txt\", \"cmake/*\",\n )\n\n def _configed_cmake(self):\n cmake = conans.CMake(self)\n cmake.configure()\n return cmake\n\n def build(self):\n cmake = self._configed_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configed_cmake()\n cmake.install()\n\n def package_info(self):\n def set_cmake_options(attribute, value):\n for generator in ['cmake_find_package', 'cmake_find_package_multi']:\n attribute[generator] = value\n\n self.cpp_info.name = \"acme-c\"\n set_cmake_options(self.cpp_info.filenames, \"acme-c\")\n set_cmake_options(self.cpp_info.names, \"ACME\")\n set_cmake_options(self.cpp_info.components['c'].names, \"c\")\n self.cpp_info.components['c'].libs = [ \"acme-c\" ]\n # In CMake, the exported target will be ACME::a but it comes from the\n # COnan package named `acme-a`, which has a component named `a`.\n self.cpp_info.components['c'].requires = [ \"acme-a::a\", \"acme-b::b\" ]\n","sub_path":"c/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"520310769","text":"import pandas as pd\n\ndf = pd.read_csv('tips.csv')\n\nimport numpy as np\n\nnp_vals = df.values\n\nnp_vals_log10 = np.log10(np_vals)\ndf_log10 = np.log10(df)\n\nprint(type(np_vals), type(np_vals_log10))\nprint(type(df), type(df_log10))\n\n","sub_path":"pandas_zip_lists_to_build_dataframe.py","file_name":"pandas_zip_lists_to_build_dataframe.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"388371404","text":"# -*- coding: utf-8 -*-\n\"\"\"\nModule to interact with the Oxford Triton controller pc.\nUses TCP/IP sockets to communicate with the GPIB device.\n\nVersion 3.1.1 (2023-06-23)\nDaan Wielens - Researcher at ICE/QTM\nUniversity of Twente\nd.h.wielens@utwente.nl\n\n----------------------------------------------------------------------------\nThe latest version (3.0 and above) uses Python's \"exec\" function\nto reduce the number of lines in the code. 
Functions that are to be repeated\nfor many channels (such as read_temp1, read_temp2, ..., read_temp16) are \ngenerated in an exec loop.\n----------------------------------------------------------------------------\n\"\"\"\n\nimport socket\n\ndef convertUnits(val):\n    if val[-1] == 'n':\n        val = float(val[:-1] + 'E-9')\n    elif val[-1] == 'u':\n        val = float(val[:-1] + 'E-6')\n    elif val[-1] == 'm':\n        val = float(val[:-1] + 'E-3')\n    return val\n    \n\nclass WrongInstrErr(Exception):\n    \"\"\"\n    A connection was established to the instrument, but the instrument\n    is not an Oxford Triton controller. Please retry with the correct\n    address. Make sure that each device has a unique address.\n    \"\"\"\n    pass\n\nclass Triton:\n    type = 'Oxford Triton'\n\n    def __init__(self, IPaddress, port=33576):\n        # Port should be a number, not a string\n        if not isinstance(port, int):\n            port = int(port)\n        # Prepare socket instance\n        self.s = socket.socket()\n        self.s.connect((IPaddress, port))\n\n    def close(self):\n        self.s.close()\n\n    def query(self, val):\n        self.s.sendall((val + '\\r\\n').encode())\n        resp = self.s.recv(1024).decode()\n        return resp\n\n    # Create functions for reading any temperature sensor (chan. 1-16) in the system\n    for i in range(16):\n        exec(\"def read_temp\" + str(i+1) + \"(self):\\n\" +\n             \"    self.s.sendall(('READ:DEV:T\" + str(i+1) + \":TEMP:SIG:TEMP\\\\r\\\\n').encode())\\n\" +\n             \"    resp = self.s.recv(1024).decode().split(':')[-1].strip('K\\\\n')\\n\" +\n             \"    return float(resp)\")\n    \n    # Create functions for reading if any temperature channel is enabled (chan. 1-16) in the system\n    for i in range(16):\n        exec(\"def read_Tenab\" + str(i+1) + \"(self):\\n\" +\n             \"    self.s.sendall(('READ:DEV:T\" + str(i+1) + \":TEMP:MEAS:ENAB\\\\r\\\\n').encode())\\n\" +\n             \"    resp = self.s.recv(1024).decode().split(':')[-1].strip('\\\\n')\\n\" +\n             \"    return resp\")\n\n    # Create functions for writing whether any temperature channel must be enabled/disabled (chan. 1-16) in the system\n    # Provide 'ON' or 'OFF' as val\n    for i in range(16):\n        exec(\"def write_Tenab\" + str(i+1) + \"(self, val):\\n\" +\n             \"    self.s.sendall(('SET:DEV:T\" + str(i+1) + \":TEMP:MEAS:ENAB:' + str(val) + '\\\\r\\\\n').encode())\\n\" +\n             \"    resp = self.s.recv(1024).decode()\")\n    \n    # Create functions for reading any pressure sensor (chan. 1-6) in the system\n    for i in range(6):\n        exec(\"def read_pres\" + str(i+1) + \"(self):\\n\" +\n             \"    self.s.sendall(('READ:DEV:P\" + str(i+1) + \":PRES:SIG:PRES\\\\r\\\\n').encode())\\n\" +\n             \"    resp = self.s.recv(1024).decode().split(':')[-1].strip('B\\\\n')\\n\" +\n             \"    return convertUnits(resp)\")\n    \n    # Create functions for reading any valve actuator (chan. 1-9) in the system\n    for i in range(9):\n        exec(\"def read_valve\" + str(i+1) + \"(self):\\n\" +\n             \"    self.s.sendall(('READ:DEV:V\" + str(i+1) + \":VALV:SIG:STATE\\\\r\\\\n').encode())\\n\" +\n             \"    resp = self.s.recv(1024).decode().split(':')[-1].strip('\\\\n')\\n\" +\n             \"    return resp\")\n    \n    # Create functions for writing whether any valve actuator must be opened/closed (chan. 
1-9) in the system\n    # Provide 'OPEN' or 'CLOSE' or 'TOGGLE' as val\n    for i in range(9):\n        exec(\"def write_valve\" + str(i+1) + \"(self, val):\\n\" +\n             \"    self.s.sendall(('SET:DEV:V\" + str(i+1) + \":VALV:SIG:STATE:' + str(val) + '\\\\r\\\\n').encode())\\n\" +\n             \"    resp = self.s.recv(1024).decode()\")\n    \n    # Get the temperature control channel \n    def read_Tchan(self):\n        for i in range(16):\n            self.s.sendall(('READ:DEV:T' + str(i+1) + ':TEMP:LOOP:MODE\\r\\n').encode())\n            msg = self.s.recv(1024).decode().split(':')[-1].strip('A\\n')\n            if not msg == 'NOT_FOUND':\n                resp = i+1\n                return resp \n    \n    # Select the temperature control channel\n    def write_Tchan(self, val):\n        self.s.sendall(('SET:DEV:T' + str(val) + ':TEMP:LOOP:HTR:H1\\r\\n').encode())\n        self.s.recv(1024).decode() \n    \n    # Read the temperature setpoint of the heater (using the control channel as read from read_Tchan)\n    def read_Tset(self):\n        chan = self.read_Tchan()\n        self.s.sendall(('READ:DEV:T' + str(chan) + ':TEMP:LOOP:TSET\\r\\n').encode())\n        resp = float(self.s.recv(1024).decode().split(':')[-1].strip('K\\n'))\n        return resp\n    \n    # Write the temperature setpoint of the heater (using read_Tchan)\n    def write_Tset(self, val):\n        chan = self.read_Tchan()\n        self.s.sendall(('SET:DEV:T' + str(chan) + ':TEMP:LOOP:TSET:' + str(val) + '\\r\\n').encode())\n        self.s.recv(1024)\n    \n    # Read PID settings for the heater (using read_Tchan)\n    def read_PID(self):\n        chan = self.read_Tchan()\n        self.s.sendall(('READ:DEV:T' + str(chan) + ':TEMP:LOOP:P\\r\\n').encode())\n        p = self.s.recv(1024).decode().split(':')[-1].strip('\\n')\n        self.s.sendall(('READ:DEV:T' + str(chan) + ':TEMP:LOOP:I\\r\\n').encode())\n        i = self.s.recv(1024).decode().split(':')[-1].strip('\\n')\n        self.s.sendall(('READ:DEV:T' + str(chan) + ':TEMP:LOOP:D\\r\\n').encode())\n        d = self.s.recv(1024).decode().split(':')[-1].strip('\\n')\n        return [p, i, d]\n    \n    # Write PID settings for the heater (using the control channel from read_Tchan)\n    def write_PID(self, p, i, d):\n        chan = self.read_Tchan()\n        self.s.sendall(('SET:DEV:T' + str(chan) + ':TEMP:LOOP:P:' + str(p) + '\\r\\n').encode())\n        self.s.recv(1024)\n        self.s.sendall(('SET:DEV:T' + str(chan) + ':TEMP:LOOP:I:' + str(i) + '\\r\\n').encode())\n        self.s.recv(1024)\n        self.s.sendall(('SET:DEV:T' + str(chan) + ':TEMP:LOOP:D:' + str(d) + '\\r\\n').encode())\n        self.s.recv(1024)\n\n    # Turn on the closed heater loop (using the control channel from read_Tchan) \n    def loop_on(self):\n        chan = self.read_Tchan()\n        self.s.sendall(('SET:DEV:T' + str(chan) + ':TEMP:LOOP:MODE:ON\\r\\n').encode())\n        self.s.recv(1024)\n\n    # Turn off the closed heater loop (using the control channel from read_Tchan)\n    def loop_off(self):\n        chan = self.read_Tchan()\n        self.s.sendall(('SET:DEV:T' + str(chan) + ':TEMP:LOOP:MODE:OFF\\r\\n').encode())\n        self.s.recv(1024)\n\n    # Read the loop status (from read_Tchan)\n    def read_loop(self):\n        chan = self.read_Tchan()\n        self.s.sendall(('READ:DEV:T' + str(chan) + ':TEMP:LOOP:MODE\\r\\n').encode())\n        resp = self.s.recv(1024).decode().split(':')[-1].strip('K\\n')\n        return resp\n    \n    # Read the heater range (by using read_Tchan)\n    def read_range(self):\n        chan = self.read_Tchan()\n        self.s.sendall(('READ:DEV:T' + str(chan) + ':TEMP:LOOP:RANGE\\r\\n').encode())\n        resp = self.s.recv(1024).decode().split(':')[-1].strip('A\\n')\n        return convertUnits(resp)\n    \n    # Write the heater range (by using read_Tchan)\n    def write_range(self, val):\n        chan = self.read_Tchan()\n        self.s.sendall(('SET:DEV:T' + str(chan) + ':TEMP:LOOP:RANGE:' + str(val) + 
'\\r\\n').encode())\n        self.s.recv(1024)\n\n    # Write the temperature control ramp rate (using read_Tchan)\n    def write_Trate(self, val):\n        chan = self.read_Tchan()\n        self.s.sendall(('SET:DEV:T' + str(chan) + ':TEMP:LOOP:RAMP:RATE:' + str(val) + '\\r\\n').encode())\n        self.s.recv(1024)\n\n    # Read the temperature control ramp rate (using read_Tchan)\n    def read_Trate(self):\n        chan = self.read_Tchan()\n        self.s.sendall(('READ:DEV:T' + str(chan) + ':TEMP:LOOP:RAMP:RATE\\r\\n').encode())\n        resp = self.s.recv(1024).decode().split(':')[-1].strip('K/min\\n')\n        return resp\n    \n    # Read the control temperature ramp status (enabled/disabled)\n    def read_ratestatus(self):\n        chan = self.read_Tchan()\n        self.s.sendall(('READ:DEV:T' + str(chan) + ':TEMP:LOOP:RAMP:ENAB\\r\\n').encode())\n        resp = self.s.recv(1024).decode().split(':')[-1].strip('\\n')\n        if resp == 'ON':\n            return 1\n        elif resp == 'OFF':\n            return 0\n        else:\n            raise ValueError('Expected to receive \"ON\" or \"OFF\" but got a different response.')\n    \n    # Write the control temperature ramp status (use 'ON' or 'OFF' as val)\n    def write_ratestatus(self, val):\n        chan = self.read_Tchan()\n        self.s.sendall(('SET:DEV:T' + str(chan) + ':TEMP:LOOP:RAMP:ENAB:' + str(val) + '\\r\\n').encode())\n        self.s.recv(1024)\n\n    def write_Hchamber(self, val):\n        # Setpoint is in uW\n        self.s.sendall(('SET:DEV:H1:HTR:SIG:POWR:' + str(val) + '\\r\\n').encode())\n        self.s.recv(1024)\n\n    def read_Hchamber(self):\n        self.s.sendall('READ:DEV:H1:HTR:SIG:POWR\\r\\n'.encode())\n        resp = self.s.recv(1024).decode().split(':')[-1].strip('W\\n')\n        resp = convertUnits(resp)\n        return resp\n\n    def write_Hstill(self, val):\n        # Setpoint is in uW\n        self.s.sendall(('SET:DEV:H2:HTR:SIG:POWR:' + str(val) + '\\r\\n').encode())\n        self.s.recv(1024)\n\n    def read_Hstill(self):\n        self.s.sendall('READ:DEV:H2:HTR:SIG:POWR\\r\\n'.encode())\n        resp = self.s.recv(1024).decode().split(':')[-1].strip('W\\n')\n        resp = convertUnits(resp)\n        return resp\n    \n    def read_status(self):\n        self.s.sendall('READ:SYS:DR:STATUS\\r\\n'.encode())\n        resp = self.s.recv(1024).decode().split(':')[-1].strip('\\n')\n        return resp \n\n    def read_action(self):\n        self.s.sendall('READ:SYS:DR:ACTN\\r\\n'.encode())\n        resp = self.s.recv(1024).decode().split(':')[-1].strip('\\n')\n        if resp == 'PCL':\n            return 'Precooling'\n        elif resp == 'EPCL':\n            return 'Empty precool loop'\n        elif resp == 'COND':\n            return 'Condensing'\n        elif resp == 'NONE':\n            if self.read_temp5() < 1.5:\n                return 'Condensing and circulating'\n            else:\n                return 'Idle'\n        elif resp == 'COLL':\n            return 'Collecting the mixture'\n        else:\n            return 'Unknown'\n    \n    # Read the speed of the turbo pump\n    def read_turbspeed(self):\n        self.s.sendall('READ:DEV:TURB1:PUMP:SIG:SPD\\r\\n'.encode())\n        resp = float(self.s.recv(1024).decode().split(':')[-1].strip('Hz\\n'))\n        return resp \n    \n    # Read the status (on/off) of the turbo pump\n    def read_turbstate(self):\n        self.s.sendall('READ:DEV:TURB1:PUMP:SIG:STATE\\r\\n'.encode())\n        resp = self.s.recv(1024).decode().split(':')[-1].strip('\\n')\n        return resp \n    \n    # Set state of the turbo\n    def write_turbstate(self, val):\n        self.s.sendall(('SET:DEV:TURB1:PUMP:SIG:STATE:' + val + '\\r\\n').encode())\n        self.s.recv(1024)\n    \n    # Read the cumulative operational hours of the turbo pump\n    def read_turbhours(self):\n        self.s.sendall('READ:DEV:TURB1:PUMP:SIG:HRS\\r\\n'.encode())\n        resp = float(self.s.recv(1024).decode().split(':')[-1].strip('h\\n'))\n        return resp\n    \n    # Read the status (on/off) of the 3He compressor\n    def read_compstate(self):\n        
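# descriptive note (added): the controller answers with a state string (e.g. 'ON'/'OFF'), which is returned as-is\n        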
# Read the status (on/off) of the forepump\n def read_fpstate(self):\n self.s.sendall('READ:DEV:FP:PUMP:SIG:STATE\\r\\n'.encode())\n resp = self.s.recv(1024).decode().split(':')[-1].strip('\\n')\n return resp \n \n # Read the status (on/off) of the PTR compressor\n def read_PTRstate(self):\n self.s.sendall('READ:DEV:C1:PTC\\r\\n'.encode())\n resp = self.s.recv(1024).decode().split(':')[-1].strip('\\n')\n # Note that this response gets ALL parameters of the PTR compressor!\n return resp \n \n # Read the list of assigned temperature channels in the Triton software\n def read_Tchandefs(self):\n self.s.sendall('READ:SYS:DR:CHAN\\r\\n'.encode())\n resp = self.s.recv(1024).decode().split(':')\n chan_still = 'Still: ' + resp[5][-1]\n chan_mix = 'Mixing chamber: ' + resp[7][-1]\n chan_cool = 'Cooldown: ' + resp[9][-1]\n chan_pt1 = 'PT1: ' + resp[11][-1]\n chan_pt2 = 'PT2: ' + resp[13][-1]\n return [chan_still, chan_mix, chan_cool, chan_pt1, chan_pt2]\n \n def info(self):\n print('-----------------------------------------------------')\n print('System status: ' + str(self.read_status()))\n print('Automation task: ' + str(self.read_action()))\n print('-----------------------------------------------------')\n # Get cooldown channel and then request temperature value and status of that channel\n chan_cool = str(self.read_Tchandefs()[2].split(':')[1].strip(' '))\n self.s.sendall(('READ:DEV:T' + str(chan_cool) + ':TEMP:SIG:TEMP\\r\\n').encode())\n resp1 = self.s.recv(1024).decode().split(':')[-1].strip('K\\n')\n self.s.sendall(('READ:DEV:T' + str(chan_cool) + ':TEMP:MEAS:ENAB\\r\\n').encode())\n resp2 = self.s.recv(1024).decode().split(':')[-1].strip('\\n')\n print('Cooldown channel temp (' + str(chan_cool) + '): ' + str(resp1) + ' K, (sensor: ' + resp2 + ')') \n chan_mix = str(self.read_Tchandefs()[1].split(':')[1].strip(' '))\n self.s.sendall(('READ:DEV:T' + str(chan_mix) + ':TEMP:SIG:TEMP\\r\\n').encode())\n resp3 = self.s.recv(1024).decode().split(':')[-1].strip('K\\n')\n self.s.sendall(('READ:DEV:T' + str(chan_mix) + ':TEMP:MEAS:ENAB\\r\\n').encode())\n resp4 = self.s.recv(1024).decode().split(':')[-1].strip('\\n')\n print('Mixing chamber temp (' + str(chan_mix) + '): ' + str(resp3) + ' K, (sensor: ' + resp4 + ')') \n print('-----------------------------------------------------')\n print('Tank pressure (P1): ' + str(self.read_pres1()) + str(' bar'))\n print('Condense pressure (P2): ' + str(self.read_pres2()) + str(' bar'))\n print('Still pressure (P3): ' + str(self.read_pres3()) + str(' bar'))\n print('Turbo back pressure (P4): ' + str(self.read_pres4()) + str(' bar'))\n print('Forepump back pressure (P5): ' + str(self.read_pres5()) + str(' bar'))\n print('OVC pressure (P6): ' + str(self.read_pres6()) + str(' bar'))\n print('------------------------------------------------------')\n print('Heater mode: ' + str(self.read_loop()))\n print('Heater control channel: ' + str(self.read_Tchan()))\n self.s.sendall(('READ:DEV:T' + str(self.read_Tchan()) + ':TEMP:LOOP:TSET\\r\\n').encode())\n resp5 = self.s.recv(1024).decode().split(':')[-1].strip('K\\n')\n print('Heater setpoint: ' + str(resp5) + ' K')\n self.s.sendall(('READ:DEV:T' + str(self.read_Tchan()) + ':TEMP:LOOP:RANGE\\r\\n').encode())\n resp6 = self.s.recv(1024).decode().split(':')[-1].strip('\\n')\n print('Heater range: ' + str(resp6))\n 
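# (aside, added for clarity and not in the original: the read_valveN calls below use exec-generated helpers, mirroring the write_valveN loop defined at the top of this class)\n 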
print('------------------------------------------------------')\n print('Valve 1: ' + str(self.read_valve1()))\n print('Valve 2: ' + str(self.read_valve2()))\n print('Valve 3: ' + str(self.read_valve3()))\n print('Valve 4: ' + str(self.read_valve4()))\n print('Valve 5: ' + str(self.read_valve5()))\n print('Valve 6: ' + str(self.read_valve6()))\n print('Valve 7: ' + str(self.read_valve7()))\n print('Valve 8: ' + str(self.read_valve8()))\n print('Valve 9: ' + str(self.read_valve9()))\n print('------------------------------------------------------')\n print('3He compressor: ' + str(self.read_compstate()))\n print('Forepump: ' + str(self.read_fpstate()))\n print('Turbo pump: ' + str(self.read_turbstate()) + ' (' + str(self.read_turbspeed()) + ' Hz)')\n print('Pulse tube compressor: ' + str(self.read_PTRstate()))\n print('------------------------------------------------------')\n \n \n","sub_path":"instruments/Triton.py","file_name":"Triton.py","file_ext":"py","file_size_in_byte":16841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"38355617","text":"from .utils import log_to_postgres\nfrom logging import WARNING\nimport zlib\n\n\nclass PossiblyCompressedFile (object):\n magic = None\n file_type = None\n mime_type = None\n proper_extension = None\n\n def __init__(self, f, filename = None):\n # f is an open file or file like object\n self.f = f\n # filename is the filename of the remote or local object\n self.filename = filename\n #self.accessor = self.open()\n\n @classmethod\n def is_magic(cls, data):\n return data.startswith(cls.magic)\n \n @classmethod\n def is_file(cls, filename):\n matches = filename.lower().endswith(cls.file_type)\n if matches:\n log_to_postgres(\"Found file type: \" + cls.file_type, WARNING)\n return matches\n\n def open(self):\n return None\n \nclass UncompressedFile (PossiblyCompressedFile):\n def open(self):\n if (self.f == None):\n return open(self.filename).read().split(\"\\n\")\n else:\n return self.f.get_contents_as_string().split(\"\\n\")\n\nimport bz2\n\nclass BZ2File (PossiblyCompressedFile):\n magic = '\\x42\\x5a\\x68'\n file_type = 'bz2'\n mime_type = 'compressed/bz2'\n\n def open(self):\n if (self.f == None):\n return bz2.BZ2File(self.filename).read().split(\"\\n\")\n else:\n return bz2.decompress(self.f.get_contents_as_string()).split(\"\\n\")\n\nimport gzip\n\nclass GZFile (PossiblyCompressedFile):\n magic = '\\x1f\\x8b\\x08'\n file_type = 'gz'\n mime_type = 'compressed/gz'\n\n def open(self):\n if (self.f == None):\n return gzip.GzipFile(self.filename).read().split(\"\\n\")\n else:\n # a zlib decompressobj is single-use, so build a fresh one per call\n d = zlib.decompressobj(16+zlib.MAX_WBITS)\n return d.decompress(self.f.get_contents_as_string()).split(\"\\n\")\n\n\n# factory function to create a suitable instance for accessing files\ndef get_compressed_file(filename):\n with open(filename, 'rb') as f:\n start_of_file = f.read(1024)\n f.seek(0)\n for cls in (BZ2File, GZFile):\n if cls.is_magic(start_of_file):\n return cls(None, filename)\n\n return UncompressedFile(None, filename)\n \ndef get_compressed_file_remote(f, filename):\n \n for cls in (BZ2File, GZFile):\n if cls.is_file(filename):\n return cls(f, filename)\n\n return UncompressedFile(f, filename)\n\n","sub_path":"compression.py","file_name":"compression.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"642565442","text":"# Question\n\nclass Question:\n def __init__(self, text, choices, answer):\n self.text = text\n self.choices = choices\n self.answer = answer\n \n def checkAnswer(self, answer):\n return self.answer == answer\n\n
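# Example usage (illustrative, not part of the original file):\n# Question('2 + 2 ?', ['3', '4'], '4').checkAnswer('4') -> True\n\n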
# Quiz\n\nclass Quiz:\n def __init__(self, questions):\n self.questions = questions\n self.score = 0\n self.questionIndex = 0\n\n def getQuestion(self):\n return self.questions[self.questionIndex]\n\n def displayQuestion(self):\n question = self.getQuestion()\n print(f'Question {self.questionIndex + 1}: {question.text}')\n\n for q in question.choices:\n print('-' + q)\n\n answer = input('Answer : ')\n self.guess(answer)\n self.loadQuestion()\n\n def guess(self, answer):\n question = self.getQuestion()\n\n if question.checkAnswer(answer):\n self.score += 1\n self.questionIndex += 1\n\n def loadQuestion(self):\n if len(self.questions) == self.questionIndex:\n self.showScore()\n else:\n self.displayProgress()\n self.displayQuestion()\n\n def showScore(self):\n print('Score : ', self.score)\n\n def displayProgress(self):\n totalQuestion = len(self.questions)\n questionNumber = self.questionIndex + 1\n\n if questionNumber > totalQuestion:\n print('Quiz Over...')\n else:\n print(f'Question {questionNumber} of {totalQuestion}'.center(100,'*'))\n\nq1 = Question('Which is the best programming language ?', ['C#','python','javascript','java'], 'python')\nq2 = Question('Which is the most popular programming language ?', ['python','C#','javascript','java'], 'python')\nq3 = Question('Which programming language is the most profitable ?', ['C#','java','javascript','python'], 'python')\nquestions = [q1,q2,q3]\n\nquiz = Quiz(questions)\nquiz.loadQuestion()","sub_path":"QuizApp.py","file_name":"QuizApp.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"291538339","text":"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pulp\r\nimport sys\r\n\r\nsys.path.append('..')\r\nCOLS_POS = ['ls', 'st', 'rs', \r\n 'lw', 'lf', 'cf', 'rf', 'rw', \r\n 'lam', 'cam', 'ram', \r\n 'lm', 'lcm', 'cm', 'rcm', 'rm', \r\n 'lwb', 'ldm', 'cdm', 'rdm', 'rwb',\r\n 'lb', 'lcb', 'cb', 'rcb', 'rb',\r\n 'gk']\r\n\r\nFORMATION_4_4_2 = ['gk','rb', 'rcb', 'lcb', 'lb','rm', 'rcm', 'lcm', 'lm','rs', 'ls']\r\n\r\nFORMATION_5_3_2 = ['gk','rcb', 'cb', 'lcb','rwb', 'lwb','rcm', 'lcm','cam','rs', 'ls']\r\nFORMATION_4_1_2_1_2=['gk','rb','rcb','lcb','lb','cdm','rcm','lcm','cam','rs','ls']\r\n\r\nFORMATION_4_2_4 = ['gk','rb','rcb','lcb','lb','rw','rcm','lcm','lw','rs','ls']\r\n\r\nFORMATION_4_3_2_1=['gk','rb','rcb','lcb','lb','rw','rcm','lcm','lw','cam','st']\r\n\r\nFORMATION_3_5_2=['gk','rcb','cb','lcb','rm','rcm','cm','lcm','lm','rs','ls']\r\n\r\n\r\ndf=pd.read_csv('file3.csv')\r\ndef allplayer():\r\n return df['id'].tolist()\r\n \r\ndef create_temp(formation):\r\n \r\n TEMP = df[df['position'].isin(formation)]\r\n TEMP = TEMP[['id', 'position', 'value', 'overall']]\r\n return TEMP\r\n\r\n\r\ndef compute_best_lineup(df, formation, budget):\r\n\r\n # problem definition\r\n prob = pulp.LpProblem('BestLineup', pulp.LpMaximize)\r\n\r\n # get unique identifiers\r\n ids = df['id'].tolist()\r\n \r\n # parameters\r\n overalls = pd.Series(df['overall'].values, index=ids).to_dict()\r\n values = pd.Series(df['value'].values, index=ids).to_dict()\r\n\r\n ## dynamic parameters: selected positions\r\n ### convert position-strings into binary variables\r\n for pos in formation:\r\n df[f'is_{pos}'] = np.where(df['position'] == pos, 1, 0)\r\n \r\n ### extract positional parameters\r\n positions = {}\r\n for 
pos in formation:\r\n positions[pos] = pd.Series(df[f'is_{pos}'].values, index=ids).to_dict()\r\n\r\n # define the decision variable\r\n players = pulp.LpVariable.dicts(\"Player\", ids, cat='Binary')\r\n\r\n # set objective\r\n prob += pulp.lpSum([overalls[i] * players[i] for i in ids]), \"Total Rating of Lineup\"\r\n\r\n # set constraints\r\n prob += pulp.lpSum([players[i] for i in ids]) == 11, \"Pick_11_Players\"\r\n prob += pulp.lpSum([values[i] * players[i] for i in ids]) <= budget, \"Total_Value_Under_Budget\"\r\n ## check if required position is picked\r\n for pos in formation:\r\n prob += pulp.lpSum([positions[pos][i] * players[i] for i in ids]) == 1, f\"Pick_{pos.upper()}\"\r\n\r\n result = prob.solve()\r\n\r\n picked_player_ids = [int(i.name.split('_')[1]) for i in prob.variables() if i.varValue > 0]\r\n \r\n return formation, picked_player_ids\r\n\r\ndef process_photo_links(text):\r\n start = 'https://cdn.sofifa.com/players'\r\n end = '19_60.png'\r\n id_str = str(text.split('/')[-1].split('.')[0]).zfill(6)\r\n return str(f'{start}/{id_str[:3]}/{id_str[3:]}/{end}')\r\n\r\ndef player_details(ids):\r\n rslt_df = df[df['id'].isin(ids)]\r\n rslt_df.set_index(\"id\", drop=True, inplace=True)\r\n r=rslt_df[['name', 'nationality', 'age', 'club', 'overall', 'value','photo']]\r\n dictionary = r.T.to_dict('list')\r\n return dictionary\r\n\r\n","sub_path":"trial.py","file_name":"trial.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"419583549","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0004_video'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='image',\n name='for_main',\n field=models.BooleanField(default=False, verbose_name='\\u0414\\u043b\\u044f \\u0433\\u043b\\u0430\\u0432\\u043d\\u043e\\u0439'),\n preserve_default=True,\n ),\n ]\n","sub_path":"core/shop/migrations/0005_image_for_main.py","file_name":"0005_image_for_main.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"612176848","text":"n = int(input())\r\ndict = []\r\ncnt = [[0,'Kinh']]\r\nname = []\r\nfor i in range(n):\r\n temp = list(map(str,input().split()))\r\n dict.append(temp)\r\n check = False\r\n for x in cnt:\r\n if temp[1] == x[1]:\r\n x[0] += 1\r\n check = True\r\n if not check:\r\n cnt.append([1,temp[1]])\r\n\r\nfor i in dict:\r\n if i[1] == max(cnt)[1]:\r\n print(i[0])\r\n ","sub_path":"E_7.py","file_name":"E_7.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"551590997","text":"\"\"\"\nPredictor based on this notebook\nhttps://www.kaggle.com/szabo7zoltan/colorandcountingmoduloq\n\"\"\"\nfrom base.iodata import IOData\nfrom base.field import Field\nfrom predictors.basic import Predictor\nfrom predictors.basic import AvailableEqualShape\n\nimport numpy as np\n\ndef get_p1_p2(i, j, n, k, v, q1, q2):\n if v == 0 or v ==2:\n p1 = i % q1\n else:\n p1 = (n - 1 - i) % q1\n if v == 0 or v == 3:\n p2 = j % q2\n else:\n p2 = (k - 1 - j) % q2\n return p1, p2\n\n\nclass ColorCountingPredictor(Predictor, AvailableEqualShape):\n def __init__(self):\n self.best_Dict = None\n self.best_Q1 = -1\n self.best_Q2 = -1\n self.best_v = -1\n\n def train(self, iodata_list):\n pairs = [\n 
(Q1, Q2)\n for t in range(15)\n for Q1 in range(1, 8) \n for Q2 in range(1, 8)\n if Q1 + Q2 == t\n ]\n h, w = list(zip(*[iodata.input_field.shape for iodata in iodata_list]))\n hmax = max(h)\n wmax = max(w)\n pairs = [(Q1, Q2) for Q1, Q2 in pairs if Q1 Optional[str]:\n blocks = code_blocks(string)\n answers = [answer for answer in [parser.parse(block) for block in blocks] if answer is not None]\n parsed = '```\\n' + '\\n'.join(answers) + '\\n```' if answers else None\n emoted = analyze(string)\n response = []\n if parsed:\n response.append(parsed)\n if emoted:\n response.append(emoted)\n if response:\n return (mention if (mention and parsed) else '') + '\\n'.join(response)\n return\n\n\ndef code_blocks(string: str, syntax_type: str='kismet'):\n blocks = []\n fence = None\n ignore = False\n for line in string.splitlines(keepends=True):\n while line:\n if fence:\n match = regex.search(fence, line)\n if match:\n if not ignore:\n blocks[-1] += line[:match.start()]\n fence = None\n line = line[match.end():]\n else:\n if not ignore:\n blocks[-1] += line\n line = None\n else:\n line = None\n else:\n match = regex.search('^`{3,}', line)\n if match:\n fence = '^' + match.group()\n syntax = line[match.end():]\n if syntax != '\\n' and syntax != syntax_type + '\\n':\n ignore = True\n else:\n blocks.append('')\n line = None\n else:\n match = regex.search('`+', line)\n if match:\n fence = match.group()\n line = line[match.end():]\n blocks.append('')\n else:\n line = None\n\n if fence:\n del blocks[-1]\n return blocks","sub_path":"pycfiles/kismet-0.4.0-py3-none-any/core.cpython-37.py","file_name":"core.cpython-37.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"215229543","text":"import pytest\n\nfrom cervantes.models import Translation\n\n\nclass TestTranslation():\n def test_get_all(self, db):\n \"\"\"Check if the records are produced in the correct order.\"\"\"\n\n translation_ids = [t.uid for t in Translation.get_all()]\n\n expected_order = ['uid0000004', 'uid0000003',\n 'uid0000002', 'uid0000001', 'uid0000005', ]\n\n assert translation_ids == expected_order\n\n def test_get_all_pending(self, db):\n \"\"\"Check if the records are filtered correctly.\"\"\"\n\n translation_ids = [t.uid for t in Translation.get_all_pending()]\n\n expected_ids = ['uid0000005', ]\n\n assert translation_ids == expected_ids\n\n def test_representation(self, db):\n \"\"\"Test the __repr__ format of the Translation records\"\"\"\n\n translations = Translation.query.all()\n\n assert translations[0].__repr__(\n ) == '<Translation [en -> es] \"Sample text 1\">'\n assert translations[1].__repr__(\n ) == '<Translation [en -> es] \"Sample text with\">'\n assert translations[4].__repr__(\n ) == '<Translation [en -> es] \"Sample text 5\">'\n\n def test_dictify(self, db):\n \"\"\"Test the serialization method of the Translation records\"\"\"\n\n translations = Translation.query.all()\n\n completed_translation = {\n 'uid': 'uid0000001',\n 'status': 'completed',\n 'source_language': 'en',\n 'target_language': 'es',\n 'text': 'Sample text 1',\n 'translated_text': 'El sample text 1',\n 'text_length': 16,\n 'date_created': '2019-12-20 15:30:45',\n 'date_updated': '2019-12-20 15:30:45',\n }\n\n new_translation = {\n 'uid': 'uid0000005',\n 'status': 'new',\n 'source_language': 'en',\n 'target_language': 'es',\n 'text': 'Sample text 5',\n 'translated_text': None,\n 'text_length': 0,\n 'date_created': '2019-12-20 15:30:45',\n 'date_updated': '2019-12-30 15:30:45',\n }\n\n assert 
translations[0].dictify() == completed_translation\n assert translations[4].dictify() == new_translation\n","sub_path":"cervantes/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"352735331","text":"from django import forms\nfrom django.forms.widgets import DateTimeBaseInput, DateTimeInput\nfrom django.http import request\nfrom django.template.defaultfilters import default\nfrom infinite.models import Game, Category,Comment\nfrom django.contrib.auth.models import User\nfrom infinite.models import UserProfile\n\n#form of Category, user need to input name when adding new category.\nclass CategoryForm(forms.ModelForm):\n name = forms.CharField(max_length=128,\n help_text=\"Please enter the category name.\")\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n class Meta:\n model = Category\n fields = ('name',)\n\n#form of Game, user need to input name, release date, description of the game, and upload an image when adding new category.\nclass GameForm(forms.ModelForm):\n name = forms.CharField(max_length=128,\n help_text=\"Please enter the name of the game.\")\n released_date = forms.DateField(widget=forms.DateInput(attrs={'type':'date'}),\n help_text=\"Please enter the released date of the game.\")\n\n description = forms.CharField(widget=forms.Textarea,\n help_text=\"Please enter the description of the game.\")\n\n picture = forms.ImageField(\n help_text=\"Please upload an image.\")\n likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n class Meta:\n model = Game\n exclude = ('category',)\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput())\n class Meta:\n model = User\n fields = ('username', 'email', 'password',)\n\n #user form for uploading personal profile image in profile page\nclass UserProfileForm(forms.ModelForm):\n picture = forms.ImageField(\n help_text=\"Please upload your profile photo.\")\n \n class Meta:\n model = UserProfile\n fields = ('picture',)\n\n#comment form for inputing comments in game page.\nclass CommentForm(forms.ModelForm):\n comment = forms.CharField(max_length=500, widget=forms.Textarea,\n help_text=\"Please enter your comment.\" )\n class Meta:\n model = Comment\n fields = ('comment',)\n\n \n","sub_path":"infinite/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"80772500","text":"_DEFAULT_N = 1000\n_N = (2000, 3000, 4000, 6000, 8000, 12000, 16000, 24000, 32000)\n_DEFAULT_L = 20000\n_L = (5000, 7500, 10000, 15000, 30000, 40000)\n_DEFAULT_r = 1\n_r = (2, 3, 4, 6, 8, 16)\n_DEFAULT_STEPS = 1000\n_STEPS = (250, 375, 500, 750, 1500, 2000, 3000)\n\n_WRITE_TO_DIR = 'test_in/'\n\ndef writeFile(N, L, r, steps):\n\twith open(_WRITE_TO_DIR + '{}-{}-{}-{}.in'.format(N, L, r, steps), 'w') as testOut:\n\t\ttestOut.write('{}\\n{}\\n{}\\n{}\\nperf\\n'.format(N, L, r, steps))\n\nwriteFile(_DEFAULT_N, _DEFAULT_L, _DEFAULT_r, _DEFAULT_STEPS)\n\nfor n in _N:\n\twriteFile(n, _DEFAULT_L, _DEFAULT_r, _DEFAULT_STEPS)\n# for l in _L:\n# \twriteFile(_DEFAULT_N, l, _DEFAULT_r, _DEFAULT_STEPS)\n# for r in _r:\n# \twriteFile(_DEFAULT_N, _DEFAULT_L, r, _DEFAULT_STEPS)\n# for s in _STEPS:\n# \twriteFile(_DEFAULT_N, _DEFAULT_L, _DEFAULT_r, 
s)\n","sub_path":"cuda/submission/testDispatcher/floatTest/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"631441114","text":"import logging\nfrom urllib.parse import urlparse\nfrom http.client import HTTPConnection, BadStatusLine\n\n\nclass VarnishPurger(object):\n\n def __init__(self):\n self.logger = logging.getLogger('vaas')\n\n def log_and_return_data(self, responses_summary):\n self.logger.debug(responses_summary)\n return responses_summary\n\n def purge_url(self, url, servers):\n parsed_url = urlparse(url)\n headers = {\"Host\": parsed_url.hostname}\n data = {'success': {}, 'error': {}}\n\n for server in servers:\n try:\n conn = HTTPConnection(server.ip, server.http_port)\n purge_url = parsed_url.path\n if parsed_url.query:\n purge_url = \"{}?{}\".format(parsed_url.path, parsed_url.query)\n conn.request(\"PURGE\", purge_url, body='', headers=headers)\n resp = conn.getresponse().status\n data['success'][server.ip] = \"varnish http response code: {}, url={}\".format(resp, url)\n except BadStatusLine:\n data['error'][server.ip] = \"Bad status line from varnish server, url={}\".format(url)\n except Exception as e:\n data['error'][server.ip] = \"Unexpected error: {}, url={}\".format(e, url)\n\n return self.log_and_return_data(data)\n","sub_path":"vaas-app/src/vaas/purger/purger.py","file_name":"purger.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"229235274","text":"import os\nimport routing\nfrom datetime import datetime\nfrom flask import Flask, jsonify\n\napp = Flask(__name__)\n\nif \"PATH_DATA\" not in os.environ:\n\tos.environ[\"PATH_DATA\"] = \"data\"\n\nROUTES = routing.allRoutes()\n\ndef myTs(currentdt):\n\tif currentdt.weekday() < 5:\n\t\t# if currentdt.hour > 23 or currentdt.hour < 6:\n\t\t# \treturn routing.TimeStatus.late\n\t\treturn routing.TimeStatus.weekday\n\telif currentdt.weekday() == 5:\n\t\treturn routing.TimeStatus.sat\n\telse:\n\t\treturn routing.TimeStatus.sun\n\n\n@app.route(\"/api//\", methods=[\"GET\"])\ndef schedule(stationFrom, stationTo):\n\tstationFrom = stationFrom.replace(\"+\", \" \")\n\tstationTo = stationTo.replace(\"+\", \" \")\n\tnow = datetime.now()\n\thour, minute = now.hour, now.minute\n\tout = {}\n\troute = next(filter(lambda route: myTs(now) == route._ts and route.linked(stationFrom, stationTo), ROUTES))\n\tnextHr, nextMin = route.nextTrain(stationFrom, hour, minute)\n\tout['next'] = \"%d:%02d\" % (nextHr, nextMin)\n\tout['nextnext'] = \"%d:%02d\" % route.nextTrain(stationFrom, nextHr, nextMin)\n\treturn jsonify(out)\n\n\napp.run(host='0.0.0.0', debug=True, port=8889)","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"174111357","text":"import json\nfrom nose.tools import eq_, assert_in\n\nfrom solariat.db import fields\nfrom solariat_bottle.tests.base import UICaseSimple\nfrom solariat_bottle.db.dashboard import Dashboard, insert_default_dashboard_types\nfrom solariat_bottle.configurable_apps import APP_JOURNEYS\n\n\nclass DashboardTest(UICaseSimple):\n\n def setUp(self):\n super(DashboardTest, self).setUp()\n db_types = insert_default_dashboard_types()\n self.db_types_name2id = {each.type: str(each.id) for each in db_types}\n self.login()\n self.account.selected_app = 
APP_JOURNEYS\n self.account.save()\n\n def _create(self, data):\n resp = self.client.post('/dashboards', data=json.dumps(data))\n return resp\n\n def test_create(self):\n data = dict(\n type_id = self.db_types_name2id['journeys'],\n title = 'journeys'\n )\n resp = self._create(data)\n\n eq_(resp.status_code, 201)\n eq_(Dashboard.objects.count(), 1)\n\n d = Dashboard.objects.find_by_user(self.user).next()\n eq_(d.owner, self.user)\n\n data = json.loads(resp.data)\n eq_(data['ok'], True)\n for each in ['id', 'title', 'widgets']:\n assert_in(each, data['data'])\n\n def test_read_many(self):\n titles = ['journeys', 'advisors', 'conversation']\n for title in titles:\n data = dict(\n type_id = self.db_types_name2id['journeys'],\n title = title\n )\n self._create(data)\n\n resp = self.client.get('/dashboards')\n data = json.loads(resp.data)\n dashboards = data['data']\n\n eq_(len(dashboards), len(titles))\n\n for dashboard in dashboards:\n for each in ['id', 'title', 'widgets']:\n assert_in(each, dashboard)\n\n def test_read_single(self):\n data = dict(\n type_id = self.db_types_name2id['journeys'],\n title = 'journeys'\n )\n resp = self._create(data)\n data = json.loads(resp.data)\n dashboard_id = data['data']['id']\n\n resp = self.client.get('/dashboards/' + dashboard_id)\n data = json.loads(resp.data)\n eq_(dashboard_id, data['data']['id'])\n\n for each in ['id', 'title', 'widgets']:\n assert_in(each, data['data'])\n\n def test_update(self):\n data = dict(\n type_id = self.db_types_name2id['journeys'],\n title = 'journeys'\n )\n resp = self._create(data)\n data = json.loads(resp.data)\n dashboard_id = data['data']['id']\n\n updated_title = data['title'] = 'journeys optimization'\n # use self user for now as we don't have another user\n updated_shared_to = data['shared_to'] = [str(self.user.id)]\n\n resp = self.client.put('/dashboards/' + dashboard_id, data=json.dumps(data))\n data = json.loads(resp.data)\n eq_(dashboard_id, data['data']['id'])\n eq_(updated_title, data['data']['title'])\n eq_(updated_shared_to, data['data']['shared_to'])\n eq_(updated_title, Dashboard.objects.get().title)\n eq_(fields.ObjectId(updated_shared_to[0]), Dashboard.objects.get().shared_to[0])\n\n for each in ['id', 'title', 'widgets']:\n assert_in(each, data['data'])\n\n def test_delete(self):\n data = dict(\n type_id = self.db_types_name2id['journeys'],\n title = 'journeys'\n )\n resp = self._create(data)\n data = json.loads(resp.data)\n dashboard_id = data['data']['id']\n\n resp = self.client.delete('/dashboards/' + dashboard_id)\n eq_(resp.data, '')\n eq_(Dashboard.objects.count(), 0)\n\n def test_copy_dashboard(self):\n create_data = dict(\n type_id = self.db_types_name2id['journeys'],\n title = 'journeys'\n )\n resp = self._create(create_data)\n resp_data = json.loads(resp.data)\n dashboard_id = resp_data['data']['id']\n\n copy_data = dict(\n title = 'journeys copy',\n description = 'Dashboard copied from %s' % dashboard_id\n )\n\n copy_resp = self.client.post('/dashboards/%s/copy' % dashboard_id, data=json.dumps(copy_data))\n copy_resp_data = json.loads(copy_resp.data)\n eq_(copy_resp_data['data']['title'], copy_data['title'])\n eq_(copy_resp_data['data']['description'], copy_data['description'])\n","sub_path":"tests/test_dashboards.py","file_name":"test_dashboards.py","file_ext":"py","file_size_in_byte":4466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"204266586","text":"t = 0\nwith open(\"estoque.json\", \"r\") as estoque:\n arq = estoque.read()\n 
import json # stdlib import required for json.loads below; missing from the original snippet\n 
dicio = json.loads(arq)\n for i in dicio.values():\n q = i[\"quantidade\"]\n p = i[\"valor\"]\n t = t + q*p\nprint(t)\n ","sub_path":"backup/user_274/ch159_2020_06_22_17_30_50_883478.py","file_name":"ch159_2020_06_22_17_30_50_883478.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"352972386","text":"from HelpersClustering import KMeans\nfrom HelpersCsv import ParseCsv, WriteCsv\nimport argparse\nimport sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--path', '-p', help=\"path of input data\", type=str)\nparser.add_argument('--hasheaders', '-hh', help=\"if file has headers\", type=int, default=0)\nparser.add_argument('--clusters', '-c', help=\"pass the number of clusters for the execution of k-means\", type=int)\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n pcsv = ParseCsv(args.path, has_headers=args.hasheaders)\n data = pcsv.get_data()\n\n kmeans = KMeans(data, args.clusters)\n clusters = kmeans.execute()\n\n row_clusters = {}\n for cluster_id, rows in clusters.items():\n for row in rows:\n row_clusters[row['row_index']] = cluster_id\n\n result = [['Instance Index', 'Cluster Id']]\n\n for i in range(0, len(data)):\n result.append([i,row_clusters[i]])\n\n wcsv = WriteCsv(filename='clusters.txt')\n wcsv.write_data(result)\n","sub_path":"TEIA/Pratica6/calculateKMeans.py","file_name":"calculateKMeans.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"227898533","text":"DEFAULT_DISCOUNT_PERCENT = 5 # Bonus-card discount, in percent\nBONUS_CARD_NUMBER_LEN = 15\nBONUS_CARD_EMBOSSED_LEN = 6\n\nCARD_SEARCH_MAP = {\n BONUS_CARD_EMBOSSED_LEN: 'embossed_number',\n BONUS_CARD_NUMBER_LEN: 'number',\n}\nLOGGER_NAME = 'errors'\n\nCEILING = 50 # In rubles - the increment up to which amounts must be rounded\n","sub_path":"loyalty/bonuses/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"426123313","text":"from util import *\n\n\n@apply\ndef apply(self, offset):\n from axiom.algebra.sum.limits.subs.offset import limits_subs\n return Equal(self, limits_subs(Maxima, self, offset), evaluate=False)\n\n\n@prove(proved=False)\ndef prove(Eq):\n m, n = Symbol(integer=True)\n f = Function(real=True)\n Eq << apply(Maxima[n:1:m + 1](f(n)), 1)\n\n\nif __name__ == '__main__':\n run()\n# created on 2021-09-08\n","sub_path":"axiom/algebra/maxima/limits/subs/offset.py","file_name":"offset.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"79947895","text":"def 开始():\n 多媒体.开启声音识别(常量.识别拍手)\n \n 视觉.直到识别到(常量.两次拍手)\n 告知('twice')\n 亮灯(绿色)\n\n 视觉.直到识别到(常量.三次拍手)\n print('thrice')\n 亮灯(红色)\n \ndef 亮灯(颜色):\n 云台灯(常量.云台所有, 颜色, 常量.效果常亮)\n 时间.睡眠(1)\n 云台灯(常量.云台所有, 黑色, 常量.效果熄灭)\n\ndef 云台灯(位置, 颜色, 灯效):\n LED灯.云台(位置, 颜色['红'], 颜色['绿'], 颜色['蓝'], 灯效)\n\n黑色 = {'红': 0, '绿': 0, '蓝': 0}\n红色 = {'红': 255, '绿': 0, '蓝': 0}\n绿色 = {'红': 0, '绿': 255, '蓝': 0}\n\n# The section below is the Chinese localization of the API and has no effect on the program logic. Please do not modify it.
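\n# (illustrative note, not in the original file: once these aliases are bound,\n# 多媒体.开启声音识别(常量.识别拍手) is the same call as\n# media_ctrl.enable_sound_recognition(rm_define.sound_detection_applause))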
\n\nstart = 开始\n\n多媒体 = media_ctrl\n多媒体.开启声音识别 = 多媒体.enable_sound_recognition\n\n视觉 = vision_ctrl\n视觉.直到识别到 = 视觉.cond_wait\n视觉.取行人信息 = 视觉.get_people_detection_info\n\nLED灯 = led_ctrl\nLED灯.云台 = LED灯.set_top_led\n\n# Constants section\n常量 = rm_define\n常量.识别拍手 = 常量.sound_detection_applause\n常量.两次拍手 = 常量.cond_sound_recognized_applause_twice\n常量.三次拍手 = 常量.cond_sound_recognized_applause_thrice\n\n常量.云台所有 = 常量.armor_top_all\n常量.云台左 = 常量.armor_top_left\n常量.云台右 = 常量.armor_top_right\n\n常量.效果常亮 = 常量.effect_always_on\n常量.效果熄灭 = 常量.effect_always_off\n常量.效果呼吸 = 常量.effect_breath\n常量.效果闪烁 = 常量.effect_flash\n常量.效果走马灯 = 常量.effect_marquee\n\n时间 = time\n时间.睡眠 = 时间.sleep\n\n告知 = print","sub_path":"Python API视频演示与例程/识别/拍手.py","file_name":"拍手.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"637360058","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 30 17:34:28 2019\n\n@author: amie\n\"\"\"\n\n\n\nimport picdata as pic\nimport numpy as np\nfrom numba import jit\nimport time \nimport math\nimport matplotlib.pyplot as plt\n#from pca import PCA_face_example\nimport pickle\nimport cv2\nimport os\n\n\n\nclass picprocess():\n def __init__(self):\n self.strain = 1\n self.sN_NEURONS = 100\n self.sN_NEURONS_1 = 100\n self.sETA = 1.0\n self.sXI = 1.0\n self.strain_round = 100\n self.strainpath = 'trainpicpath.txt'\n self.stestpath = 'testpicpath.txt'\n value = self.readconfig('config_FGV.txt') \n self.strain = int(value[0])\n self.sN_NEURONS = int(value[1])\n self.sN_NEURONS_1 = int(value[2])\n self.sETA = float(value[3])\n self.sXI = float(value[4])\n self.strain_round = int(value[5])\n self.strainpath =value[6]\n self.stestpath = value[7]\n \n \n \n def generateGxGyVF(self):\n Gx = []\n Gy = []\n F = []\n V = []\n for fpath,dirname,file in os.walk('pic_Gx'):\n for f in file:\n f = 'pic_Gx/' + f\n with open(f,'rb') as outfile:\n data = pickle.load(outfile)\n Gx.append(data)\n \n for fpath,dirname,file in os.walk('pic_Gy'):\n for f in file:\n f = 'pic_Gy/' + f\n with open(f,'rb') as outfile:\n data = pickle.load(outfile)\n Gy.append(data)\n \n for fpath,dirname,file in os.walk('pic_F'):\n for f in file:\n f = 'pic_F/' + f\n with open(f,'rb') as outfile:\n data = pickle.load(outfile)\n F.append(data)\n for fpath,dirname,file in os.walk('pic_V'):\n for f in file:\n f = 'pic_V/' + f\n with open(f,'rb') as outfile:\n data = pickle.load(outfile)\n V.append(data) \n \n \n return Gx,Gy,F,V\n \n \n \n def generatetraindata(self,Gx,Gy,F,V):\n Gxyv = []\n Fxy = []\n for i in range(len(Gx)):\n print('Gxshape:',Gx[i].shape)\n print('Fshape:',F[i].shape)\n for m in range(Gx[i].shape[0]):\n for n in range(Gx[i].shape[1]):\n gxyv = [Gx[i][m,n],Gy[i][m,n],V[i][m,n]]\n fxy = [F[i][m,n,0],F[i][m,n,1]]\n Gxyv.append(gxyv)\n Fxy.append(fxy)\n Gxyv = np.array(Gxyv)\n Fxy = np.array(Fxy)\n \n return Gxyv,Fxy\n \n \n \n def readconfig(self,configpath):\n value = []\n with open(configpath,'r') as config:\n for line in config.readlines():\n line = line.split('=')\n value.append(line[1].split('\\n')[0])\n return value\n \n \n def cosine_dis(self, x, y):\n num = (x*y).sum(axis=1)\n denom = np.linalg.norm(x) * np.linalg.norm(y,axis=1)\n return num/denom\n \n def predict_corner(self):\n \n # initialize the parameters\n population_a = np.zeros((self.sN_NEURONS,1)) \n population_s = np.ones((self.sN_NEURONS,1))*0.045\n wcross = np.random.uniform(0,1,(self.sN_NEURONS,self.sN_NEURONS_1)) \n population_Wcross = wcross / wcross.sum() 
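\n # (note, not in the original code: population_Wcross is the Hebbian cross-modal matrix\n # linking the gradient population to the displacement population; normalizing it to a\n # unit sum keeps the weights bounded before the argmax decoding used further below)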
\n population_Winput = np.random.random((self.sN_NEURONS,3))/10.0 \n population_Winput_1 = np.random.random((self.sN_NEURONS_1,2))/10.0 \n \n \n # load the model\n with open('Weight data/train_vgf_300_1000/populations_Wcross799.pkl','rb') as file:\n population_Wcross = pickle.load(file)\n \n with open('Weight data/train_vgf_300_1000/populations_Winput799.pkl','rb') as file1:\n population_Winput = pickle.load(file1)\n \n with open('Weight data/train_vgf_300_1000/population_Winput_1799.pkl','rb') as file2:\n population_Winput_1 = pickle.load(file2)\n \n with open('Weight data/train_vgf_300_1000/populations_s799.pkl','rb') as file3:\n population_s = pickle.load(file3)\n \n # show the HL matrix\n plt.imshow(population_Wcross)\n cap = cv2.VideoCapture('slow_traffic_small.mp4')\n\n # params for ShiTomasi corner detection\n feature_params = dict( maxCorners = 100,\n qualityLevel = 0.3,\n minDistance = 7,\n blockSize = 7 ) \n color = np.random.randint(0,255,(100,3))\n\n # Take first frame and find corners in it\n ret, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)\n p_time = p0[:,0,:]\n # Create a mask image for drawing purposes\n mask = np.zeros_like(old_frame)\n count = 1\n while(1):\n sensory_x = []\n count+=1\n ret,frame = cap.read()\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n dx = cv2.Sobel(old_gray,cv2.CV_16S,1,0)\n dy = cv2.Sobel(old_gray,cv2.CV_16S,0,1)\n VI = frame_gray - old_gray\n \n dx = cv2.resize(dx,(6400,3600))\n dy = cv2.resize(dy,(6400,3600))\n VI = cv2.resize(VI,(6400,3600)) \n \n p0 = p0[:,0,:]\n good_old_around = (p0 * 10).astype(np.int64)\n for i in range(len(good_old_around)):\n if good_old_around[i][1] >= 3600:\n good_old_around[i][1] = 3599\n if good_old_around[i][0] >= 6400:\n good_old_around[i][0] = 6399 # clamp the x index (the original mistakenly clamped index 1 here)\n \n x = dx[(good_old_around[i][1]),(good_old_around[i][0])]\n y = dy[(good_old_around[i][1]),(good_old_around[i][0])]\n vi = VI[(good_old_around[i][1]),(good_old_around[i][0])]*4\n sensory_x.append(np.array([x,y,vi])/1020.0)\n\n sensory_x = np.array(sensory_x) \n act_cur1 = np.zeros((100,1))\n x_drection = [] \n for i in range(sensory_x.shape[0]):\n input_sample = sensory_x[i].reshape(1,-1) \n temp = (np.power((input_sample - population_Winput),2).sum(axis=1)/100).reshape(-1,1) \n act_cur1 = (1/(np.sqrt(2*np.pi)*population_s))*np.exp(-temp/(2*np.power(population_s,2))) \n act_cur_sum = act_cur1.sum()\n if act_cur_sum == 0 :\n print('act_cur.sum() is less than 1e-323,ignore the update!')\n act_cur1 = act_cur1 / act_cur_sum\n population_a = act_cur1\n # get the position of winner neuron\n win_pos = population_a[:,0].argmax()\n pre_pos = population_Wcross[win_pos,:].argmax()\n # decode the HL matrix\n a1 = population_Winput_1[pre_pos]\n x_drection.append(a1)\n x_drection = np.array(x_drection)\n good_old = p_time\n good_new = p_time + x_drection\n for i,(new,old) in enumerate(zip(good_new,good_old)):\n a,b = new.ravel().astype(np.float32)\n c,d = old.ravel().astype(np.float32)\n mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2)\n frame = cv2.circle(frame,(a,b),5,color[i].tolist(),-1)\n img = cv2.add(frame,mask)\n # rebuild the optical flow picture\n cv2.imshow('rebuild flow',img)\n cv2.waitKey(100)\n p_time = good_new\n p0 = good_new[:,np.newaxis,:]\n \n\n def calmove(self,coordiante,coordinate_new):# [x1,y1],[float1,float2]\n co = []\n \n x0y0 = [coordiante[0] - 1, coordiante[1] - 1]\n x0y1 = [coordiante[0] - 1, coordiante[1]]\n x0y2 = 
[coordiante[0] - 1, coordiante[1] + 1]\n x1y0 = [coordiante[0] , coordiante[1] - 1]\n x1y1 = [coordiante[0] , coordiante[1]]\n x1y2 = [coordiante[0] , coordiante[1] + 1]\n x2y0 = [coordiante[0] + 1, coordiante[1] - 1]\n x2y1 = [coordiante[0] + 1, coordiante[1]]\n x2y2 = [coordiante[0] + 1, coordiante[1] + 1]\n \n co.append(x0y0)\n co.append(x0y1)\n co.append(x0y2)\n co.append(x1y0)\n co.append(x1y1)\n co.append(x1y2)\n co.append(x2y0)\n co.append(x2y1)\n co.append(x2y2)\n \n co = np.array(co)\n x1y1 = np.array(coordinate_new)\n \n co_x1y1 = co -x1y1\n co_x1y1 = np.power(co_x1y1,2)\n co_x1y1_sum = np.sum(co_x1y1,axis=1)\n posmax = co_x1y1_sum.argmin()\n print(co[posmax])\n \n \n return co[posmax]\n \n \n \n def parametrize_learning_law(self, v0, vf, t0, tf):\n y = np.zeros((tf-t0,1))\n t = [i for i in range(1,tf+1)]\n B = (vf*tf - v0*t0)/(v0 - vf)\n A = v0*t0 + B*v0\n y = [A/(t[i]+B) for i in range(len(t))]\n return y\n\n \n\n def speed_up_som(self):\n \n # get the training data\n Gx,Gy,F,V = a.generateGxGyVF()\n gxyv,fxy = a.generatetraindata(Gx,Gy,F,V)\n DxyvUxy = np.zeros((8720,5))\n with open('DxyvUxy.pkl','rb') as file:\n load = pickle.load(file)\n DxyvUxy = np.array(load)\n\n # Normalize the data\n sensory_x = DxyvUxy[:,0:3] / 1020.0\n sensory_y = DxyvUxy[:,3:5] \n\n # initialize the parameters\n N_NEURONS = self.sN_NEURONS # sensor1 \n N_NEURONS_1 = self.sN_NEURONS_1 # sensor2 \n population_s = np.ones((N_NEURONS,1))*0.045 # sensor1 tuning curve\n population_a = np.zeros((N_NEURONS,1)) # sensor1 activation value\n wcross = np.random.uniform(0,1,(N_NEURONS,N_NEURONS_1))\n population_Wcross = wcross / wcross.sum() # sensor1 HL matrix\n train_round = self.strain_round \n population_Winput = np.random.random((N_NEURONS,sensory_x.shape[1]))/100.0 # sensor1 weight \n sample_num = sensory_x.shape[0] \n sample_demension = sensory_x.shape[1] \n learning_sigmat = self.parametrize_learning_law(50,1,1,train_round) \n learning_alphat = self.parametrize_learning_law(0.1,0.001,1,train_round)\n ETA = 1.0 \n XI = 1e-3\n hwi = np.zeros((N_NEURONS,1)) \n \n population_s_1 = np.ones((N_NEURONS_1,1))*0.045 # sensor2 tuning curve\n population_a_1 = np.zeros((N_NEURONS_1,1)) # sensor1 activation value\n wcross_1 = np.random.uniform(0,1,(N_NEURONS_1,N_NEURONS))\n population_Wcross_1 = wcross_1 / wcross_1.sum() # sensor2 HL matrix\n print(sensory_y.shape)\n population_Winput_1 = np.random.random((N_NEURONS_1,sensory_y.shape[1]))/100.0 # sensor1 weight \n sample_num_1 = sensory_y.shape[0] \n sample_demension_1 = sensory_y.shape[1] \n ETA = 1.0 \n XI = 1e-3 \n hwi_1 = np.zeros((N_NEURONS_1,1)) \n hl_trainround = 100\n avg_act = np.zeros((N_NEURONS,1)) \n avg_act_1 = np.zeros((N_NEURONS_1,1)) \n \n # training \n for t in range(hl_trainround + train_round): \n if t < train_round: \n for sample_index in range(sample_num): \n \n act_cur1 = np.zeros((N_NEURONS,1))\n act_cur2 = np.zeros((N_NEURONS_1,1))\n \n \n input_sample = sensory_x[sample_index].reshape(1,-1) \n input_sample_2 = sensory_y[sample_index].reshape(1,-1)\n\n temp = (np.power((input_sample - population_Winput),2).sum(axis=1)/sample_demension).reshape(-1,1) \n temp1 = (np.power((input_sample_2 - population_Winput_1),2).sum(axis=1)/sample_demension_1).reshape(-1,1)\n \n # matrix calculate.All activation values are updated together\n act_cur1 = (1/(np.sqrt(2*np.pi)*population_s))*np.exp(-temp/(2*np.power(population_s,2)))\n act_cur2 = (1/(np.sqrt(2*np.pi)*population_s_1))*np.exp(-temp1/(2*np.power(population_s_1,2)))\n \n act_cur_sum = act_cur1.sum()\n 
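# (comment added for clarity, not in the original: the divisions just below normalize\n # each activation vector to sum to 1, i.e. a probability-like distribution over neurons)\n 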
act_cur_sum1 = act_cur2.sum()\n \n if act_cur_sum == 0 or act_cur_sum1 == 0:\n print('act_cur.sum() is less than 1e-323,ignore the update!')\n continue\n act_cur1 = act_cur1 / act_cur_sum\n act_cur2 = act_cur2 / act_cur_sum1\n \n population_a = (1-ETA)*population_a + ETA * act_cur1\n population_a_1 = (1-ETA)*population_a_1 + ETA * act_cur2\n \n win_pos = population_a[:,0].argmax()\n win_pos1 = population_a_1[:,0].argmax()\n \n pos_list = np.arange(0,N_NEURONS,1)\n pos_list_1 = np.arange(0,N_NEURONS_1,1)\n \n hwi = (np.exp(-np.power(pos_list - win_pos, 2) / (2 * np.power(learning_sigmat[t],2)))).reshape(N_NEURONS,1)\n hwi_1 = (np.exp(-np.power(pos_list_1 - win_pos1, 2) / (2 * np.power(learning_sigmat[t],2)))).reshape(N_NEURONS_1,1)\n \n # matrix calculate.All population_Winput values are updated together\n population_Winput = population_Winput+ \\\n learning_alphat[t] * hwi * (input_sample - population_Winput)\n \n population_Winput_1 = population_Winput_1+ \\\n learning_alphat[t] * hwi_1 * (input_sample_2 - population_Winput_1) \n \n # matrix calculate.All population_s values are updated together\n temp_s = (np.power((input_sample - population_Winput),2).sum(axis=1)/sample_demension).reshape(-1,1)\n population_s = population_s + \\\n learning_alphat[t] * (1/(np.sqrt(2*np.pi)*learning_sigmat[t])) * \\\n hwi * (temp_s - np.power(population_s,2))\n \n temp_s_1 = (np.power((input_sample_2 - population_Winput_1),2).sum(axis=1)/sample_demension_1).reshape(-1,1)\n population_s_1 = population_s_1 + \\\n learning_alphat[t] * (1/(np.sqrt(2*np.pi)*learning_sigmat[t])) * \\\n hwi_1 * (temp_s_1 - np.power(population_s_1,2))\n \n print('training:',t/(train_round+hl_trainround))\t \n \n # HL matrix training \n for sample_index in range(sample_num):\n\n act_cur1 = np.zeros((N_NEURONS,1))\n act_cur2 = np.zeros((N_NEURONS_1,1))\n \n input_sample = sensory_x[sample_index].reshape(1,-1) \n input_sample_2 = sensory_y[sample_index].reshape(1,-1)\n \n temp = (np.power((input_sample - population_Winput),2).sum(axis=1)/sample_demension).reshape(-1,1) \n temp1 = (np.power((input_sample_2 - population_Winput_1),2).sum(axis=1)/sample_demension_1).reshape(-1,1)\n \n # matrix calculate. 
All activation values are updated together\n act_cur1 = (1/(np.sqrt(2*np.pi)*population_s))*np.exp(-temp/(2*np.power(population_s,2)))\n act_cur2 = (1/(np.sqrt(2*np.pi)*population_s_1))*np.exp(-temp1/(2*np.power(population_s_1,2)))\n \n \n act_cur_sum = act_cur1.sum()\n act_cur_sum1 = act_cur2.sum()\n if act_cur_sum == 0 or act_cur_sum1 == 0:\n print('act_cur.sum() is less than 1e-323,ignore the update!')\n continue\n act_cur1 = act_cur1 / act_cur_sum\n act_cur2 = act_cur2 / act_cur_sum1\n \n population_a = (1-ETA)*population_a + ETA * act_cur1\n population_a_1 = (1-ETA)*population_a_1 + ETA * act_cur2\n \n OMEGA = 0.002 + 0.998/(t+2)\n avg_act[:,0] = (1-OMEGA)*avg_act[:, 0] + OMEGA*population_a[:,0]\n avg_act_1[:,0] = (1-OMEGA)*avg_act_1[:, 0] + OMEGA*population_a_1[:,0]\n \n population_Wcross = (1-XI)*population_Wcross + XI*(population_a - avg_act[:, 0].reshape(N_NEURONS,1))*(population_a_1 - avg_act_1[:, 0].reshape(N_NEURONS_1,1)).T\n\n if t%200 == 199:\n # save the model\n with open('populations_Wcross{}.pkl'.format(t),'wb') as output:\n pickle.dump(population_Wcross,output)\n with open('populations_Winput{}.pkl'.format(t),'wb') as output1:\n pickle.dump(population_Winput,output1)\n with open('population_Winput_1{}.pkl'.format(t),'wb') as output2:\n pickle.dump(population_Winput_1,output2) \n \n with open('populations_s{}.pkl'.format(t),'wb') as output3:\n pickle.dump(population_s,output3)\n with open('populations_s_1{}.pkl'.format(t),'wb') as output4:\n pickle.dump(population_s_1,output4)\n\nif __name__ == '__main__':\n a = picprocess()\n start = time.time()\n if a.strain == 1:\n a.speed_up_som()\n else:\n a.predict_corner()\n print(time.time() - start)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"trainFGV_sparse.py","file_name":"trainFGV_sparse.py","file_ext":"py","file_size_in_byte":17799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"594621116","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nresult=[]\nfor i in range(1000):\n x1=np.random.normal(0.0,0.55)\n y1=x1*0.1+0.3 + np.random.normal(0.0,0.03)\n result.append([x1,y1])\n # print(x1)\nx_data=[v[0] for v in result]\ny_data=[v[1] for v in result]\n# plt.scatter(x_data,y_data,c='r')\n# plt.show()\n\n\nW=tf.Variable(tf.random_uniform([1],-1.0,1.0),name='W')\nb=tf.Variable(tf.zeros([1]),name='b')\ny=W*x_data+b\nloss=tf.reduce_mean(tf.square(y-y_data),name='loss')\n\noptimizer=tf.train.GradientDescentOptimizer(0.5)\ntrain=optimizer.minimize(loss,name='train')\nsess = tf.Session()\ninit=tf.global_variables_initializer()\nsess.run(init)\nprint('W=',sess.run(W),'b=',sess.run(b),'loss=',sess.run(loss))\nfor step in range(20):\n sess.run(train)\n print('W=', sess.run(W), 'b=', sess.run(b), 'loss=', sess.run(loss))\nwriter=tf.summary.FileWriter(\"./tmp\",sess.graph) # tf.train.SummaryWriter was removed in TF 1.0; FileWriter is the TF1 equivalent\n\n\nplt.scatter(x_data,y_data,c='r')\nplt.plot(x_data,sess.run(W)*x_data+sess.run(b))\nplt.show()","sub_path":"machineLearning/shuju3.py","file_name":"shuju3.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"554606786","text":"import os\nimport random\nimport sys\nfrom argparse import ArgumentParser, Namespace\nfrom pathlib import Path\nfrom pprint import pprint\n\nfrom flatland.utils.rendertools import RenderTool\nfrom torch.utils.tensorboard import SummaryWriter\nimport numpy as np\nimport torch\n\nfrom flatland.envs.rail_env import RailEnv, RailEnvActions\nfrom 
flatland.envs.rail_generators import sparse_rail_generator\nfrom flatland.envs.schedule_generators import sparse_schedule_generator\nfrom flatland.envs.observations import TreeObsForRailEnv\n\nfrom flatland.envs.malfunction_generators import malfunction_from_params, MalfunctionParameters\nfrom flatland.envs.predictions import ShortestPathPredictorForRailEnv\n\nbase_dir = Path(__file__).resolve().parent.parent\nsys.path.append(str(base_dir))\n\nfrom utils.observation_utils import normalize_observation\nfrom reinforcement_learning.timer import Timer\nfrom reinforcement_learning.rainbow_policy import RainbowAgent\n\ntry:\n import wandb\n\n wandb.init(sync_tensorboard=True)\nexcept ImportError:\n print(\"Install wandb to log to Weights & Biases\")\n\n\"\"\"\nThis file shows how to train multiple agents using a reinforcement learning approach.\n\nDocumentation: https://flatland.aicrowd.com/getting-started/rl/multi-agent.html\nResults: https://app.wandb.ai/masterscrat/flatland-examples-reinforcement_learning/reports/Flatland-Examples--VmlldzoxNDI2MTA\n\"\"\"\n\nSUPPRESS_OUTPUT = False\n\nif SUPPRESS_OUTPUT:\n # ugly hack to be able to run hyperparameters sweeps with w&b\n # they currently have a bug which prevents runs that output emojis to run :(\n def print(*args, **kwargs):\n pass\n\n\ndef train_agent(env_params, train_params):\n # Environment parameters\n n_agents = env_params.n_agents\n x_dim = env_params.x_dim\n y_dim = env_params.y_dim\n n_cities = env_params.n_cities\n max_rails_between_cities = env_params.max_rails_between_cities\n max_rails_in_city = env_params.max_rails_in_city\n seed = env_params.seed\n\n # Observation parameters\n observation_tree_depth = env_params.observation_tree_depth\n observation_radius = env_params.observation_radius\n observation_max_path_depth = env_params.observation_max_path_depth\n\n # Training parameters\n eps_start = train_params.eps_start\n eps_end = train_params.eps_end\n eps_decay = train_params.eps_decay\n n_episodes = train_params.n_episodes\n checkpoint_interval = train_params.checkpoint_interval\n n_eval_episodes = train_params.n_evaluation_episodes\n\n # Set the seeds\n random.seed(seed)\n np.random.seed(seed)\n\n # Break agents from time to time\n malfunction_parameters = MalfunctionParameters(\n malfunction_rate=1. / 10000, # Rate of malfunctions\n min_duration=15, # Minimal duration\n max_duration=50 # Max duration\n )\n\n # Observation builder\n predictor = ShortestPathPredictorForRailEnv(observation_max_path_depth)\n tree_observation = TreeObsForRailEnv(max_depth=observation_tree_depth, predictor=predictor)\n\n # Fraction of train which each speed\n speed_profiles = {\n 1.: 1.0, # Fast passenger train\n 1. / 2.: 0.0, # Fast freight train\n 1. / 3.: 0.0, # Slow commuter train\n 1. 
/ 4.: 0.0 # Slow freight train\n }\n\n # Setup the environment\n env = RailEnv(\n width=x_dim,\n height=y_dim,\n rail_generator=sparse_rail_generator(\n max_num_cities=n_cities,\n grid_mode=False,\n max_rails_between_cities=max_rails_between_cities,\n max_rails_in_city=max_rails_in_city\n ),\n schedule_generator=sparse_schedule_generator(speed_profiles),\n number_of_agents=n_agents,\n malfunction_generator_and_process_data=malfunction_from_params(malfunction_parameters),\n obs_builder_object=tree_observation,\n random_seed=seed\n )\n\n env.reset(regenerate_schedule=True, regenerate_rail=True)\n\n # Setup renderer\n if train_params.render:\n env_renderer = RenderTool(env, gl=\"PGL\")\n\n # Calculate the state size given the depth of the tree observation and the number of features\n n_features_per_node = env.obs_builder.observation_dim\n n_nodes = 0\n for i in range(observation_tree_depth + 1):\n n_nodes += np.power(4, i)\n state_size = n_features_per_node * n_nodes\n\n # The action space of flatland is 5 discrete actions\n action_size = 5\n\n # Max number of steps per episode\n # This is the official formula used during evaluations\n # See details in flatland.envs.schedule_generators.sparse_schedule_generator\n max_steps = int(4 * 2 * (env.height + env.width + (n_agents / n_cities)))\n\n action_count = [0] * action_size\n action_dict = dict()\n agent_obs = [None] * env.get_num_agents()\n agent_prev_obs = [None] * env.get_num_agents()\n agent_prev_action = [2] * env.get_num_agents()\n update_values = False\n smoothed_normalized_score = -1.0\n smoothed_eval_normalized_score = -1.0\n smoothed_completion = 0.0\n smoothed_eval_completion = 0.0\n\n # Double Dueling DQN policy\n policy = RainbowAgent(state_size, action_size, train_params)\n\n # TensorBoard writer\n writer = SummaryWriter()\n writer.add_hparams(vars(train_params), {})\n writer.add_hparams(vars(env_params), {})\n\n training_timer = Timer()\n training_timer.start()\n\n print(\"\\n🚉 Training {} trains on {}x{} grid for {} episodes, evaluating on {} episodes every {} episodes.\\n\"\n .format(env.get_num_agents(), x_dim, y_dim, n_episodes, n_eval_episodes, checkpoint_interval))\n\n for episode_idx in range(n_episodes + 1):\n # Timers\n step_timer = Timer()\n reset_timer = Timer()\n learn_timer = Timer()\n preproc_timer = Timer()\n\n # Reset environment\n reset_timer.start()\n obs, info = env.reset(regenerate_rail=True, regenerate_schedule=True)\n reset_timer.end()\n\n if train_params.render:\n env_renderer.set_new_rail()\n\n score = 0\n nb_steps = 0\n actions_taken = []\n\n # Build agent specific observations\n for agent in env.get_agent_handles():\n if obs[agent]:\n agent_obs[agent] = normalize_observation(obs[agent], observation_tree_depth, observation_radius=observation_radius)\n agent_prev_obs[agent] = agent_obs[agent].copy()\n\n # Run episode\n for step in range(max_steps - 1):\n for agent in env.get_agent_handles():\n if info['action_required'][agent]:\n # If an action is required, we want to store the obs at that step as well as the action\n update_values = True\n action = policy.act(agent_obs[agent], eps=eps_start)\n action_count[action] += 1\n actions_taken.append(action)\n else:\n update_values = False\n action = 0\n action_dict.update({agent: action})\n\n # Environment step\n step_timer.start()\n next_obs, all_rewards, done, info = env.step(action_dict)\n step_timer.end()\n\n if train_params.render and episode_idx % checkpoint_interval == 0:\n env_renderer.render_env(\n show=True,\n frames=False,\n show_observations=False,\n 
show_predictions=False\n )\n\n for agent in range(env.get_num_agents()):\n # Update replay buffer and train agent\n # Only update the values when we are done or when an action was taken and thus relevant information is present\n if update_values or done[agent]:\n learn_timer.start()\n policy.step(agent_prev_obs[agent], agent_prev_action[agent], all_rewards[agent], agent_obs[agent], done[agent])\n learn_timer.end()\n\n agent_prev_obs[agent] = agent_obs[agent].copy()\n agent_prev_action[agent] = action_dict[agent]\n\n # Preprocess the new observations\n if next_obs[agent]:\n preproc_timer.start()\n agent_obs[agent] = normalize_observation(next_obs[agent], observation_tree_depth, observation_radius=observation_radius)\n preproc_timer.end()\n\n score += all_rewards[agent]\n\n nb_steps = step\n\n if done['__all__']:\n break\n\n # Epsilon decay\n eps_start = max(eps_end, eps_decay * eps_start)\n\n # Collection information about training\n tasks_finished = sum(done[idx] for idx in env.get_agent_handles())\n completion = tasks_finished / max(1, env.get_num_agents())\n normalized_score = score / (max_steps * env.get_num_agents())\n action_probs = action_count / np.sum(action_count)\n action_count = [1] * action_size\n\n # Smoothed values for terminal display and for more stable hyper-parameter tuning\n smoothing = 0.99\n smoothed_normalized_score = smoothed_normalized_score * smoothing + normalized_score * (1.0 - smoothing)\n smoothed_completion = smoothed_completion * smoothing + completion * (1.0 - smoothing)\n\n # Print logs\n if episode_idx % checkpoint_interval == 0:\n torch.save(policy.qnetwork_local, './checkpoints/multi-' + str(episode_idx) + '.pth')\n if train_params.render:\n env_renderer.close_window()\n\n print(\n '\\r🚂 Episode {}'\n '\\t 🏆 Score: {:.3f}'\n ' Avg: {:.3f}'\n '\\t 💯 Done: {:.2f}%'\n ' Avg: {:.2f}%'\n '\\t 🎲 Epsilon: {:.2f} '\n '\\t 🔀 Action Probs: {}'.format(\n episode_idx,\n normalized_score,\n smoothed_normalized_score,\n 100 * completion,\n 100 * smoothed_completion,\n eps_start,\n format_action_prob(action_probs)\n ), end=\" \")\n\n # Evaluate policy\n if episode_idx % train_params.checkpoint_interval == 0:\n scores, completions, nb_steps_eval = eval_policy(env, policy, n_eval_episodes, max_steps)\n writer.add_scalar(\"evaluation/scores_min\", np.min(scores), episode_idx)\n writer.add_scalar(\"evaluation/scores_max\", np.max(scores), episode_idx)\n writer.add_scalar(\"evaluation/scores_mean\", np.mean(scores), episode_idx)\n writer.add_scalar(\"evaluation/scores_std\", np.std(scores), episode_idx)\n writer.add_histogram(\"evaluation/scores\", np.array(scores), episode_idx)\n writer.add_scalar(\"evaluation/completions_min\", np.min(completions), episode_idx)\n writer.add_scalar(\"evaluation/completions_max\", np.max(completions), episode_idx)\n writer.add_scalar(\"evaluation/completions_mean\", np.mean(completions), episode_idx)\n writer.add_scalar(\"evaluation/completions_std\", np.std(completions), episode_idx)\n writer.add_histogram(\"evaluation/completions\", np.array(completions), episode_idx)\n writer.add_scalar(\"evaluation/nb_steps_min\", np.min(nb_steps_eval), episode_idx)\n writer.add_scalar(\"evaluation/nb_steps_max\", np.max(nb_steps_eval), episode_idx)\n writer.add_scalar(\"evaluation/nb_steps_mean\", np.mean(nb_steps_eval), episode_idx)\n writer.add_scalar(\"evaluation/nb_steps_std\", np.std(nb_steps_eval), episode_idx)\n writer.add_histogram(\"evaluation/nb_steps\", np.array(nb_steps_eval), episode_idx)\n\n smoothing = 0.9\n 
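# (comment added for clarity, not in the original: the update below is an exponential\n # moving average, new = 0.9*old + 0.1*sample, so roughly the last ten evaluation\n # rounds dominate the smoothed score)\n 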
smoothed_eval_normalized_score = smoothed_eval_normalized_score * smoothing + np.mean(scores) * (1.0 - smoothing)\n smoothed_eval_completion = smoothed_eval_completion * smoothing + np.mean(completions) * (1.0 - smoothing)\n writer.add_scalar(\"evaluation/smoothed_score\", smoothed_eval_normalized_score, episode_idx)\n writer.add_scalar(\"evaluation/smoothed_completion\", smoothed_eval_completion, episode_idx)\n\n # Save logs to tensorboard\n writer.add_scalar(\"training/score\", normalized_score, episode_idx)\n writer.add_scalar(\"training/smoothed_score\", smoothed_normalized_score, episode_idx)\n writer.add_scalar(\"training/completion\", np.mean(completion), episode_idx)\n writer.add_scalar(\"training/smoothed_completion\", np.mean(smoothed_completion), episode_idx)\n writer.add_scalar(\"training/nb_steps\", nb_steps, episode_idx)\n writer.add_histogram(\"actions/distribution\", np.array(actions_taken), episode_idx)\n writer.add_scalar(\"actions/nothing\", action_probs[RailEnvActions.DO_NOTHING], episode_idx)\n writer.add_scalar(\"actions/left\", action_probs[RailEnvActions.MOVE_LEFT], episode_idx)\n writer.add_scalar(\"actions/forward\", action_probs[RailEnvActions.MOVE_FORWARD], episode_idx)\n writer.add_scalar(\"actions/right\", action_probs[RailEnvActions.MOVE_RIGHT], episode_idx)\n writer.add_scalar(\"actions/stop\", action_probs[RailEnvActions.STOP_MOVING], episode_idx)\n writer.add_scalar(\"training/epsilon\", eps_start, episode_idx)\n writer.add_scalar(\"training/buffer_size\", len(policy.memory), episode_idx)\n writer.add_scalar(\"training/loss\", policy.loss, episode_idx)\n writer.add_scalar(\"timer/reset\", reset_timer.get(), episode_idx)\n writer.add_scalar(\"timer/step\", step_timer.get(), episode_idx)\n writer.add_scalar(\"timer/learn\", learn_timer.get(), episode_idx)\n writer.add_scalar(\"timer/preproc\", preproc_timer.get(), episode_idx)\n writer.add_scalar(\"timer/total\", training_timer.get_current(), episode_idx)\n\n\ndef format_action_prob(action_probs):\n action_probs = np.round(action_probs, 3)\n actions = [\"↻\", \"←\", \"↑\", \"→\", \"◼\"]\n\n buffer = \"\"\n for action, action_prob in zip(actions, action_probs):\n buffer += action + \" \" + \"{:.3f}\".format(action_prob) + \" \"\n\n return buffer\n\n\ndef eval_policy(env, policy, n_eval_episodes, max_steps):\n action_dict = dict()\n scores = []\n completions = []\n nb_steps = []\n\n for episode_idx in range(n_eval_episodes):\n agent_obs = [None] * env.get_num_agents()\n score = 0.0\n\n obs, info = env.reset(regenerate_rail=True, regenerate_schedule=True)\n\n final_step = 0\n\n for step in range(max_steps - 1):\n for agent in env.get_agent_handles():\n if obs[agent]:\n # TODO pass parameters properly\n # agent_obs[agent] = normalize_observation(obs[agent], tree_depth=2, observation_radius=10)\n agent_obs[agent] = normalize_observation(obs[agent], tree_depth=2, observation_radius=10)\n\n action = 0\n if info['action_required'][agent]:\n action = policy.act(agent_obs[agent], eps=0.0)\n action_dict.update({agent: action})\n\n obs, all_rewards, done, info = env.step(action_dict)\n\n for agent in env.get_agent_handles():\n score += all_rewards[agent]\n\n final_step = step\n\n if done['__all__']:\n break\n\n normalized_score = score / (max_steps * env.get_num_agents())\n scores.append(normalized_score)\n\n tasks_finished = sum(done[idx] for idx in env.get_agent_handles())\n completion = tasks_finished / max(1, env.get_num_agents())\n completions.append(completion)\n\n nb_steps.append(final_step)\n\n print(\"\\t✅ 
Eval: score {:.3f} done {:.1f}%\".format(np.mean(scores), np.mean(completions) * 100.0))\n\n return scores, completions, nb_steps\n\n\nif __name__ == \"__main__\":\n # Hyperparameters\n parser = ArgumentParser(description='Rainbow')\n parser.add_argument('--id', type=str, default='default', help='Experiment ID')\n parser.add_argument('--seed', type=int, default=123, help='Random seed')\n parser.add_argument('--disable-cuda', action='store_true', help='Disable CUDA')\n parser.add_argument('--T-max', type=int, default=int(50e6), metavar='STEPS', help='Number of training steps (4x number of frames)')\n parser.add_argument('--max-episode-length', type=int, default=int(108e3), metavar='LENGTH', help='Max episode length in game frames (0 to disable)')\n parser.add_argument('--history-length', type=int, default=4, metavar='T', help='Number of consecutive states processed')\n parser.add_argument('--hidden-size', type=int, default=512, metavar='SIZE', help='Network hidden size')\n parser.add_argument('--noisy-std', type=float, default=0.1, metavar='σ', help='Initial standard deviation of noisy linear layers')\n parser.add_argument('--atoms', type=int, default=51, metavar='C', help='Discretised size of value distribution')\n parser.add_argument('--V-min', type=float, default=-10, metavar='V', help='Minimum of value distribution support')\n parser.add_argument('--V-max', type=float, default=10, metavar='V', help='Maximum of value distribution support')\n parser.add_argument('--model', type=str, metavar='PARAMS', help='Pretrained model (state dict)')\n parser.add_argument('--memory-capacity', type=int, default=int(1e6), metavar='CAPACITY', help='Experience replay memory capacity')\n parser.add_argument('--replay-frequency', type=int, default=4, metavar='k', help='Frequency of sampling from memory')\n parser.add_argument('--priority-exponent', type=float, default=0.5, metavar='ω', help='Prioritised experience replay exponent (originally denoted α)')\n parser.add_argument('--priority-weight', type=float, default=0.4, metavar='β', help='Initial prioritised experience replay importance sampling weight')\n parser.add_argument('--multi-step', type=int, default=3, metavar='n', help='Number of steps for multi-step return')\n parser.add_argument('--discount', type=float, default=0.95, metavar='γ', help='Discount factor')\n parser.add_argument('--target-update', type=int, default=int(8e3), metavar='τ', help='Number of steps after which to update target network')\n parser.add_argument('--reward-clip', type=int, default=1, metavar='VALUE', help='Reward clipping (0 to disable)')\n parser.add_argument('--learning-rate', type=float, default=0.0000625, metavar='η', help='Learning rate')\n parser.add_argument('--adam-eps', type=float, default=1.5e-4, metavar='ε', help='Adam epsilon')\n parser.add_argument('--batch-size', type=int, default=32, metavar='SIZE', help='Batch size')\n parser.add_argument('--learn-start', type=int, default=int(20e3), metavar='EPISODES', help='Number of episodes before starting training')\n parser.add_argument('--evaluate', action='store_true', help='Evaluate only')\n parser.add_argument('--evaluation-interval', type=int, default=100000, metavar='EPISODES', help='Number of episodes between evaluations')\n parser.add_argument('--evaluation-episodes', type=int, default=10, metavar='N', help='Number of evaluation episodes to average over')\n\t# TODO: Note that DeepMind's evaluation method is running the latest agent for 500K frames ever every 1M steps\n parser.add_argument('--evaluation-size', 
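# a fixed set of held-out transitions used to track average Q over training, as in the DQN paper\n                        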
type=int, default=500, metavar='N', help='Number of transitions to use for validating Q')\n parser.add_argument('--render', action='store_true', help='Display screen (testing only)')\n parser.add_argument('--enable-cudnn', action='store_true', help='Enable cuDNN (faster but nondeterministic)')\n parser.add_argument('--checkpoint-interval', type=int, default=0, help='How often to checkpoint the model, defaults to 0 (never checkpoint)')\n parser.add_argument('--memory', help='Path to save/load the memory from')\n parser.add_argument('--disable-bzip-memory', action='store_true', help='Don\\'t zip the memory file. Not recommended (zipping is a bit slower and much, much smaller)')\n parser.add_argument('--debug', action='store_true', help='Print more info during execution')\n\t\n\t# Env parameters\n\t# parser.add_argument('--state_size', type=int, help='Size of state to feed to the neural network') # Depends on prediction_depth\n parser.add_argument('--network-action-space', type=int, default=2, help='Number of actions allowed in the environment')\n parser.add_argument('--width', type=int, default=100, help='Environment width')\n parser.add_argument('--height', type=int, default=100, help='Environment height')\n parser.add_argument('--num-agents', type=int, default=50, help='Number of agents in the environment')\n parser.add_argument('--max-num-cities', type=int, default=6, help='Maximum number of cities where agents can start or end')\n\t# parser.add_argument('--seed', type=int, default=1, help='Seed used to generate grid environment randomly')\n parser.add_argument('--grid-mode', type=bool, default=False, help='Type of city distribution, if False cities are randomly placed')\n parser.add_argument('--max-rails-between-cities', type=int, default=4, help='Max number of tracks allowed between cities, these count as entry points to a city')\n parser.add_argument('--max-rails-in-city', type=int, default=6, help='Max number of parallel tracks within a city allowed')\n parser.add_argument('--malfunction-rate', type=int, default=1000, help='Rate of malfunction occurrence of single agent')\n parser.add_argument('--min-duration', type=int, default=20, help='Min duration of malfunction')\n parser.add_argument('--max-duration', type=int, default=50, help='Max duration of malfunction')\n parser.add_argument('--observation-builder', type=str, default='GraphObsForRailEnv', help='Class to use to build observation for agent')\n parser.add_argument('--predictor', type=str, default='ShortestPathPredictorForRailEnv', help='Class used to predict agent paths and help observation building')\n\t# parser.add_argument('--bfs-depth', type=int, default=4, help='BFS depth of the graph observation')\n parser.add_argument('--prediction-depth', type=int, default=108, help='Prediction depth for shortest path strategy, i.e. 
length of a path')\n\t# parser.add_argument('--view-semiwidth', type=int, default=7, help='Semiwidth of field view for agent in local obs')\n\t# parser.add_argument('--view-height', type=int, default=30, help='Height of the field view for agent in local obs')\n\t# parser.add_argument('--offset', type=int, default=10, help='Offset of agent in local obs')\n\t# Training parameters\n    parser.add_argument('--num-episodes', type=int, default=1000, help='Number of episodes on which to train the agents')\n    parser.add_argument('--num-threads', type=int, default=1, help='Number of OpenMP threads to use')\n\n    args = parser.parse_args()\n\n    os.environ[\"OMP_NUM_THREADS\"] = str(args.num_threads)\n    train_agent(Namespace(**vars(args)), args)\n    ","sub_path":"examples/reinforcement_learning/multi_agent_training_rainbow.py","file_name":"multi_agent_training_rainbow.py","file_ext":"py","file_size_in_byte":22695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"149449572","text":"from collections import OrderedDict\n\nfrom drf_yasg import openapi\nfrom drf_yasg.inspectors import FieldInspector, NotHandled\nfrom rest_framework_json_api import serializers\nfrom rest_framework_json_api.utils import (\n    get_related_resource_type,\n    get_resource_type_from_serializer,\n)\n\n\nclass ResourceRelatedFieldInspector(FieldInspector):\n    def field_to_swagger_object(\n        self, field, swagger_object_type, use_references, **kwargs\n    ):\n        if isinstance(field, serializers.ResourceRelatedField):\n            return None\n\n        return NotHandled\n\n\nclass ModelSerializerInspector(FieldInspector):\n    def process_result(self, result, method_name, obj, **kwargs):\n        if (\n            isinstance(obj, serializers.ModelSerializer)\n            and method_name == \"field_to_swagger_object\"\n        ):\n            model_response = self.formatted_model_result(result, obj)\n            if obj.parent is None and self.view.action != \"list\":\n                return self.decorate_with_data(model_response)\n\n            return model_response\n\n        return result\n\n    def generate_relationships(self, obj):\n        relationships_properties = []\n        for field in obj.fields.values():\n            if isinstance(field, serializers.ResourceRelatedField):\n                relationships_properties.append(self.generate_relationship(field))\n        if relationships_properties:\n            return openapi.Schema(\n                title=\"Relationships of object\",\n                type=openapi.TYPE_OBJECT,\n                properties=OrderedDict(relationships_properties),\n            )\n\n        return None\n\n    def generate_relationship(self, field):\n        field_schema = openapi.Schema(\n            title=\"Relationship object\",\n            type=openapi.TYPE_OBJECT,\n            properties=OrderedDict(\n                (\n                    (\n                        \"type\",\n                        openapi.Schema(\n                            type=openapi.TYPE_STRING,\n                            title=\"Type of related object\",\n                            enum=[get_related_resource_type(field)],\n                        ),\n                    ),\n                    (\n                        \"id\",\n                        openapi.Schema(\n                            type=openapi.TYPE_STRING, title=\"ID of related object\"\n                        ),\n                    ),\n                )\n            ),\n        )\n        return field.field_name, self.decorate_with_data(field_schema)\n\n    def formatted_model_result(self, result, obj):\n        return openapi.Schema(\n            type=openapi.TYPE_OBJECT,\n            required=[\"properties\"],\n            properties=OrderedDict(\n                (\n                    (\n                        \"type\",\n                        openapi.Schema(\n                            type=openapi.TYPE_STRING,\n                            enum=[get_resource_type_from_serializer(obj)],\n                            title=\"Type of related object\",\n                        ),\n                    ),\n                    (\n                        \"id\",\n                        openapi.Schema(\n                            type=openapi.TYPE_STRING,\n                            title=\"ID of related object\",\n                            read_only=True,\n                        ),\n                    ),\n                    (\"attributes\", result),\n                    (\"relationships\", self.generate_relationships(obj)),\n                )\n            ),\n        )\n\n    def decorate_with_data(self, result):\n        return openapi.Schema(\n            type=openapi.TYPE_OBJECT,\n            required=[\"data\"],\n            properties=OrderedDict(((\"data\", result),)),\n        
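# JSON:API nests every resource (and relationship) payload under a \"data\" key, hence this wrapper.\n        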
)\n","sub_path":"server/api/docs/inspectors.py","file_name":"inspectors.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"563243962","text":"from joints.models import Joints, States, Reviews\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom django.http import Http404\n\nfrom django.template import RequestContext\n\nimport datetime\n\ndef index(request):\n    list_of_states = States.objects.all().order_by('name')\n    context = RequestContext(request)\n    return render_to_response('states_listing.html', {'list_of_states': list_of_states}, context_instance=context)\n    \ndef state(request, state_abbr):\n    try:\n        j = Joints.objects.filter(state__state_abbr=state_abbr)\n        s = States.objects.get(state_abbr=state_abbr)\n    except Joints.DoesNotExist:\n        raise Http404\n    context = RequestContext(request)\n    return render_to_response('state.html', {'joints': j, 'state': s}, context_instance=context)\n    \ndef joint(request, joint_id):\n    try:\n        j = Joints.objects.get(pk=joint_id)\n        s = States.objects.get(state_abbr=j.state)\n    except Joints.DoesNotExist:\n        raise Http404\n    # Get reviews\n    try:\n        reviews = Reviews.objects.filter(joint=joint_id)\n    except Reviews.DoesNotExist:\n        reviews = None\n    # Check if User has written a review already\n    try:\n        user_review = Reviews.objects.get(user=request.user.id, joint=joint_id)\n    except Reviews.DoesNotExist:\n        user_review = None\n    context = RequestContext(request)\n    return render_to_response('joint.html', {'joint': j, 'state': s, 'reviews': reviews, 'user_review':user_review }, context_instance=RequestContext(request))\n    \ndef review(request, joint_id):\n    try:\n        j = Joints.objects.get(pk=joint_id)\n        s = States.objects.get(state_abbr=j.state)\n        p = request.POST\n        # If User already has review, grab primary key for update\n        try:\n            user_review = Reviews.objects.get(user=request.user.id, joint=joint_id)\n            user_pk = user_review.id\n            user_created = user_review.created\n        except Reviews.DoesNotExist:\n            user_pk = None\n            user_created = datetime.datetime.now()\n    except Joints.DoesNotExist:\n        raise Http404\n    else: \n        Reviews(pk=user_pk, joint_id=joint_id, user_id=request.user.id, rating=p['rating'], review=p['review'], created = user_created, updated = datetime.datetime.now()).save()\n        # Reviews(pk=user_pk, joint_id=joint_id, user_id=request.user.id, rating=p['rating'], review=p['review']).save()\n    context = RequestContext(request)\n    return render_to_response('joint.html', {'joint': j, 'state': s, 'request': p}, context_instance=RequestContext(request))\n","sub_path":"joints/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"53650709","text":"def all_q(l):\n    return all(x == '?' 
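# True when every cell of the row is still the '?' placeholder\n               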
for x in l)\n\ndef solve():\n r, c = map(int, input().split())\n cake = [list(input()) for _ in range(r)]\n\n source = 0\n\n for i in range(r):\n if not all_q(cake[i]):\n # find first non ?\n j = 0\n while cake[i][j] == '?':\n j += 1\n\n for k in range(j):\n cake[i][k] = cake[i][j]\n\n last = cake[i][j]\n for k in range(j + 1, c):\n if cake[i][k] == '?':\n cake[i][k] = last\n else:\n last = cake[i][k]\n\n source = i\n\n for i in range(source + 1, r):\n if all_q(cake[i]):\n cake[i] = cake[i - 1][:]\n\n for i in range(source - 1, -1, -1):\n if all_q(cake[i]):\n cake[i] = cake[i + 1][:]\n\n for row in cake:\n print(''.join(row))\n\ndef main():\n t = int(input())\n for tt in range(t):\n print('Case #{}:'.format(tt + 1))\n solve()\n\nmain()\n","sub_path":"gcj/2017rd1a/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"75119526","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\n\n\nclass QSP(keras.layers.Layer):\n \"\"\"Parameterized quantum signal processing layer.\n\n The `QSP` layer implements the quantum signal processing circuit with trainable QSP angles.\n The input of the layer is/are theta(s) where x = cos(theta), and w(x) is X rotation in the QSP sequence.\n\n The output is the real part of the upper left element in the resulting unitary that describes the whole sequence.\n This is Re[P(x)] in the representation of the QSP unitary from Gilyen et al.\n\n Input is of the form:\n [[theta1], [theta2], ... ]\n\n Output is of the form:\n [[P(x1)], [P(x2)], ...]\n\n The layer requires the desired polynomial degree of P(x)\n\n \"\"\"\n\n def __init__(self, poly_deg=0, measurement=\"z\"):\n \"\"\"\n Params\n ------\n poly_deg: The desired degree of the polynomial in the QSP sequence.\n the layer will be parameterized with poly_deg + 1 trainable phi.\n measurement :\n measurement basis using the Wx model, {\"x\", \"z\"}\n \"\"\"\n super(QSP, self).__init__()\n self.poly_deg = poly_deg\n phi_init = tf.random_uniform_initializer(minval=0, maxval=np.pi)\n self.phis = tf.Variable(\n initial_value=phi_init(shape=(poly_deg + 1, 1), dtype=tf.float32),\n trainable=True,\n )\n self.measurement = measurement\n\n def call(self, th):\n batch_dim = tf.gather(tf.shape(th), 0)\n\n # tiled up X rotations (input W(x))\n px = tf.constant([[0.0, 1], [1, 0]], dtype=tf.complex64)\n px = tf.expand_dims(px, axis=0)\n px = tf.repeat(px, [batch_dim], axis=0)\n\n rot_x_arg = tf.complex(real=0.0, imag=th)\n rot_x_arg = tf.expand_dims(rot_x_arg, axis=1)\n rot_x_arg = tf.tile(rot_x_arg, [1, 2, 2])\n\n wx = tf.linalg.expm(tf.multiply(px, rot_x_arg))\n\n # tiled up Z rotations\n pz = tf.constant([[1.0, 0], [0, -1]], dtype=tf.complex64)\n pz = tf.expand_dims(pz, axis=0)\n pz = tf.repeat(pz, [batch_dim], axis=0)\n\n z_rotations = []\n for k in range(self.poly_deg + 1):\n phi = self.phis[k]\n rot_z_arg = tf.complex(real=0.0, imag=phi)\n rot_z_arg = tf.expand_dims(rot_z_arg, axis=0)\n rot_z_arg = tf.expand_dims(rot_z_arg, axis=0)\n rot_z_arg = tf.tile(rot_z_arg, [batch_dim, 2, 2])\n\n rz = tf.linalg.expm(tf.multiply(pz, rot_z_arg))\n z_rotations.append(rz)\n\n u = z_rotations[0]\n for rz in z_rotations[1:]:\n u = tf.matmul(u, wx)\n u = tf.matmul(u, rz)\n\n # assume we are interested in the real part of p(x) and the real part of q(x) in\n # the resulting qsp unitary\n if self.measurement == \"z\":\n return tf.math.real(u[:, 0, 0]), tf.math.imag(u[:, 0, 0])\n elif self.measurement 
== \"x\":\n return tf.math.real(u[:, 0, 0]), tf.math.imag(u[:, 0, 1])\n else:\n raise ValueError(\n \"Invalid measurement basis: {}\".format(self.measurement))\n\n\ndef construct_qsp_model(poly_deg, measurement=\"z\"):\n \"\"\"Helper function that compiles a QSP model with mean squared error and adam optimizer.\n\n Params\n ------\n poly_deg : int\n the desired degree of the polynomial in the QSP sequence.\n measurement :\n measurement basis using the Wx model, {\"x\", \"z\"}\n\n Returns\n -------\n Keras model\n a compiled keras model with trainable phis in a poly_deg QSP sequence.\n \"\"\"\n theta_input = tf.keras.Input(shape=(1,), dtype=tf.float32, name=\"theta\")\n qsp = QSP(poly_deg, measurement=measurement)\n real_parts = qsp(theta_input)\n model = tf.keras.Model(inputs=theta_input, outputs=real_parts)\n optimizer = tf.keras.optimizers.Adam(learning_rate=0.1)\n loss = tf.keras.losses.MeanSquaredError()\n model.compile(optimizer=optimizer, loss=loss)\n return model\n","sub_path":"pyqsp/qsp_models/qsp_layers.py","file_name":"qsp_layers.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"311504182","text":"# -*- coding: utf-8 -*-\n\n\nimport spacy\nfrom torchtext import datasets, data\nimport torchtext\nimport random\nimport torch\n\nROOT_PATH = \"C:/Users/jimon/Projects/word-embeddings-benchmarks/web_data/\"\nDOWNSTREAM_PATH = ROOT_PATH+ 'downstream'\nSEED = 1234\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)\n\n\"\"\"\nFetch dataset for testing attributional word_similarity\n\nReturns\n-------\ndata : sklearn.datasets.base.Bunch\n dictionary-like object. Keys of interest:\n 'X': matrix of 2 words per column,\n 'y': vector with scores,\n\"\"\"\n\nspacy_en = spacy.load('en')\ndef tokenizer(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]\n\n\ndef tabular_iter(text, label):\n train, val, test = torchtext.data.TabularDataset.splits(\n path='./data/', train='train.tsv',\n validation='val.tsv', test='test.tsv', format='tsv',\n fields=[('Text', text), ('Label', label)])\n data = {'train': train, 'val': val, 'test': test }\n return (data)\n\ndef tt_bucket_iter(train, test):\n # make iterator for splits\n train_iter, test_iter = data.BucketIterator.splits(\n (train, test), batch_size=3, device=0)\n return (train_iter, test_iter)\n\ndef flatten(l):\n return ([item for sublist in l for item in sublist])\n\n\n\n\nclass Downstream():\n # vocab set to True, we just deal with returning vocab, otherwise the train, test, val splits\n def __init__(self):\n self.TEXT = data.Field(sequential=True, tokenize=tokenizer, lower=True)\n self.LABEL = data.Field(sequential=False)\n\n # below two functions for when you want to pass in your own data\n def get_data(self, train):\n # build the vocabulary\n self.TEXT.build_vocab(train, max_size=30000)\n # vectors=GloVe(name='6B', dim=300))\n self.LABEL.build_vocab(train)\n return (self.TEXT, self.LABEL)\n\n def get_iters(self, train, test):\n train_iter, test_iter = tt_bucket_iter(train, test)\n data = {'train': train_iter, 'test': test_iter}\n return (data)\n\n def fetch_SENT_vocab(self):\n try:\n vocab = self.sent_train + self.sent_valid +self.sent_test\n except:\n train, self.sent_test = datasets.IMDB.splits(self.TEXT, self.LABEL)\n self.sent_train, self.sent_valid = train.split(random_state=random.seed(SEED))\n self.TEXT.build_vocab(self.pos_train.word, min_freq=3)\n #self.TEXT.build_vocab(self.pos_valid.word, min_freq=3)\n 
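# the val/test build_vocab calls stay commented out: the vocabulary is built\n            # from the training split only, so no unseen tokens leak into it\n            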
#self.TEXT.build_vocab(self.pos_test.word, min_freq=3)\n vocab = self.TEXT.vocab.itos\n return (vocab)\n\n def fetch_SENT(self):\n train, self.sent_test = datasets.IMDB.splits(self.TEXT, self.LABEL)\n self.sent_train, self.sent_valid = train.split(random_state=random.seed(SEED))\n device = \"cuda:0\" if torch.cuda.is_available() else 'cpu'\n train_iter, val_iter, test_iter = data.BucketIterator.splits(\n (self.sent_train, self.sent_valid, self.sent_test), batch_size= 20, device=device)\n return (train_iter, val_iter, test_iter)\n\n #data = self.get_iters(self.sent_train, self.sent_test)\n #return (data)\n\n def fetch_POS_vocab(self):\n try:\n vocab = self.pos_train + self.pos_valid +self.pos_test\n except:\n # Define the fields associated with the sequences.\n WORD = data.Field(init_token=\"\", eos_token=\"\")\n UD_TAG = data.Field(init_token=\"\", eos_token=\"\")\n PTB_TAG = data.Field(init_token=\"\", eos_token=\"\")\n self.pos_train, self.pos_valid, self.pos_test = datasets.UDPOS.splits(fields=(('word', WORD), ('udtag', UD_TAG), ('ptbtag', PTB_TAG)))\n print(self.pos_train.fields)\n print(len(self.pos_train))\n print(vars(self.pos_train[0]))\n WORD.build_vocab(self.pos_train.word, min_freq=3)\n #WORD.build_vocab(self.pos_valid.word, min_freq=3)\n #WORD.build_vocab(self.pos_test.word, min_freq=3)\n UD_TAG.build_vocab(self.pos_train.udtag)\n PTB_TAG.build_vocab(self.pos_train.ptbtag)\n vocab = WORD.vocab.itos\n return (vocab)\n\n def fetch_POS(self):\n train, self.pos_test = datasets.UDPOS.splits(self.TEXT, self.LABEL)\n self.pos_train, self.pos_valid = train.split(random_state=random.seed(SEED))\n device = \"cuda:0\" if torch.cuda.is_available() else 'cpu'\n train_iter, val_iter, test_iter = data.BucketIterator.splits(\n (self.pos_train, self.pos_valid, self.pos_test), batch_size= 20, device=device)\n\n print(\"Batch Info\")\n batch = next(iter(train_iter))\n print(batch.text)\n print(batch.label)\n\n return (train_iter, val_iter, test_iter)\n\n #data = self.get_iters(self.pos_train, self.pos_test)\n #return (data)\n\n def fetch_TREC_vocab(self):\n try:\n vocab = self.get_vocab(self.trec_train, self.trec_test)\n except:\n train, self.trec_test = datasets.TREC.splits(self.TEXT, self.LABEL, fine_grained=True)\n self.trec_train, self.trec_valid = train.split(random_state=random.seed(SEED))\n self.TEXT.build_vocab(self.trec_train.word, min_freq=3)\n #self.TEXT.build_vocab(self.trec_valid.word, min_freq=3)\n #self.TEXT.build_vocab(self.trec_test.word, min_freq=3)\n vocab = self.TEXT.vocab.itos\n return (vocab)\n\n def fetch_TREC(self):\n train, self.trec_test = datasets.TREC.splits(self.TEXT, self.LABEL, fine_grained=True)\n self.trec_train, self.trec_valid = train.split(random_state=random.seed(SEED))\n device = \"cuda:0\" if torch.cuda.is_available() else 'cpu'\n train_iter, val_iter, test_iter = data.BucketIterator.splits(\n (self.trec_train, self.trec_valid, self.trec_test), batch_size= 20, device=device)\n return (train_iter, val_iter, test_iter)\n #data = self.get_iters(self.trec_train, self.trec_test)\n #return (data)\n\n","sub_path":"data/downstream.py","file_name":"downstream.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"172449832","text":"import requests\nfrom apis.config import base_url\n\n__author__ = 'Antonio Martín González'\n__email__ = 'ant.martin.gonzalez@gmail.com'\n\n\ndef get_vehicles(id: int = None):\n url = base_url + '/vehicles'\n if id is not None:\n url = url + 
f'/{id}'\n    response = requests.get(url=url)\n    return response\n","sub_path":"apis/vehicles.py","file_name":"vehicles.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"608181849","text":"from sys import version_info\r\nimport pandas as pd\r\nfrom helpers import valid_data\r\nfrom models import VGG as Factory\r\nfrom helpers.model_builder import fit_and_predict, build_model\r\n\r\nif version_info.major != 3 or version_info.minor != 6:\r\n    raise Exception('Version error.')\r\n\r\nvalid_data()\r\n\r\ndf = pd.DataFrame()\r\n\r\nmodel = build_model(Factory)\r\n\r\npred, class_indices = fit_and_predict(model.vgg_16_standard(),\r\n                                      image_size=model.get_image_size(),\r\n                                      batch_size=64,\r\n                                      save_name='vgg',\r\n                                      epochs=50)\r\n# epochs=1,\r\n# steps_per_epoch=1)\r\n\r\nprint(class_indices)\r\n\r\nfor i in range(len(pred)):\r\n    index = i - 1\r\n    pairs = pred[i]\r\n    df.at[index, 'cat'] = pairs[0]\r\n    df.at[index, 'dog'] = pairs[1]\r\n    # print(pairs)\r\n\r\ndf.to_csv('pred.csv', index=None)\r\nprint(df.head(10))\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"290028300","text":"# -*- coding: utf-8 -*-\n# @Author: WuLC\n# @Date: 2016-06-01 22:36:33\n# @Last modified by: WuLC\n# @Last Modified time: 2016-06-03 23:05:49\n# @Email: liangchaowu5@gmail.com\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\n\n# method 1,recursively\nclass Solution(object):\n    def postorderTraversal(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: List[int]\n        \"\"\"\n        result = []\n        if root == None:\n            return result\n        result += self.postorderTraversal(root.left)\n        result += self.postorderTraversal(root.right)\n        result.append(root.val)\n        return result\n\n\n# method 2,iteratively\nclass Solution(object):\n    def postorderTraversal(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: List[int]\n        \"\"\"\n        stack, result = [], []\n        curr_node = root\n        while len(stack)!=0 or curr_node != None:\n            if curr_node != None:\n                # result.insert(0, curr_node.val)\n                result.append(curr_node.val)\n                stack.append(curr_node)\n                curr_node = curr_node.right\n            else:\n                tmp = stack.pop()\n                curr_node = tmp.left\n        result.reverse()\n        return result","sub_path":"Algorithm/Python/145. Binary Tree Postorder Traversal.py","file_name":"145. 
Binary Tree Postorder Traversal.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"599202543","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 5 15:32:00 2020\n\n@author: Devineni\n\"\"\"\n\nimport pandas as pd\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom uncertainties import ufloat_fromstr\nfrom uncertainties import ufloat\n\nimport xlrd\n\nimport pymysql\nfrom sqlalchemy import create_engine\n\ndef prRed(skk): print(\"\\033[31;1;m {}\\033[00m\" .format(skk)) \nimport datetime as dt\n\nc = ['#179C7D','#F29400','#1F82C0','#E2001A','#B1C800']\n# c = ['#179C7D','#F29400','#1F82C0']*4\n#%%\nimport datetime\nimport matplotlib.dates as mdates\n\nimport matplotlib.units as munits\nfrom pylab import rcParams\nrcParams['figure.figsize'] = 7,4.5\nplt.rcParams[\"font.family\"] = \"calibri\"\nplt.rcParams[\"font.weight\"] = \"normal\"\nplt.rcParams[\"font.size\"] = 10\n\nfont = {'family': 'calibri',\n 'color': 'black',\n 'weight': 'normal',\n 'size': 16,\n }\ndf = pd.read_excel(\"C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/1_Evaluation/results/results_final.xlsx\", sheet_name=\"plotting\")\ndf1 = df.iloc[:8] \ndf2 = df.iloc[8:]\n \n\n#%%\ndef autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{:.0f}%'.format(round(height,0)),\n xy=(rect.get_x() + rect.get_width() / 4, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n \n\nbdf = pd.read_excel(\"C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/1_Evaluation/results/results_final.xlsx\", sheet_name=\"barplot\")\nbdf1 = bdf.iloc[:8] \nbdf2 = bdf.iloc[8:]\nktbdf = bdf.loc[16:]\nrbdf = bdf.loc[0:15]\n\n\n#%% Ea Bar plot\nfrom matplotlib.lines import Line2D\n\nfig, ax = plt.subplots()\nfor i in range(0,len(rbdf),4):\n rects1 = ax.bar(rbdf[\"Measurement\"].to_numpy()[i:i+2], rbdf[\"Ea%\"].to_numpy()[i:i+2],yerr=rbdf[\"std%\"].to_numpy()[i:i+2],align='center',color = ['#1F77B4','#FF7F0E'],ecolor='black',capsize=10, edgecolor = c[4])\n rects2 = ax.bar(rbdf[\"Measurement\"].to_numpy()[i+2:i+4], rbdf[\"Ea%\"].to_numpy()[i+2:i+4],yerr=rbdf[\"std%\"].to_numpy()[i+2:i+4],align='center',color = ['#1F77B4','#FF7F0E'],ecolor='black',capsize=10)\n autolabel(rects1)\n autolabel(rects2)\n\nrects3 = ax.bar(ktbdf[\"Measurement\"].to_numpy()[0:2], ktbdf[\"Ea%\"].to_numpy()[0:2],yerr=ktbdf[\"std%\"].to_numpy()[0:2],align='center',color = '#FF7F0E',ecolor='black',capsize=10, edgecolor = c[4])\nrects4 = ax.bar(ktbdf[\"Measurement\"].to_numpy()[2:4], ktbdf[\"Ea%\"].to_numpy()[2:4],yerr=ktbdf[\"std%\"].to_numpy()[2:4],align='center',color = '#FF7F0E',ecolor='black',capsize=10)\nautolabel(rects3)\nautolabel(rects4)\n\n\nlegend_elements = [Line2D([0], [0], marker='o', color='w', label='Balanced case',markerfacecolor='#1F77B4', markersize=10),\n Line2D([0], [0], marker='o', color='w', label='Exhaust case',markerfacecolor='#FF7F0E', markersize=10),\n Line2D([0], [0], color=c[4], label='summer',markerfacecolor='#FF7F0E', markersize=10),\n \n ]\n\nax.legend(handles=legend_elements, loc='upper right', labelspacing = 1)\n\n\nax.set_ylabel('Global Air Exchange Efficiency ea')\nax.set_xticks(bdf[\"Measurement\"].to_numpy())\nax.set_xticklabels(bdf[\"Measurement\"].to_numpy(), rotation='vertical')\nax.set_title('ESHL ea comparison for 
Summer and Winter (All Sensors)')\n# ax.yaxis.grid(b=True, color='#C0C0C0', linestyle='-')\n\nax.axhline(y=50, color = c[0], linestyle='dashed')\n\nfig = plt.gcf() # get current figure\n# fig.set_size_inches(22, 9)\n# when saving, specify the DPI\nplt.tight_layout()\nplt.savefig(\"myplot.png\", dpi = 100)\n\n# Save the figure and show\nplt.tight_layout()\n \n\nplt.show() \n \n# path = \"C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/1_Evaluation/python_files/plots/Ea_plots/\"\n# plt.savefig(path+\"Ea_bar_plot.png\", dpi = 300, bbox_inches='tight')\n\n\n\n\n\n\n \n \n \n \n \n#%% Temperature effect on Ea\n\nfrom uncertainties import unumpy\nfig, ax = plt.subplots()\n\nfor line in range(0,df1.shape[0]):\n ax.text((df1[\"Delta T\"][line]+0.025), (df1[\"Ea\"][line]), df1[\"Measurement\"][line], horizontalalignment='center', size=12, color='black', weight='normal') \n \n \ndf1.plot.scatter(\"Delta T\",\"Ea\", ax = ax)\n \nfor i in range(4):\n plt.errorbar(df1.iloc[[i,i+4],:][\"Delta T\"].to_numpy(), df1.iloc[[i,i+4],:][\"Ea\"].to_numpy(),df1.iloc[[i,i+4],:][\"std\"].to_numpy(), capsize = 4, label = df1.iloc[i][\"Measurement\"][2:6])\nplt.ylim(0.2,0.8)\nax.axhline(0.5,color = c[0], linestyle='dashed')\n\nplt.legend()\n\nplt.show() \n \n# path = \"C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/1_Evaluation/python_files/plots/Ea_plots/\"\n# plt.savefig(path+\"del_T_vs_Ea_Herdern.png\", dpi = 300, bbox_inches='tight')\n\n\n #%%%\nfig, ax = plt.subplots()\n\nfor line in range(0,df2.shape[0]):\n ax.text((df2.iloc[line,:][\"Delta T\"]+0.025), (df2.iloc[line,:][\"Ea\"]), df2.iloc[line,:][\"Measurement\"], horizontalalignment='center', size=12, color='black', weight='normal') \n \n \ndf2.plot.scatter(\"Delta T\",\"Ea\", ax = ax)\n \nfor i in range(6):\n plt.errorbar(df2.iloc[[i,i+6],:][\"Delta T\"].to_numpy(), df2.iloc[[i,i+6],:][\"Ea\"].to_numpy(), df2.iloc[[i,i+6],:][\"std\"].to_numpy(), capsize =4, label = df2.iloc[i][\"Measurement\"][2:6])\nplt.ylim(0.2,0.8)\nplt.xlim(-25,15)\nplt.legend()\nplt.show() \n\n\n# path = \"C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/1_Evaluation/python_files/plots/Ea_plots/\"\n# plt.savefig(path+\"del_T_vs_Ea_ESHL.png\", dpi = 300, bbox_inches='tight') \n\n\n#%% 1) NTC vs Air age Herdern\nfig, ax = plt.subplots()\nax.plot(np.array([0,5]),np.array([0,5]), color = c[0] , label = \"Mixed ventilation\", linestyle='dashed')\n\n\nfor i in range(len(df1)):\n if \"W_\" in df1.iloc[i,:][\"Measurement\"]:\n plt.scatter(df1.iloc[[i],:][\"NTC\"].to_numpy(), df1.iloc[[i],:][\"tau\"].to_numpy(), color = df1.iloc[i,:][\"color\"] , marker=(5, 2), label = \"winter\")\n else:\n plt.scatter(df1.iloc[[i],:][\"NTC\"].to_numpy(), df1.iloc[[i],:][\"tau\"].to_numpy(), color = df1.iloc[i,:][\"color\"] , label = \"summer\")\n\n# syntax to not repeat lables\nhandles, labels = plt.gca().get_legend_handles_labels()\nby_label = dict(zip(labels, handles))\nplt.legend(by_label.values(), by_label.keys())\n\n# path = \"C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/1_Evaluation/python_files/plots/Ea_plots/\"\n# plt.savefig(path+\"NTC_vs_tau_herdern.png\", dpi = 300, bbox_inches='tight')\n\n#%% 2) NTC vs Air age ESHL\nfig, ax = plt.subplots()\nax.plot(np.array([0,5]),np.array([0,5]), color = c[0] , label = \"Mixed ventilation\", linestyle='dashed')\n\n\nfor i in range(len(df2)):\n if \"W_\" in df2.iloc[i,:][\"Measurement\"]:\n plt.scatter(df2.iloc[[i],:][\"NTC\"].to_numpy(), df2.iloc[[i],:][\"tau\"].to_numpy(), color = df2.iloc[i,:][\"color\"] , marker=(5, 2), label = 
\"winter\")\n else:\n plt.scatter(df2.iloc[[i],:][\"NTC\"].to_numpy(), df2.iloc[[i],:][\"tau\"].to_numpy(), color = df2.iloc[i,:][\"color\"] , label = \"summer\")\n\n# syntax to not repeat lables\nhandles, labels = plt.gca().get_legend_handles_labels()\nby_label = dict(zip(labels, handles))\nplt.legend(by_label.values(), by_label.keys())\n\n# path = \"C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/1_Evaluation/python_files/plots/Ea_plots/\"\n# plt.savefig(path+\"NTC_vs_tau_ESHL.png\", dpi = 300, bbox_inches='tight')\n\n#%% 3) ACH vs Air age Herdern and ESHL\n#%%% curve plot\nfig, ax = plt.subplots()\nx = np.linspace(0.15,2.75,100)\n\ny = 1/(x)\ny1 = 1/(2*x)\nax.plot(x,y1, color = c[3], linestyle='dashed')\n\nax.plot(x,y, color = c[0], linestyle='dashed')\n\n\nax.axvline(0.6, color = 'silver', linestyle='dotted', label = \"DIN EN 15251\")\nax.axvline(0.42, color = '#1F82C0', linestyle='dotted', label = \"Herdern DIN 1946-6\" )\nax.axvline(0.48, color = '#002060', linestyle='dotted', label = \"ESHL DIN 1946-6\")\n\n\nfor i in range(len(df)):\n if \"Herdern\" in df.iloc[i,:][\"Measurement\"]:\n if \"W_\" in df.iloc[i,:][\"Measurement\"]:\n plt.scatter(df.iloc[[i],:][\"ACH\"].to_numpy(), df.iloc[[i],:][\"tau\"].to_numpy(), color = df.iloc[i,:][\"color\"] , marker=(5, 2), label = \"Herdern winter\")\n else:\n plt.scatter(df.iloc[[i],:][\"ACH\"].to_numpy(), df.iloc[[i],:][\"tau\"].to_numpy(), color = df.iloc[i,:][\"color\"] , label = \"Herdern summer\")\n else:\n if \"W_\" in df.iloc[i,:][\"Measurement\"]:\n plt.scatter(df.iloc[[i],:][\"ACH\"].to_numpy(), df.iloc[[i],:][\"tau\"].to_numpy(), color = df.iloc[i,:][\"color\"] , marker=(5, 2), label = \"ESHL winter\")\n else:\n plt.scatter(df.iloc[[i],:][\"ACH\"].to_numpy(), df.iloc[[i],:][\"tau\"].to_numpy(), color = df.iloc[i,:][\"color\"] , label = \"ESHL summer\")\n# for line in range(0,df.shape[0]):\n# ax.text((df[\"ACH\"][line]), (df[\"tau\"][line]), df[\"Measurement\"][line], horizontalalignment='center', size=12, color='black', weight='normal') \n \n\n# syntax to not repeat lables\nhandles, labels = plt.gca().get_legend_handles_labels()\nby_label = dict(zip(labels, handles))\nplt.legend(by_label.values(), by_label.keys())\n\n# path = \"C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/1_Evaluation/python_files/plots/Ea_plots/\"\n# plt.savefig(path+\"ACH_vs_tau_herdern_eshl.png\", dpi = 300, bbox_inches='tight')\n\n#%%% log - plot\nfig, ax = plt.subplots()\nx = np.linspace(0.15,2.75,100)\n\ny = 1/(x)\ny1 = 1/(2*x)\nax.plot(x,y1, color = c[3], linestyle='dashed')\n\nax.plot(x,y, color = c[0], linestyle='dashed')\n\nax.axvline(0.2, color = 'silver')\nax.axvline(0.4, color = 'silver')\nax.axvline(0.6, color = 'silver', linestyle='dotted', label = \"DIN EN 15251\")\nax.axvline(0.42, color = '#1F82C0', linestyle='dotted', label = \"Herdern DIN 1946-6\" )\nax.axvline(0.48, color = '#002060', linestyle='dotted', label = \"ESHL DIN 1946-6\")\nax.axvline(0.8, color = 'silver')\nax.axvline(1, color = 'silver')\nax.axvline(2, color = 'silver')\nax.axvline(3, color = 'silver')\n\nplt.yscale('log')\nplt.xscale('log')\nfor i in range(len(df)):\n if \"Herdern\" in df.iloc[i,:][\"Measurement\"]:\n if \"W_\" in df.iloc[i,:][\"Measurement\"]:\n plt.scatter(df.iloc[[i],:][\"ACH\"].to_numpy(), df.iloc[[i],:][\"tau\"].to_numpy(), color = df.iloc[i,:][\"color\"] , marker=(5, 2), label = \"Herdern winter\")\n else:\n plt.scatter(df.iloc[[i],:][\"ACH\"].to_numpy(), df.iloc[[i],:][\"tau\"].to_numpy(), color = df.iloc[i,:][\"color\"] , label = \"Herdern 
summer\")\n else:\n if \"W_\" in df.iloc[i,:][\"Measurement\"]:\n plt.scatter(df.iloc[[i],:][\"ACH\"].to_numpy(), df.iloc[[i],:][\"tau\"].to_numpy(), color = df.iloc[i,:][\"color\"] , marker=(5, 2), label = \"ESHL winter\")\n else:\n plt.scatter(df.iloc[[i],:][\"ACH\"].to_numpy(), df.iloc[[i],:][\"tau\"].to_numpy(), color = df.iloc[i,:][\"color\"] , label = \"ESHL summer\")\n# syntax to not repeat lables\nhandles, labels = plt.gca().get_legend_handles_labels()\nby_label = dict(zip(labels, handles))\nplt.legend(by_label.values(), by_label.keys(), loc = \"lower left\")\n\n# path = \"C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/1_Evaluation/python_files/plots/Ea_plots/\"\n# plt.savefig(path+\"ACH_vs_tau_log.png\", dpi = 300, bbox_inches='tight')\n\n\n\n\n\n#%% ACH vs EAi\n\nfig, ax = plt.subplots()\n\nax.axvline(0.6, color = 'silver', linestyle='dotted', label = \"DIN EN 15251\")\nax.axvline(0.42, color = '#1F82C0', linestyle='dotted', label = \"Herdern DIN 1946-6\" )\nax.axvline(0.48, color = '#002060', linestyle='dotted', label = \"ESHL DIN 1946-6\")\nax.axhline(50,color = c[0], linestyle='dashed')\n\n\nfor i in range(len(df)):\n if \"Herdern\" in df.iloc[i,:][\"Measurement\"]:\n if \"W_\" in df.iloc[i,:][\"Measurement\"]:\n plt.scatter(df.iloc[[i],:][\"ACH\"].to_numpy(), df.iloc[[i],:][\"Ea%\"].to_numpy(), color = df.iloc[i,:][\"color\"] , marker=(5, 2), label = \"Herdern winter\")\n else:\n plt.scatter(df.iloc[[i],:][\"ACH\"].to_numpy(), df.iloc[[i],:][\"Ea%\"].to_numpy(), color = df.iloc[i,:][\"color\"] , label = \"Herdern summer\")\n else:\n if \"W_\" in df.iloc[i,:][\"Measurement\"]:\n plt.scatter(df.iloc[[i],:][\"ACH\"].to_numpy(), df.iloc[[i],:][\"Ea%\"].to_numpy(), color = df.iloc[i,:][\"color\"] , marker=(5, 2), label = \"ESHL winter\")\n else:\n plt.scatter(df.iloc[[i],:][\"ACH\"].to_numpy(), df.iloc[[i],:][\"Ea%\"].to_numpy(), color = df.iloc[i,:][\"color\"] , label = \"ESHL summer\")\n\n\n\nfrom scipy.optimize import curve_fit\nspl_data = df.loc[:,[\"Measurement\",\"ACH\",\"Ea\"]]\nspl_data1 = spl_data.iloc[:8] \nspl_data2 = spl_data.iloc[8:]\nspl_data1 = spl_data1.sort_values(by=['ACH'])\nspl_data2 = spl_data2.sort_values(by=['ACH'])\n\ndef func(x, a, b):\n return a * pow(x,-b) \nxdata = spl_data1[\"ACH\"].to_numpy()\nydata = spl_data1[\"Ea\"].to_numpy()\n\npopt, pcov = curve_fit(func, xdata, ydata)\nx = np.linspace(0.15,2.75,100)\ny = popt[0]*pow(x,-popt[1])\n\nplt.plot(x, y*100, '#1F82C0', label = r'$\\mathrm{\\varepsilon}^{\\mathrm{a}}$' +\" Herdern\")\n\n\nxdata = spl_data2[\"ACH\"].to_numpy()\nydata = spl_data2[\"Ea\"].to_numpy()\n\n\npopt, pcov = curve_fit(func, xdata, ydata)\nx = np.linspace(0.15,2.75,100)\ny = popt[0]*pow(x,-popt[1])\n\nplt.plot(x, y*100, '#002060')\n\n\n\n\n\nplt.ylim(0,100)\n# syntax to not repeat lables\nhandles, labels = plt.gca().get_legend_handles_labels()\nby_label = dict(zip(labels, handles))\nplt.legend(by_label.values(), by_label.keys())\n\n# path = \"C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/1_Evaluation/python_files/plots/Ea_plots/\"\n# plt.savefig(path+\"ACH_vs_Eai.png\", dpi = 300, bbox_inches='tight')\n\n\n\n#%%\n#%% Temperature effect on Ea\n\n\n\n\n\n\n\n\n\n\n\n\n\n#%%\n\n\n\n\n\n\n\n\n\n\n\n ","sub_path":"results_plot.py","file_name":"results_plot.py","file_ext":"py","file_size_in_byte":13545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"633144683","text":"import pprint\n\ndef transpose(mat):\n\tmat = [[row[i] for row in mat]for i in 
range(0,len(mat[0]))]\n\treturn mat\n\n\ndef exchangecol(mat):\n\tfor i in range(0,len(mat)):\n\t\tmat[i][0],mat[i][len(mat[0])-1] = mat[i][len(mat[0])-1],mat[i][0]\n\n\treturn mat\n\n\ndef exchangerow(mat):\n for i in range(0,len(mat[0])):\n mat[0][i],mat[len(mat)-1][i] = mat[len(mat)-1][i],mat[0][i]\n\n return mat\n\n\ndef rotateright(mat):\n\tmat1 = transpose(mat)\n\tmat1 = exchangecol(mat1)\n\t\n\treturn mat1\n\ndef rotateleft(mat):\n mat1 = transpose(mat)\n mat1 = exchangerow(mat1)\n\n return mat1\n\n\nmat = [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n\nmat1 = rotateleft(mat)\n\nmat2 = rotateright(mat)\n\npprint.pprint(mat1)\npprint.pprint(mat2)\n\n\n\"\"\"\nmat = transpose(mat)\n\nmat = exchangecol(mat)\n\nmat = exchangerow(mat)\n\t\npprint.pprint(mat)\n\"\"\"\n\n\n\n","sub_path":"python/rotat_matrix.py","file_name":"rotat_matrix.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"299610413","text":"#!/usr/bin/env python2\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\n\ndef runcommand(command):\n print(command)\n time.sleep(10)\n return os.system(command)\n\ny_values = {\n \"sintel\": {\n 6: (\n (1, [30, 32]),\n (2, [20, 22]),\n (4, [14, 16]),\n (8, [12, 14]),\n (16, [10, 12]),\n ),\n 12: (\n (1, [26]),\n (2, [14, 16]),\n (4, [12, 14]),\n (8, [10, 12]),\n (16, [10, 12]),\n ),\n 24: (\n (1, [22, 24]),\n (2, [14, 16]),\n (4, [12, 14]),\n )\n },\n \"tears\": {\n 6: (\n (1, [30, 32]),\n (2, [24, 26]),\n (4, [18, 20]),\n (8, [16, 18]),\n (16, [16, 18]),\n ),\n 12: (\n (1, [26, 28]),\n (2, [20, 22]),\n (4, [16, 18]),\n (8, [16]),\n (16, [14, 16]),\n ),\n 24: (\n (1, [26, 28]), #[26, 28]\n (2, [20, 22]),\n (4, [16, 18]),\n )\n },\n}\n\nRUN_K = \"NOUPLOAD=1 REGION={region} FRAMES={num_frames} ~/excamera-results/scripts/run_K.sh {movie} {kfdist} {nworkers} {offset} {quality}\"\n\nif __name__ == '__main__':\n if len(sys.argv) != 6:\n print(\"usage: {} \".format(sys.argv[0]))\n sys.exit(1)\n\n movie = sys.argv[1]\n region = sys.argv[2]\n num_frames = int(sys.argv[3])\n idx = int(sys.argv[4])\n REPEAT_TIMES = int(sys.argv[5])\n\n assert(movie == \"sintel\" or movie == \"tears\")\n assert((num_frames == 6 and idx <= 3) or (num_frames == 12 and idx <= 1)\n or (num_frames == 24 and idx == 0))\n\n if movie == \"sintel\":\n TOTAL_CHUNKS = 3552\n\n if num_frames == 6:\n NUM_WORKERS = [880, 896, 880, 896]\n elif num_frames == 12:\n NUM_WORKERS = [880, 896]\n elif num_frames == 24:\n NUM_WORKERS = [888]\n else:\n TOTAL_CHUNKS = 2936\n\n if num_frames == 6:\n NUM_WORKERS = [736, 736, 736, 728]\n elif num_frames == 12:\n NUM_WORKERS = [736, 732]\n elif num_frames == 24:\n NUM_WORKERS = [734]\n\n with open(\"run_log_%s\" % time.time(), \"w\") as runlog:\n for K, ys in y_values[movie][num_frames]:\n for y in ys:\n for i in range(REPEAT_TIMES):\n print(\"Speed test ({}) for s{:02d}_k{:02d}-y{:02d}\".format(i, num_frames, K, y))\n\n retval = runcommand(RUN_K.format(region=region,\n kfdist=K,\n num_frames=num_frames,\n nworkers=NUM_WORKERS[idx],\n offset=sum(NUM_WORKERS[:idx]),\n quality=y,\n movie=movie))\n\n if retval == 0:\n print(\"[OK] speed test ({}) for s{:02d}_k{:02d}-y{:02d}\".format(i, num_frames, K, y), file=runlog)\n else:\n print(\"[FAIL] speed test ({}) for s{:02d}_k{:02d}-y{:02d}\".format(i, num_frames, K, y), 
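# outcome lines are appended to the timestamped run log so long batches can be audited later\n                          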
file=runlog)\n","sub_path":"scripts/speed_run.py","file_name":"speed_run.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"49715242","text":"import math\r\n\r\ndef createD(upper):\r\n #create a string with the concatenation of the first upper positive integers\r\n s = \"\"\r\n for i in range(1,upper+1):\r\n s = s + str(i)\r\n return s\r\n\r\n#print(len(createD(190000))) #experimental verification shows this is enough length\r\n\r\ndef problem40():\r\n s = createD(190000)\r\n return int(s[0])*int(s[9])*int(s[99])*int(s[999])*int(s[9999])*int(s[99999])*int(s[999999])\r\n\r\nprint(problem40())\r\n","sub_path":"040.py","file_name":"040.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"417113536","text":"from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\nfrom math import pi, atan, sqrt, sin, cos\nfrom random import randrange\nfrom PyQt5 import QtWidgets,QtGui\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\nfrom random import randrange\nimport numpy as np\nfrom qiskit import *\nimport os\nimport time\nimport sys\n\n\ndef bir():\n\n os.system(\"title MisFortune - İkinci Soru\")\n os.system(\"cls\")\n print(\"\\n\\n MisFortune - Birinci Soru\")\n \n app = QtWidgets.QApplication(sys.argv)\n \n pencere = QtWidgets.QWidget()\n \n pencere.setWindowTitle(\"MisFortune - ikinci Soru\")\n \n etiket = QtWidgets.QLabel(pencere)\n \n etiket.setText(\"\"\"\n \n MisFortune\n \n \n Kayan Noktalı Sayılar (Float)\n \n \n Kayan Noktalı sayı, real sayıların bilgisayardaki karşılığıdır.\n \n örneğin 1.0 Kayan noktalı sayıdır.\n \n Klasik Bilgisayara geçirilen bir sayı ikilik tabanda yazılır.\n \n Örneğin 10 sayısının ikilik tabana çevirilmiş hali 0101 şeklinde yazılır.\n\n Bunun sebebi ise klasik bilgisayarın silisyumdan yapılmasıdır.\n\n \n \n \"\"\")\n\n\n etiket.move(10,2)\n pencere.setGeometry(50,50,500,300)\n pencere.show()\n return(app.exec_())\n\ndef bir1(add):\n\n os.system(\"cls\")\n print(\"\\n\\n MisFortune - İkinci Soru ---{}---\".format(add))\n secim = input(\"\\n\\n a = 1.0\\n b = 0.9\\n\\n a-b işleminin sonucunun ne çıkmasını beklersiniz = \\n\\n a) 0.1'e eşit\\n\\n b) 0.1'e eşit değil\\n\\n Secim = \")\n\n kontrol=-1\n\n if(secim=='b' or secim=='B'):\n kontrol=1\n\n return kontrol\n\n\ndef iki():\n\n os.system(\"title MisFortune\")\n os.system(\"cls\")\n print(\"\\n\\n MisFortune\")\n\n circ = QuantumCircuit(3)\n\n\n\n circ.h(0)\n\n circ.cx(0, 1)\n\n circ.cx(0, 2)\n\n print(\"\\n \")\n print(circ.draw())\n\n print(\"\\n\\n Seninin için 10 kuantum bit ve 10 klasik bit ile bir kuantum devresi tasarladık. 
\")\n test = int(input(\"\\n\\n bu devreyi kaç defa çalıştırmak istersin = \"))\n\n qreg3 = QuantumRegister(10)\n creg3 = ClassicalRegister(10)\n\n mycircuit3 = QuantumCircuit(qreg3,creg3)\n\n\n picked_qubits=[] \n\n for i in range(10):\n \n if randrange(2) == 0:\n \n mycircuit3.x(qreg3[i])\n picked_qubits.append(i)\n\n \n mycircuit3.measure(qreg3,creg3) \n\n\n mycircuit3.draw(reverse_bits=True)\n\n\n job = execute(mycircuit3,Aer.get_backend('qasm_simulator'),shots=test)\n\n counts = job.result().get_counts(mycircuit3)\n \n print(\"\\n {'ölçtüğümüz kuantum bitleri' : devreyi çalıştırma sayısı } = \",counts)\n\n\ndef ucz():\n\n os.system(\"title MisFortune - İkinci Soru\")\n os.system(\"cls\")\n print(\"\\n\\n MisFortune - Birinci Soru\")\n\n print(\"\\n\\n |v)=(a−0.2) ve |u) = (1/√b) / (−1/√3) işleminin sonucunda a,b nin cevabı nedir (hesap makinesi kullanınız).\")\n a = float(input(\"\\n\\n a = \"))\n b = float(input(\"\\n\\n b = \"))\n\n\n values = [-0.2]\n\n total = 0\n for i in range(len(values)):\n total += values[i]**2;\n\n a1 = (1-total)**0.5\n a2 = -(1-total)**0.5\n\n\n values = [-1/(3**0.5)]\n\n total = 0 \n for i in range(len(values)):\n total += values[i]**2; \n \n b1 = 1/(1-total)\n\n kontrol = -5\n\n if(b==b1):\n kontrol = -1\n\n elif(a==a1 or a==a2):\n\n kontrol=kontrol+2\n\n\n return kontrol\n\n\ndef dort():\n\n os.system(\"cls\")\n print(\"\\n\\n MisFortune\")\n\n print(\"\\n\\n Tek bir qubit ile bir kuantum devresi oluşturalım.\")\n print(\"\\n\\n Bu devreyi başlatmak için rastgele bir açı [0, 2pi] belirleyelim ve souçları kontrol edelim.\")\n giris = int(input(\"\\n\\n 0 ila 360 derece arasında bir sayı giriniz = \"))\n\n theta = giris\n state = [cos(theta), sin(theta)]\n print(\"\\n\\n Belirlediğiniz açı : {:.10f}\".format(theta))\n print(\"\\n\\n Durum başlatılıyor : ({:.10f}, {:.10f})\".format(*state))\n\n# Create a quantum circuit\n qreg = QuantumRegister(1)\n creg = ClassicalRegister(1)\n circuit = QuantumCircuit(qreg,creg)\n\n# Initialize circuit\n circuit.initialize(state, qreg)\n\n# Now execute this circuit\n job = execute(circuit, Aer.get_backend('statevector_simulator'))\n statevector = job.result().get_statevector(circuit)\n print(\"\\n\\n Belirtilen vektör : ({:.10f}, {:.10f})\".format(*[component.real for component in statevector]))\n","sub_path":"islem.py","file_name":"islem.py","file_ext":"py","file_size_in_byte":4325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"482536434","text":"'''\nProblem 23\n\nA perfect number is a number for which the sum of its proper divisors is exactly equal to the number. For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect number.\n\nA number n is called deficient if the sum of its proper divisors is less than n and it is called abundant if this sum exceeds n.\n\nAs 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest number that can be written as the sum of two abundant numbers is 24. By mathematical analysis, it can be shown that all integers greater than 28123 can be written as the sum of two abundant numbers. 
However, this upper limit cannot be reduced any further by analysis even though it is known that the greatest number that cannot be expressed as the sum of two abundant numbers is less than this limit.\n\nFind the sum of all the positive integers which cannot be written as the sum of two abundant numbers.\n'''\n\nimport time\n\ndef is_abundant(n):\n\treturn sum(find_divisors(n)) > n\n\ndef find_divisors(n):\n\tdivisors = set([1])\n\tfor x in range(2, int(n ** 0.5) + 1):\n\t\tif n % x == 0:\n\t\t\tdivisors.add(x)\n\t\t\tdivisors.add(n // x)\n\treturn divisors\n\ndef non_abundant_sums():\n\tnums, abundant_set = set(n for n in range(28124)), set()\n\t\n\t# Build set of abundant numbers\n\tfor n in range(1, 28124):\n\t\tif is_abundant(n):\n\t\t\tabundant_set.add(n)\n\n\t# Eliminate numbers expressible as abundant sum\n\tfor x in abundant_set:\n\t\tfor y in abundant_set:\n\t\t\ttotal = x + y\n\t\t\tif total in nums:\n\t\t\t\tnums.remove(total)\n\treturn sum(nums) \n\nif __name__ == '__main__':\n\n\tstart = time.time()\n\tprint(non_abundant_sums())\n\tend = time.time()\n\n\tprint(\"Execution time: %fs\" %(end - start))\n","sub_path":"solutions/non_abundant_sums.py","file_name":"non_abundant_sums.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"173576634","text":"Student_list = {'raja':701,'teja':702,'suraj':770,'eswar':780,'bhaskar':990,'chaitu':870,'nani':760,'pavan':775,'reddy':890,'lohith':891}\nlab_system_details = {'MCA Lab':{'1':['Free','Good'],'2':['Free','Repair'],'3':['Free','Good'],'4':['Free','Good'],'5':['Free','Good'],'6':['Free','Good'],'7':['Free','Repair'],'8':['Allocated','Good'],'9':['Allocated','Good'],'10':['Allocated','Good']},'Cisco Lab':{'1':['Free','Good'],'2':['Free','Repair'],'3':['Free','Good'],'4':['Allocated','Good'],'5':['Free','Good'],'6':['Allocated','Good'],'7':['Free','Good'],'8':['Free','Good'],'9':['Allocated','Good'],'10':['Free','Good']}}\n\ndef get_list_of_free_systems(lab_name):\n #global lab_system_details\n free_system = []\n if lab_name in lab_system_details.keys():\n lab_systems = lab_system_details[lab_name]\n for system_id in lab_systems.keys():\n if lab_systems[system_id][0] == 'Free':\n if lab_systems[system_id][1] == 'Good':\n free_system.append(system_id)\n\n return free_system\n\ndef get_list_of_good_systems(lab_name):\n #global lab_system_details\n good_system = []\n if lab_name in lab_system_details.keys():\n lab_systems = lab_system_details[lab_name]\n for system_id in lab_systems.keys():\n if lab_systems[system_id][1] == 'Good':\n if lab_systems[system_id][0] == 'Free':\n good_system.append(system_id)\n\n return good_system\n\nmca = get_list_of_free_systems('MCA Lab')\ncisco = get_list_of_good_systems('Cisco Lab')\nnew = mca + cisco\n#Student_list = {'Raja':701,'Teja':702,'Suraj':770,'chandra':880}\nh = []\ng = []\nfor x,y in Student_list.items():\n h.append(x)\n g.append(y)\nfor i in range(len(mca)):\n print(h[i] + \" - \"+ str(g[i]) + \" - \" + list(lab_system_details.keys())[0] + \" - \" + str(new[i]))\nfor i in range(len(mca),len(Student_list)):\n print(h[i] + \" - \" + str(g[i]) + \" - \" + list(lab_system_details.keys())[1] + \" - \" + str(new[i]))\n \n \n \n \n \n ","sub_path":"projectworkcompletedacs1.py","file_name":"projectworkcompletedacs1.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"384375307","text":"#!/usr/bin/env python3\n# 
-*- coding: utf-8 -*-\n# get the commit count per sublevel pointwise or cumulative (c)\n# arguments are the tag as displayed by git tag and the number\n# of sublevels to be counted. If count is out of range for a \n# specific sublevel it will terminate the loop\n#\n# no proper header in this file \n# no legal/copyright ...OMG !\n# \n# things to cleanup:\n# restructure the code - use of functions \n# error handling ...where is the try..except ?\n# argument handling: you can do better right ?\n# documentation: once you understand it - fix the docs !\n# transform it into a class rather than just functions !\n\n\nimport os, re, sys, subprocess\nfrom datetime import datetime as dt\nclass count:\n    def get_commit_cnt(self, git_cmd):\n        raw_counts = git_cmd.communicate()[0]\n        # communicate() hands back None when the command produced no output\n        if raw_counts is None:\n            return 0\n        cnt = re.findall('[0-9]*-[0-9]*-[0-9]*', str(raw_counts))\n        return len(cnt)\n\n    def get_tag_hours(self, git_cmd, base):\n        seconds = git_cmd.communicate()[0]\n        return ((int(seconds)-base))//3600\n\nc = count()\n\n# get dates of all commits - unsorted \nrev = sys.argv[1]\ncumulative = 0\nif len(sys.argv) == 4:\n    if (sys.argv[3] == \"c\"):\n        cumulative = 1\n    else:\n        print(\"Don't know what you mean with %s\" % sys.argv[3])\n        sys.exit(-1)\nrev_range = int(sys.argv[2])\n\n# setup and fill in the table\nprint(\"#sublevel commits %s stable fixes\" % rev)\nprint(\"lv hour bugs\") #tag for R data.frame\nrev1 = rev\n# base time of v4.1 and v4.4 as ref base\n# fix this to extract the time of the base commit\n# from git !\n# \n# hofrat@Debian:~/git/linux-stable$ git log -1 --pretty=format:\"%ct\" v4.4\n# 1452466892\nv44 = 1452466892\n\nfor sl in range(1,rev_range+1):\n    rev2 = rev + \".\" + str(sl)\n    gitcnt = \"git rev-list --pretty=format:\\\"%ai\\\" \" + rev1 + \"...\" + rev2\n    gittag = \"git log -1 --pretty=format:\\\"%ct\\\" \" + rev2\n    git_rev_list = subprocess.Popen(gitcnt, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, shell=True)\n    commit_cnt = c.get_commit_cnt(git_rev_list)\n    if cumulative == 0:\n        rev1 = rev2\n    # if we get back 0 then it is an invalid revision number\n    if commit_cnt:\n        git_tag_date = subprocess.Popen(gittag, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, shell=True)\n        hours = c.get_tag_hours(git_tag_date, v44)\n        print(\"%d %d %d\" % (sl,hours,commit_cnt))\n    else:\n        break\n","sub_path":"320180939901-PeidunLi/homework1/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"97703028","text":"import pandas as pd\n\n#url = \"https://fbref.com/en/comps/20/schedule/Bundesliga-Scores-and-Fixtures\"\n#url = \"https://fbref.com/en/comps/22/schedule/Major-League-Soccer-Scores-and-Fixtures\"\n\nurl = \"https://www.scoreboard.com/en/rugby-union/usa/major-league-rugby/results/\"\ntables = pd.read_html(url)\n#, converters={'Wk': str,'Attendance': str})\n\nprint(tables)\nexit()\nresults = tables[0]\n\n#results = raw[raw['Date'].notnull()]\nresults = results.dropna(subset=['Home'])\nresults.drop(results[results['Day'] == 'Day'].index, inplace = True)\n\nresults.rename(columns={'xG': 'Home_xG', 'xG.1': 'Away_xG'}, inplace=True)\n\nresults.insert(0,'Season',2021)\nresults.insert(1,'Round','')\nresults.insert(2,'Wk','')\n\nresults.to_csv('csv/mls-2021.csv', 
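# index=False keeps pandas from writing the DataFrame row index as an extra CSV column\n                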
index=False)\n\n\n","sub_path":"major_league/pull.py","file_name":"pull.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"357956665","text":"import random\nWORDS=(\"python\",\"jumble\",\"easy\",\"difficult\",\"answer\",\"continue\",\"phone\",\"position\",\"game\")\nprint(\"Welcome to the word jumble game: rearrange the letters into the correct word\")\niscontinue=\"y\"\nwhile iscontinue==\"y\" or iscontinue==\"Y\":\n    word=random.choice(WORDS)\n    correct=word\n    jumble=\"\"\n    while word:\n        position=random.randrange(len(word))\n        jumble+=word[position]\n        word=word[:position]+word[(position+1):]\n    print(\"Scrambled word:\",jumble)\n    guess=input(\"\\nYour guess: \")\n    while guess !=correct and guess !=\"\":\n        print(\"Sorry, that's not correct\")\n        guess=input(\"Guess again: \")\n    if guess==correct:\n        print(\"Great, you got it!\\n\")\n    iscontinue = input(\"\\n\\nContinue? (Y/N): \")\n","sub_path":"jump7.py","file_name":"jump7.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"119356755","text":"# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution(object):\n    def addTwoNumbers(self, l1, l2):\n        \"\"\"\n        :type l1: ListNode\n        :type l2: ListNode\n        :rtype: ListNode\n        \"\"\"\n        a = []\n        b = []\n        while(l1):\n            a.append(l1.val)\n            l1 = l1.next\n        \n        while(l2):\n            b.append(l2.val)\n            l2 = l2.next\n        \n        x = int(\"\".join(map(str,a[::-1])))\n        y = int(\"\".join(map(str,b[::-1])))\n        \n        z = list(str(x+y))\n        res = ListNode(-1)\n        for n in z:\n            temp = ListNode(n)\n            temp.next = res.next\n            res.next = temp\n        \n        res = res.next\n        \n        return res","sub_path":"leetcode/002_add_two_numbers.py","file_name":"002_add_two_numbers.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"618971555","text":"class Person:\n    def __init__(self, name, lang, website):\n        self.name = name\n        self.lang = lang\n        self.website = website\n\ninfo = Person(\"hiekay\",\"python\",\"hiekay.github.io\") # instantiate Person\nprint(info.name)\nprint(info.lang)\nprint(info.website)","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"21106342","text":"\"\"\"\nforbidstates.py - a module to encapsulate the forbidden states cost function\n\"\"\"\n\nimport autograd.numpy as anp\nimport numpy as np\n\nfrom qoc.models import Cost\nfrom qoc.standard.functions import conjugate_transpose\n\nclass ForbidStates(Cost):\n    \"\"\"\n    This class encapsulates a cost function that penalizes\n    the occupation of forbidden states.\n\n    Fields:\n    cost_multiplier :: float - the weight factor for this cost\n    forbidden_states_dagger :: ndarray - the conjugate transpose of\n        the forbidden states\n    name :: str - a unique identifier for this cost\n    normalization_constant :: int - used to normalize the cost\n    requires_step_evaluation :: bool - True if the cost needs\n        to be computed at each optimization time step, False\n        if it should be computed only at the final optimization\n        time step\n    state_normalization_constants :: ndarray - the number of states\n        that each evolving state is forbidden from\n    \"\"\"\n    name = \"forbid_states\"\n    requires_step_evaluation = True\n\n\n    def __init__(self, forbidden_states, system_step_count, cost_multiplier=1.):\n        \"\"\"\n        See class definition for arguments not listed 
here.\n\n Args:\n forbidden_states :: ndarray - an array where each entry\n in the first axis is an array of states that the corresponding\n evolving state is forbidden from, that is, each evolving\n state has its own list of forbidden states\n system_step_count :: int - the number of system steps in the evolution\n \"\"\"\n super().__init__(cost_multiplier=cost_multiplier)\n self.forbidden_states_dagger = conjugate_transpose(forbidden_states)\n state_count = forbidden_states.shape[0]\n self.normalization_constant = state_count * system_step_count\n self.state_normalization_constants = np.array([state_forbidden_states.shape[0]\n for state_forbidden_states\n in forbidden_states])\n\n\n def cost(self, controls, states, system_step):\n \"\"\"\n Args:\n controls :: ndarray - the control parameters for all time steps\n states :: ndarray - an array of the initial states evolved to\n the current time step\n system_step :: int - the system time step\n\n Returns:\n cost :: float - the penalty\n \"\"\"\n cost = 0\n # Compute the fidelity for each evolution state and its forbidden states.\n for i, state_forbidden_states_dagger in enumerate(self.forbidden_states_dagger):\n state = states[i]\n state_cost = 0\n for forbidden_state_dagger in state_forbidden_states_dagger:\n inner_product = anp.matmul(forbidden_state_dagger, state)[0, 0]\n state_cost = state_cost + anp.square(anp.abs(inner_product))\n #ENDFOR\n cost = cost + anp.divide(state_cost, self.state_normalization_constants[i])\n #ENDFOR\n \n # Normalize the cost for the number of evolving states\n # and the number of time evolution steps.\n cost = (cost / self.normalization_constant)\n \n return self.cost_multiplier * cost\n\n\ndef _test():\n \"\"\"\n Run tests on the module.\n \"\"\"\n system_step_count = 10\n state0 = np.array([[1], [0]])\n forbid0_0 = np.array([[1], [0]])\n forbid0_1 = np.divide(np.array([[1], [1]]), np.sqrt(2))\n state1 = np.array([[0], [1]])\n forbid1_0 = np.divide(np.array([[1], [1]]), np.sqrt(2))\n forbid1_1 = np.divide(np.array([[1j], [1j]]), np.sqrt(2))\n states = np.stack((state0, state1,))\n forbidden_states0 = np.stack((forbid0_0, forbid0_1,))\n forbidden_states1 = np.stack((forbid1_0, forbid1_1,))\n forbidden_states = np.stack((forbidden_states0, forbidden_states1,))\n fs = ForbidStates(forbidden_states, system_step_count)\n \n cost = fs.cost(None, states, None)\n expected_cost = np.divide(5, 80)\n assert(np.allclose(cost, expected_cost,))\n\n\nif __name__ == \"__main__\":\n _test()\n","sub_path":"qoc/standard/costs/forbidstates.py","file_name":"forbidstates.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"148586519","text":"import requests\nimport json\nfrom flask import Flask, render_template, request, session\nimport ast\nimport numpy\nimport validate\n\nfirstdrink = \"\"\nmixer = \"\"\nindex = 0\n################################################################################################################################\n###############################################CLASSES##########################################################################\n################################################################################################################################\n#this is the query class that allows us to instantiate a query object and query with the string\nclass genericQuery:\n initialQuery = \"https://www.thecocktaildb.com/api/json/v1/1/filter.php?i=\"\n ingredient = \"\"\n finalQuery = \"\"\n\n 
def __init__(self, ingredientString):\n self.ingredient = ingredientString\n self.finalQuery = self.initialQuery + self.ingredient\n\n def queryData(self):\n requestData = requests.get(self.finalQuery)\n requestJson = requestData.json()\n # session.clear()\n return requestJson;\n\n#this queries based on the drink name and then returns the list of ingredients\nclass nameQuery:\n initialQuery = \"https://www.thecocktaildb.com/api/json/v1/1/search.php?s=\"\n cocktailName = \"\"\n finalQuery = \"\"\n\n def __init__(self, cocktail):\n self.cocktailName = cocktail\n self.finalQuery = self.initialQuery + self.cocktailName\n\n def queryData(self):\n requestData = requests.get(self.finalQuery)\n requestJson = requestData.json()\n return requestJson;\n\n################################################################################################################################\n################################################################################################################################\n################################################################################################################################\n\napp = Flask(__name__)\n@app.route('/')\ndef index():\n return render_template('init.html')\n\n################################################################################################################################\n################################################################################################################################\n################################################################################################################################\n\n@app.route('/', methods=['GET', 'POST'])\ndef my_form_post():\n #sets variables equal to those in the html form, need to find a better way to handle these arguments\n firstdrink = request.form['firstdrink']\n mixer = request.form['mixer']\n\n #instantiate query objects with the dropdown names\n firstIngredient = genericQuery(firstdrink)\n secondIngredient = genericQuery(mixer)\n\n #query data into alcohol and mixing variables\n alcohol = firstIngredient.queryData()\n mixing = secondIngredient.queryData()\n\n # Testing validity of queried data\n # validate.validateGenericDrink(firstdrink, alcohol)\n # validate.validateGenericDrink(mixer, mixing)\n\n listofIngredients = []\n listOfDrinkNames = []\n drinkImages = []\n\n # This is the intersection between the common elements between the alcohol and mixer queries\n intersection = [x for x in alcohol[\"drinks\"] if x in mixing[\"drinks\"]]\n index = len(intersection)-1\n #loop through and get all the names of the drinks\n while(index >= 0):\n listOfDrinkNames.append(intersection[index][\"strDrink\"])\n drinkImages.append(intersection[index][\"strDrinkThumb\"])\n index = index -1\n index = len(intersection)-1\n\n return render_template('init.html', value=json.dumps(drinkImages), names=json.dumps(listOfDrinkNames), length=index)\n # return render_template('test.html', value=listOfDrinkNames)\n # return render_template('test.html', value=drinkImages)\n # return render_template('test.html', value=alcohol)\n # return render_template('test.html', 
value=mixing)\n\n\n\n################################################################################################################################\n################################################################################################################################\n################################################################################################################################\n\n@app.route('/getIngredient', methods=['GET', 'POST'])\ndef ingredient():\n    if request.method == 'POST':\n        cocktailNumber = request.form['cocktail']\n        cocktailNumber = cocktailNumber.replace('****', ' ')\n        # cocktailNumber = int(cocktailNumber)\n        ingredientsData = None\n        ingredients = []\n        index = 0\n        keys = []\n        values = []\n        ingredientsDataObject = nameQuery(cocktailNumber)\n        ingredientsData = ingredientsDataObject.queryData()\n        #here we want to grab all the ingredients\n        ingredients = [x for x in ingredientsData[\"drinks\"]]\n\n        for key, value in ingredients[0].items():\n            keys.append(key)\n            values.append(value)\n\n        ingredients = []\n        measurements = []\n        instructions = ''\n\n        index = len(keys)-1\n        while(index >= 0):\n            if(\"strIngredient\" in keys[index]):\n                ingredients.append(values[index])\n            if(\"strMeasure\" in keys[index]):\n                measurements.append(values[index])\n            if(\"Instructions\" in keys[index]):\n                instructions = values[index]\n            index = index - 1\n        index = len(ingredients)-1\n        return render_template('test.html', instructions=instructions, ingredients=ingredients, measurements=measurements, nameofdrink=cocktailNumber, num=index)\n\n################################################################################################################################\n################################################################################################################################\n################################################################################################################################\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"128781635","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import classification_report\nimport sklearn.svm as svm\nfrom sklearn.model_selection import train_test_split\nfrom svm_trainer.util import *\n\nBUCKET_NAME = \"svmclassifier2019-mlengine\"\n\ndf = get_data(nrows=1600000)\n\ndf = clean_pre(df)\nprint(df.head())\nvectorized = TfidfVectorizer(\n    sublinear_tf=True, min_df=4, max_df=0.90, norm=\"l2\", ngram_range=(1, 2)\n)\nfeatures = vectorized.fit_transform(df.text).toarray()\nlabels = df.target\nprint(features.shape)\nmodel = svm.LinearSVC(C=1.0, random_state=42, verbose=True, tol=1e-4, loss=\"hinge\")\nX_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(\n    features, labels, df.index, test_size=0.2, random_state=42\n)\nmodel.fit(X_train, y_train)\ny_pred = model.predict(X_test)\n\nprint(classification_report(y_test, y_pred))\nwrite_data(model=model)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"55886794","text":"# -*- coding: utf-8 -*-\n\n\ndef is_keystore_file(keystore: dict) -> bool:\n    \"\"\"Checks whether the data in a keystore file is valid.\n    :return: type(bool)\n        True: When format of the keystore is valid.\n        False: 
When format of the keystore is invalid.\n    \"\"\"\n\n    root_keys = [\"version\", \"id\", \"address\", \"crypto\", \"coinType\"]\n    crypto_keys = [\"ciphertext\", \"cipherparams\", \"cipher\", \"kdf\", \"kdfparams\", \"mac\"]\n    crypto_cipherparams_keys = [\"iv\"]\n\n    return has_keys(keystore, root_keys) \\\n        and has_keys(keystore[\"crypto\"], crypto_keys) \\\n        and has_keys(keystore[\"crypto\"][\"cipherparams\"], crypto_cipherparams_keys)\n\n\ndef has_keys(target_data: dict, keys: list):\n    \"\"\"Checks that the target dict contains all of the keys in the list.\"\"\"\n    for key in keys:\n        if key not in target_data:\n            return False\n    return True\n","sub_path":"src/icon/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"286936213","text":"# coding: utf-8\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nfrom retrying import retry\nfrom common.cyclelist import cycle\n\n@retry(stop_max_attempt_number=10)\ndef connected_directed_networkgraph(n=20):\n    G = nx.DiGraph()\n    nodes = [i for i in range(1,n+1)]\n    cyclenodes = cycle(nodes)\n    G.add_nodes_from(nodes) # add n nodes\n\n    # ## generate 1 to 2 in/out edges per agent\n    # inum = np.random.choice((1,2,),1)[0]\n    # onum = np.random.choice((1,2,),1)[0]\n    for i in range(0,n):\n        ## generate 1 to 3 in/out edges per agent\n        inum = np.random.choice((1,2,3,),1)[0]\n        onum = np.random.choice((1,2,3,),1)[0]\n        ## randomly create edges to the 4 nodes on either side of each agent\n        neighbors = cyclenodes.forward(i,4) + cyclenodes.backward(i,4)\n        ineighbors = np.random.choice(neighbors,inum,replace=False)\n        oneighbors = np.random.choice(neighbors,onum,replace=False)\n        for j in ineighbors:\n            G.add_edge(nodes[i],j)\n        for j in oneighbors:\n            G.add_edge(j,nodes[i])\n    \n    if not nx.is_strongly_connected(G):\n        raise Exception()\n    else:\n        print(\"connected\")\n\n    (adjMat, maxdeg) = __network_constructure(G)\n\n    return (G, adjMat, maxdeg)\n\n\ndef connected_wattzstrogatz_networkgraph(n=15, k=3, p=0.4, s=1):\n    WSG = nx.connected_watts_strogatz_graph(n,k,p,tries=100,seed=s)\n    (adjMat, maxdeg) = __network_constructure(WSG)\n    return (WSG, adjMat, maxdeg)\n\ndef __network_constructure(G):\n    adjMat = np.array(nx.to_numpy_matrix(G)).T\n    if type(G) is nx.MultiDiGraph or type(G) is nx.DiGraph:\n        maxdeg = max(dict(G.in_degree).values())\n    else:\n        maxdeg = max(dict(G.degree).values())\n    return (adjMat, maxdeg)\n\ndef __main():\n    G, _, _ = connected_wattzstrogatz_networkgraph()\n    pos = nx.circular_layout(G)\n    nx.draw(G, pos, font_size=8)\n    # plt.savefig(path+\"/graph_n20.png\")\n    # plt.savefig(path+\"/graph_n20.pdf\")\n    plt.show()\n\nif __name__ == \"__main__\":\n    __main()","sub_path":"multiagent-deep_learning-master/networkgraph.py","file_name":"networkgraph.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"371004606","text":"def factorial_loop(n):\n    p = 1\n    for i in range(1, n + 1):\n        p *= i\n\n    return p\n\ndef nchoosek(n,k):\n    return int(factorial_loop(n)/(factorial_loop(k)*factorial_loop(n-k)))\n\ndef pascal(n):\n    linjeliste = []\n    for i in range(n):\n        linje = \"\"\n        for j in range(i + 1):\n            linje += f'{nchoosek(i,j)} '\n        linjeliste.append(linje)\n    for linje in linjeliste:\n        print(linje.center(len(linjeliste[n-1])))\npascal(int(input(\"Number of lines: 
\")))","sub_path":"pascals_trekant.py","file_name":"pascals_trekant.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"508301913","text":"import helpers, testly, re, sys\n\nfrom os import path, makedirs\nfrom shutil import rmtree\nfrom tempfile import gettempdir\nfrom pyppl.parameters import Parameter, Parameters, HelpAssembler, Commands\nfrom pyppl.exception import ParameterNameError, ParameterTypeError, ParametersParseError, ParametersLoadError\n\nnoANSI = lambda s: '\\n'.join(line.rstrip() for line in re.sub(r'\\x1B\\[[0-?]*[ -/]*[@-~]', '', s).split('\\n'))\n\nclass TestParameter (testly.TestCase):\n\n\tdef dataProvider_testInit(self):\n\t\tyield '', '', None, None, ParameterNameError, 'Expect a string with alphabetics and underlines in length 1~32'\n\t\tyield '+', '', None, None, ParameterNameError, 'Expect a string with alphabetics and underlines in length 1~32'\n\t\tyield 'a?', '', None, None, ParameterNameError, 'Expect a string with alphabetics and underlines in length 1~32'\n\t\tyield int, '', None, None, ParameterNameError, 'Not a string'\n\t\tyield 'a', 1, 'int', ['Default: 1']\n\t\tyield 'a', [], 'list', ['Default: []']\n\t\tyield 'atuple', (), 'list', ['Default: []']\n\t\tyield 'a', u'a', 'str', ['Default: \\'a\\'']\n\t\tyield 'a', '', 'str', [\"Default: ''\"]\n\t\tyield 'a', {}, None, None, ParameterTypeError, 'Unsupported parameter type: dict'\n\n\tdef testInit(self, name, value, t, desc = None, exc = None, excmsg = None):\n\t\tdesc = desc or []\n\t\tif excmsg:\n\t\t\tself.assertRaisesRegex(exc, excmsg, Parameter, name, value)\n\t\telse:\n\t\t\tparam = Parameter(name, value)\n\t\t\tself.assertIsInstance(param, Parameter)\n\t\t\tself.assertEqual(param.desc, desc)\n\t\t\tself.assertFalse(param.required)\n\t\t\tself.assertTrue(param.show)\n\t\t\tself.assertEqual(param.type, t)\n\t\t\tself.assertEqual(param.name, name)\n\t\t\tif t == 'list':\n\t\t\t\tself.assertListEqual(param.value, list(value))\n\t\t\telse:\n\t\t\t\tself.assertEqual(param.value, value)\n\t\t\tself.assertTrue(param == param)\n\t\t\tself.assertFalse(param != param)\n\n\tdef dataProvider_testSetGetAttr(self):\n\t\t# 0\n\t\tyield 'a', '', 'desc', 'whatever description', ['whatever description', \"Default: ''\"]\n\t\tyield 'a', '', 'required', True\n\t\tyield 'a', '', 'required', False\n\t\tyield 'a', True, 'required', True, None, ParameterTypeError, 'Bool option \"a\" cannot be set as required'\n\t\tyield 'a', '', 'show', True\n\t\t# 5\n\t\tyield 'a', '', 'show', False\n\t\tyield 'a', '1', 'type', 'i', 'int'\n\t\tyield 'a', '1', 'type', 'int', 'int'\n\t\tyield 'a', '1', 'type', int, 'int'\n\t\tyield 'a', '1', 'type', 'b', 'bool'\n\t\t# 10\n\t\tyield 'a', '1', 'type', 'bool', 'bool'\n\t\tyield 'a', '1', 'type', bool, 'bool'\n\t\tyield 'a', '1', 'type', 'f', 'float'\n\t\tyield 'a', '1', 'type', 'float', 'float'\n\t\tyield 'a', '1', 'type', float, 'float'\n\t\t# 15\n\t\tyield 'a', 0, 'type', 's', 'str'\n\t\tyield 'a', 0, 'type', 'str', 'str'\n\t\tyield 'a', 0, 'type', str, 'str'\n\t\tyield 'a', 0, 'type', 'l', 'list'\n\t\tyield 'a', 0, 'type', 'list', 'list'\n\t\t# 20\n\t\tyield 'a', 0, 'type', list, 'list'\n\t\tyield 'a', 0, 'type', 'l:i', 'list:int'\n\t\tyield 'a', 0, 'type', 'l:int', 'list:int'\n\t\tyield 'a', 0, 'type', 'list:int', 'list:int'\n\t\tyield 'a', 0, 'type', 'l:b', 'list:bool'\n\t\t# 25\n\t\tyield 'a', 0, 'type', 'l:bool', 'list:bool'\n\t\tyield 'a', 0, 'type', 'list:bool', 'list:bool'\n\t\tyield 'a', 0, 
'type', 'l:f', 'list:float'\n\t\tyield 'a', 0, 'type', 'l:float', 'list:float'\n\t\tyield 'a', 0, 'type', 'list:f', 'list:float'\n\t\t# 26\n\t\tyield 'a', 0, 'type', dict, None, ParameterTypeError, 'Unsupported type'\n\t\tyield 'a', '', 'value', 'a'\n\t\tyield 'a', '', 'value', 2\n\t\tyield 'a', '', 'name', 'a2'\n\n\t\tyield 'a', 1, '__dict__', {'_props': {'name': 'a', 'show': True, 'required': False, 'type': 'int', 'value': 1, 'desc': []}}\n\n\tdef testSetGetAttr(self, name, val, propname, propval, exptval = None, exception = None, msg = None):\n\t\texptval = exptval or propval\n\t\tp = Parameter(name, val)\n\t\tif exception:\n\t\t\tself.assertRaisesRegex(exception, msg, setattr, p, propname, propval)\n\t\telse:\n\t\t\tsetattr(p, propname, propval)\n\t\t\tself.assertEqual(getattr(p, propname), exptval)\n\n\tdef dataProvider_testReprStr(self):\n\t\tp1 = Parameter('a', 'a')\n\t\tp1.required = True\n\t\tyield p1,\n\n\t\tp2 = Parameter('b', 'b')\n\t\tp2.show = True\n\t\tyield p2,\n\n\t\tp3 = Parameter('c', 2)\n\t\tp3.desc = 'what'\n\t\tyield p3,\n\n\tdef testReprStr(self, p):\n\t\tself.assertEqual(\n\t\t\trepr(p), \n\t\t\t'<Parameter({}) @ {}>'.format(','.join([\n\t\t\t\tkey + '=' + repr(val) \n\t\t\t\tfor key, val in p._props.items()\n\t\t\t]), hex(id(p)))\n\t\t)\n\t\tself.assertEqual(str(p), str(p.value))\n\t\n\tdef dataProvider_testForceType(self):\n\t\tp1 = Parameter('a', 'a')\n\t\tyield p1, int, 0, ParameterTypeError\n\n\t\tp2 = Parameter('a', '')\n\t\tyield p2, int, 0, ParameterTypeError\n\n\t\tp3 = Parameter('a', '0')\n\t\tyield p3, 'list:str', ['0']\n\t\tyield p3, 'list:bool', [False]\n\n\t\tp4 = Parameter('aint', '0')\n\t\tyield p4, int, 0\n\t\t\n\t\tp5 = Parameter('a', 'False')\n\t\tyield p5, bool, 0\n\n\tdef testForceType(self, p, t, val, exception = None):\n\t\tif exception:\n\t\t\tself.assertRaises(exception, setattr, p, 'type', t)\n\t\telse:\n\t\t\tp.type = t\n\t\t\tself.assertEqual(p.value, val)\n\t\nclass TestParameters(testly.TestCase):\n\n\tdef setUpMeta(self):\n\t\tself.testdir = path.join(gettempdir(), 'PyPPL_unittest', 'TestParameters')\n\t\tif path.exists(self.testdir):\n\t\t\trmtree(self.testdir)\n\t\tmakedirs(self.testdir)\n\n\tdef testInit(self):\n\t\tps = Parameters()\n\t\tself.assertIsInstance(ps, Parameters)\n\t\tself.assertEqual(ps._props['usage'], [])\n\t\tself.assertEqual(ps._props['desc'], [])\n\t\tself.assertListEqual(ps._props['hopts'], ['-h', '--help', '-H', '-?'])\n\t\tself.assertEqual(ps._props['prefix'], '-')\n\t\tself.assertIsInstance(ps.__dict__['_assembler'], HelpAssembler)\n\t\tself.assertEqual(ps._assembler.theme, HelpAssembler.THEMES['default'])\n\t\tself.assertDictEqual(ps._params, {})\n\t\tself.assertTrue(ps == ps)\n\t\tself.assertFalse(ps != ps)\n\n\tdef dataProvider_testSetGetAttr(self):\n\t\tps = Parameters()\n\t\t#yield ps, '_props', None, ParameterNameError\n\t\tyield ps, 'a', 1\n\t\t\n\t\tps1 = Parameters()\n\t\tps1.a = 'a'\n\t\tyield ps1, 'a', 'a'\n\n\tdef testSetGetAttr(self, ps, name, value, exception = None):\n\t\tif exception:\n\t\t\tself.assertRaises(exception, setattr, ps, name, value)\n\t\telse:\n\t\t\tsetattr(ps, name, value)\n\t\t\tp = getattr(ps, name)\n\t\t\tself.assertIsInstance(p, Parameter)\n\t\t\tself.assertEqual(p.name, name)\n\t\t\tself.assertEqual(p.value, value)\n\t\t\tself.assertIn(name, ps._params)\n\t\t\t\n\t\t\tps[name] = value\n\t\t\tp = ps[name]\n\t\t\tself.assertIsInstance(p, Parameter)\n\t\t\tself.assertEqual(p.name, name)\n\t\t\tself.assertEqual(p.value, value)\n\t\t\tself.assertIn(name, ps._params)\n\n\t\t\tdel 
ps._params[name]\n\t\t\tself.assertNotIn(name, ps._params)\n\t\t\tp = getattr(ps, name)\n\t\t\tself.assertEqual(p.name, name)\n\t\t\tself.assertEqual(p.value, None)\n\t\t\tp.value = value\n\t\t\tself.assertEqual(p.value, value)\n\n\tdef dataProvider_testSetTheme(self):\n\t\tyield 'default',\n\t\tyield 'blue',\n\t\tyield {},\n\t\tyield {'title': 'red'},\n\n\tdef testSetTheme(self, theme):\n\t\tps = Parameters()\n\t\tps._setTheme(theme)\n\t\tself.assertEqual(ps._assembler.theme, HelpAssembler.THEMES.get(str(theme), theme))\n\n\tdef testRepr(self):\n\t\tps = Parameters()\n\t\tps.a\n\t\tps.b\n\t\tself.assertIn('', repr(ps))\n\n\tdef dataProvider_testCall(self):\n\t\tps = Parameters()\n\t\tyield ps, 'prefix', '', '', ParametersParseError\n\t\tyield ps, 'prefix', 'a', 'a'\n\t\tyield ps, 'prefix', '-', '-'\n\t\tyield ps, 'prefix', '--', '--'\n\n\t\t# 4, hopts\n\t\tyield ps, 'hopts', '', ['']\n\t\tyield ps, 'hopts', 'a', ['a']\n\t\tyield ps, 'hopts', '-', ['-']\n\t\tyield ps, 'hopts', ' --, -h', ['--', '-h']\n\t\tyield ps, 'hopts', ['--help', '?'], ['--help', '?']\n\t\t# cannot be tested solely\n\t\tyield ps, 'hopts', '?', ['?']\n\n\t\t# 10, usage\n\t\tyield ps, 'usage', '', ['']\n\t\tyield ps, 'usage', 'a', ['a']\n\t\tyield ps, 'usage', 'a\\nb', ['a\\nb']\n\t\tyield ps, 'usage', ' a \\n\\n b \\n', [' a \\n\\n b \\n']\n\n\t\t# 14, desc\n\t\tyield ps, 'desc', '', ['']\n\t\tyield ps, 'desc', 'a', ['a']\n\t\tyield ps, 'desc', 'a\\nb', ['a\\nb']\n\t\tyield ps, 'desc', ' a \\n\\n b \\n', [' a \\n\\n b \\n']\n\t\t#yield ps, 'Unknown', '', '', AttributeError\n\n\n\tdef testCall(self, ps, option, value, outval, exception = None):\n\t\tself.assertTrue(callable(ps))\n\t\tif exception:\n\t\t\tself.assertRaises(exception, ps, option, value)\n\t\telse:\n\t\t\tps(option, value)\n\t\t\tself.assertEqual(ps._props[option], outval)\n\n\tdef dataProvider_testParseName(self):\n\t\tps = Parameters()\n\t\tps('prefix', '---')\n\t\tyield ps, '-a', None, 'auto', None\n\t\tyield ps, '----a', None, 'auto', None\n\t\tyield ps, '---a', 'a', 'auto', None\n\t\tyield ps, '---a:i', 'a', 'int', None\n\t\tyield ps, '---a:int', 'a', 'int', None\n\t\tyield ps, '---a:s', 'a', 'str', None\n\t\tyield ps, '---a:str', 'a', 'str', None\n\t\tyield ps, '---a:b', 'a', 'bool', None\n\t\tyield ps, '---a:bool', 'a', 'bool', None\n\t\tyield ps, '---a:f', 'a', 'float', None\n\t\tyield ps, '---a:float', 'a', 'float', None\n\t\tyield ps, '---a:l', 'a', 'list:auto', None\n\t\tyield ps, '---a:list', 'a', 'list:auto', None\n\t\tyield ps, '---a:l:s', 'a', 'list:str', None\n\t\tyield ps, '---a:list:s', 'a', 'list:str', None\n\t\tyield ps, '---a:list:str', 'a', 'list:str', None\n\t\tyield ps, '---a:l:i', 'a', 'list:int', None\n\t\tyield ps, '---a:list:i', 'a', 'list:int', None\n\t\tyield ps, '---a:list:int', 'a', 'list:int', None\n\t\tyield ps, '---a:l:f', 'a', 'list:float', None\n\t\tyield ps, '---a:list:f', 'a', 'list:float', None\n\t\tyield ps, '---a:list:float', 'a', 'list:float', None\n\t\tyield ps, '---a:l:b', 'a', 'list:bool', None\n\t\tyield ps, '---a:list:b', 'a', 'list:bool', None\n\t\tyield ps, '---a:list:bool', 'a', 'list:bool', None\n\t\n\tdef testParseName(self, ps, argname, an, at, av):\n\t\tan1, at1, av1 = ps._parseName(argname)\n\t\tself.assertEqual(an1, an)\n\t\tself.assertEqual(at1, at)\n\t\tself.assertEqual(av1, av)\n\n\tdef dataProvider_testShouldPrintHelp(self):\n\t\tps = Parameters()\n\t\tps('hopts', '-h')\n\t\tyield ps, [], True\n\t\tyield ps, ['-h'], True\n\t\tps1 = Parameters()\n\t\tps1('hopts', ['--help'])\n\t\tyield ps1, [], 
True\n\t\tyield ps1, ['-h'], False\n\t\tps2 = Parameters()\n\t\tps2._hbald = False\n\t\tyield ps2, [], False\n\n\tdef testShouldPrintHelp(self, ps, args, should):\n\t\tself.assertEqual(ps._shouldPrintHelp(args), should)\n\n\tdef dataProvider_testCoerceValue(self):\n\t\tyield testly.Data('1', outval = 1)\n\t\tyield testly.Data('1.1', outval = 1.1)\n\t\tyield testly.Data('1.1E-2', outval = 0.011)\n\t\tyield testly.Data('TRUE', outval = True)\n\t\tyield testly.Data('py:[1,2]', outval = [1,2])\n\t\tyield testly.Data(True, outval = True)\n\t\tyield '1', 'int', 1\n\t\tyield '1.1', 'float', 1.1\n\t\tyield 'False', 'bool', False\n\t\tyield True, 'str', 'True'\n\t\tyield 'a', 'int', None, ParameterTypeError\n\t\tyield '{\"a\":1}', 'py', {\"a\": 1}\n\t\tyield '1', 'list', [1]\n\t\tyield '1', 'list:str', ['1']\n\t\tyield '1', 'list:bool', [True]\n\t\tyield 123, 'x', 123\n\t\tyield 1, 'list:one', [[1]]\n\n\tdef testCoerceValue(self, value, t = 'auto', outval = None, exception = None):\n\t\tif exception:\n\t\t\tself.assertRaises(exception, Parameters._coerceValue, value, t)\n\t\telse:\n\t\t\toutval = outval is None and value or outval\n\t\t\tself.assertEqual(Parameters._coerceValue(value, t), outval)\n\n\tdef dataProvider_testPutValue(self):\n\t\tps = Parameters()\n\t\tyield ps, 'noSuchArgname', None, None, None, False\n\t\tps.a.type = 'list'\n\t\tyield ps, 'a', 'auto', 1, 1, False\n\t\tyield ps, 'a', 'auto', '2', 2, False\n\t\tyield ps, 'a', 'auto', '', '', False\n\t\tyield ps, 'a', 'list:str', 3, ['3'], True\n\t\tps.b.type = 'bool'\n\t\tyield ps, 'b', 'auto', 'F', False, False\n\t\tps.c.type = 'list'\n\t\tyield ps, 'c', 'list:one', 1, [[1]], True\n\t\tyield ps, 'd', 'auto', '1', 1, False, True\n\n\n\tdef testPutValue(self, ps, argname, argtype, argval, outval, ret, arbi = False):\n\t\twith self.assertStdOE():\n\t\t\tr = ps._putValue(argname, argtype, argval, arbi)\n\t\tself.assertEqual(r, ret)\n\t\tif argname in ps._params:\n\t\t\tself.assertEqual(ps._params[argname].value, outval)\n\n\tdef dataProvider_testToDict(self):\n\t\tps = Parameters()\n\t\tps.a = 1\n\t\tps.b = 2\n\t\tyield ps, {'a':1, 'b':2}\n\n\t\tps2 = Parameters()\n\t\tyield ps2, {}\n\n\t\tps3 = Parameters()\n\t\tps3.x = True\n\t\tps3.y = []\n\t\tyield ps3, {'x': True, 'y': []}\n\n\tdef testToDict(self, ps, values):\n\t\td = ps.asDict()\n\t\tself.assertDictEqual(d, values)\n\n\tdef dataProvider_testParse(self):\n\t\tps = Parameters()\n\t\tyield ps, [], {}, 'USAGE', SystemExit\n\t\t\n\t\tps1 = Parameters()\n\t\tps1('hopts', '-h')\n\t\tyield ps1, ['a', 'b', '-h'], {}, 'USAGE', SystemExit, None\n\n\t\tps2 = Parameters()\n\t\tps2('prefix', '--param-')\n\t\tps2.a\n\t\tyield ps2, ['--param-a=b'], {'a': 'b', '_': []}\n\t\tyield ps2, ['--param-d'], {'a': 'b', '_': []}, 'Warning: No such option: --param-d'\n\n\t\tps3 = Parameters()\n\t\tps3('prefix', '--param-')\n\t\tps3.e = True\n\t\tps3.e.type = 'bool'\n\t\tyield ps3, ['--param-e=False'], {'e': False, '_': []}\n\t\t# 5\n\t\tyield ps3, ['--param-e'], {'e': True, '_': []}\n\t\tyield ps3, ['--param-e', 'Yes'], {'e': True, '_': []}\n\t\tyield ps3, ['--param-e', 't'], {'e': True, '_': []}\n\t\tyield ps3, ['--param-e', 'true'], {'e': True, '_': []}\n\t\tyield ps3, ['--param-e', 'y'], {'e': True, '_': []}\n\t\t# 10\n\t\tyield ps3, ['--param-e', '1'], {'e': True, '_': []}\n\t\tyield ps3, ['--param-e', 'on'], {'e': True, '_': []}\n\t\tyield ps3, ['--param-e', 'f'], {'e': False, '_': []}\n\t\tyield ps3, ['--param-e', 'false'], {'e': False, '_': []}\n\t\tyield ps3, ['--param-e', 'no'], {'e': False, '_': 
[]}\n\t\t# 15\n\t\tyield ps3, ['--param-e', 'n'], {'e': False, '_': []}\n\t\tyield ps3, ['--param-e', '0'], {'e': False, '_': []}\n\t\tyield ps3, ['--param-e', 'off'], {'e': False, '_': []}\n\t\tyield ps3, ['--param-e', 'a'], {'e': True, '_': []}, None, ParameterTypeError, \"Unable to coerce value 'a' to type: 'bool'\"\n\n\t\tps4 = Parameters()\n\t\tps4('prefix', '--param-')\n\t\tps4.f = []\n\t\tps4.f.type = 'list:str'\n\t\tyield ps4, ['--param-f=1'], {'f': ['1'], '_': []}\n\t\t# 20\n\t\tyield ps4, ['--param-f=1', '2', '3'], {'f': ['1', '1', '2', '3'], '_': []}\n\n\t\tps5 = Parameters()\n\t\tps5('prefix', '--param-')\n\t\tps5.g = ''\n\t\tyield ps5, ['--param-g'], {'g': True, '_': []}, 'Warning: Decleared type \"str\" ignored, use \"bool\" instead for option --param-g.'\n\t\tyield ps5, ['--param-g', 'a', 'b'], {'g': 'a', '_': ['b']}\n\n\t\tps6 = Parameters()\n\t\tps6('prefix', '--param-')\n\t\tps6('hbald', False)\n\t\tps6.h.required = True\n\t\t# 23\n\t\tyield ps6, [], {}, 'Error: Option --param-h is required.', SystemExit\n\n\t\tps7 = Parameters()\n\t\tps7('prefix', '--param-')\n\t\tps7.i = 1\n\t\tyield ps7, ['--param-i=a'], {}, None, ParameterTypeError, 'Unable to coerce'\n\n\t\t# mixed\n\t\tps8 = Parameters()\n\t\tps8('prefix', '--param-')\n\t\tps8.a.type = 'str'\n\t\tps8.b.type = 'str'\n\t\tps8.c\n\t\t# 25\n\t\tyield ps8, ['--param-a=1', '--param-b', '2', '--param-c=\"3\"'], {'a':'1', 'b':'2', 'c':'\"3\"', '_':[]}\n\n\t\tps9 = Parameters()\n\t\tps9('prefix', '--param-')\n\t\tps9.a = []\n\t\tps9.a.type = 'list:str'\n\t\tps9.b = []\n\t\tps9.c = []\n\t\tyield ps9, ['--param-a=1', '2', '--param-b', 'a', '--param-c'], {'a': ['1', '2'], 'b': ['a'], 'c': [], '_': []}\n\n\t\tps10 = Parameters()\n\t\tps10('prefix', '--param-')\n\t\tps10.a = False\n\t\tps10.b = False\n\t\tps10.c = False\n\t\tyield ps10, ['--param-a', '--param-b', '1', '--param-c=yes'], {'a': True, 'b': True, 'c': True, '_':[]}\n\n\t\tps11 = Parameters()\n\t\tps11('prefix', '--param-')\n\t\tps11.a\n\t\tps11.b = 'a'\n\t\tps11.c = 1\n\t\tps11.d = False\n\t\tps11.e = []\n\t\tyield ps11, ['--param-d'], {'a':None, 'b':'a', 'c':1, 'd': True, 'e':[], '_': []}\n\t\tyield ps11, ['a', '--param-d', 'no', 'b', '--param-c=100', '--param-e:l:s', '-1', '-2'], {'a': None, 'b':'a', 'c':100, 'd': False, 'e':['-1', '-2'], '_': ['a', 'b']}, 'Warning: Decleared type \"list\" ignored, use \"list:str\" instead for option --param-e.'\n\n\t\tps12 = Parameters()\n\t\tps12.a\n\t\tps12.b\n\t\tyield ps12, ['-a', '-b=1'], {'a':True, 'b':1, '_': []}\n\n\tdef testParse(self, ps, args, values, stderr = [], exception = None, msg = None):\n\t\tif exception:\n\t\t\twith helpers.captured_output() as (out, err):\n\t\t\t\tself.assertRaisesRegex(exception, msg, ps.parse, args)\n\t\t\tif stderr:\n\t\t\t\tif not isinstance(stderr, list):\n\t\t\t\t\tstderr = [stderr]\n\t\t\t\tfor stde in stderr:\n\t\t\t\t\tself.assertIn(stde, err.getvalue())\n\t\telse:\n\t\t\twith helpers.captured_output() as (out, err):\n\t\t\t\td = ps.parse(args)\n\n\t\t\tif stderr:\n\t\t\t\tif not isinstance(stderr, list):\n\t\t\t\t\tstderr = [stderr]\n\t\t\t\tfor stde in stderr:\n\t\t\t\t\tself.assertIn(stde, err.getvalue())\n\t\t\telse:\n\t\t\t\tself.assertEqual(err.getvalue(), '')\n\n\t\t\tself.assertDictEqual(d, values)\n\n\tdef dataProvider_testHelp(self):\n\t\tps = Parameters()\n\t\tyield ps, [\n\t\t\t'USAGE:',\n\t\t\t' testParameters.py',\n\t\t\t'',\n\t\t\t'OPTIONAL OPTIONS:',\n\t\t\t' -h, --help, -H, -? 
- Print this help information',\n\t\t\t''\n\t\t]\n\t\t\n\t\tps1 = Parameters()\n\t\tps1('hopts', '-h')\n\t\tyield ps1, [\n\t\t\t'USAGE:',\n\t\t\t' testParameters.py',\n\t\t\t'',\n\t\t\t'OPTIONAL OPTIONS:',\n\t\t\t' -h - Print this help information',\n\t\t\t''\n\t\t]\n\n\t\tps2 = Parameters()\n\t\tps2('prefix', '--param-')\n\t\tps2.a\n\t\tyield ps2, [\n\t\t\t'USAGE:',\n\t\t\t' testParameters.py [OPTIONS]',\n\t\t\t'',\n\t\t\t'OPTIONAL OPTIONS:',\n\t\t\t' --param-a - Default: None',\n\t\t\t' -h, --help, -H, -? - Print this help information',\n\t\t\t''\n\t\t]\n\n\t\tps3 = Parameters()\n\t\tps3.e = False\n\t\tps3.e.type = 'bool'\n\t\tps3._.required = True\n\t\tps3._.desc = 'positional options'\n\t\tyield ps3, [\n\t\t\t'USAGE:',\n\t\t\t' testParameters.py [OPTIONS] POSITIONAL',\n\t\t\t'',\n\t\t\t'REQUIRED OPTIONS:',\n\t\t\t' POSITIONAL - positional options',\n\t\t\t'',\n\t\t\t'OPTIONAL OPTIONS:',\n\t\t\t' -e (BOOL) - Default: False',\n\t\t\t' -h, --help, -H, -? - Print this help information',\n\t\t\t''\n\t\t]\n\n\t\tps4 = Parameters()\n\t\tps4('prefix', '--param-')\n\t\tps4.ef.required = True\n\t\tps4.ef.type = 'str'\n\t\tps4.ef.desc = 'This is a description of option ef. \\n Option ef is required.'\n\t\tps4.f = []\n\t\tps4.f.type = 'list'\n\t\tps4.f.desc = 'This is a description of option f. \\n Option f is not required.'\n\t\tps4.g = ps4.f # alias\n\t\tps4._.required = False\n\t\tps4._.desc = 'positional options'\n\t\tps4('usage', '{prog} User-defined usages\\n{prog} User-defined another usage'.split('\\n'))\n\t\tps4('desc', 'This program is doing: \\n* 1. blahblah\\n* 2. lalala'.split('\\n'))\n\t\tps4._helpx = lambda items: items.update({'END': ['Bye!']}) or items\n\t\tyield ps4, [\n\t\t\t'DESCRIPTION:',\n\t\t\t' This program is doing:',\n\t\t\t' * 1. blahblah',\n\t\t\t' * 2. lalala',\n\t\t\t'',\n\t\t\t'USAGE:',\n\t\t\t' testParameters.py User-defined usages',\n\t\t\t' testParameters.py User-defined another usage',\n\t\t\t'',\n\t\t\t'REQUIRED OPTIONS:',\n\t\t\t' --param-ef - This is a description of option ef.',\n\t\t\t' Option ef is required.',\n\t\t\t'',\n\t\t\t'OPTIONAL OPTIONS:',\n\t\t\t' --param-f, --param-g - This is a description of option f.',\n\t\t\t' Option f is not required.',\n\t\t\t' Default: []',\n\t\t\t' POSITIONAL - positional options',\n\t\t\t' Default: None',\n\t\t\t' -h, --help, -H, -? - Print this help information',\n\t\t\t'',\n\t\t\t'END:',\n\t\t\t' Bye!',\n\t\t\t''\n\t\t]\n\n\t\t# show = False, description\n\t\tps5 = Parameters()\n\t\tps5.g = ''\n\t\tps5.g.show = False\n\t\tyield ps5, [\n\t\t\t'Error: This is an error!',\n\t\t\t'USAGE:',\n\t\t\t' testParameters.py',\n\t\t\t'',\n\t\t\t'OPTIONAL OPTIONS:',\n\t\t\t' -h, --help, -H, -? 
- Print this help information',\n\t\t\t''\n\t\t], 'This is an error!'\n\n\tdef testHelp(self, ps, out, error = ''):\n\t\tself.maxDiff = 8000\n\t\tself.diffContext = None\n\t\tself.diffTheme = 'contrast'\n\t\timport sys\n\t\tsys.argv = ['progname']\n\t\th = ps.help(error)\n\t\tself.assertEqual(noANSI(h), '\\n'.join(out) + '\\n')\n\t\n\tdef dataProvider_testLoadDict(self):\n\t\tyield {}, True\n\t\tyield {'a': ''}, True\n\t\tyield {'a': []}, False\n\t\tyield {'a': [], 'a.show': True}, True # can be different\n\t\tyield {'a': 1, 'a.type': 'bool'}, False\n\t\tyield {'a': True, 'a.type': 'int', 'a.desc': 'hello'}, False\n\t\tyield {'a.type': ''}, True, ParametersLoadError, 'Cannot set attribute of an undefined option'\n\t\tyield {'a': 1, 'a.type2': ''}, True, ParametersLoadError, 'Unknown attribute name for option'\n\t\tyield {'a': 2, 'a.b.type': ''}, True, ParametersLoadError, 'Unknown attribute name for option'\n\n\tdef testLoadDict(self, dictVar, show, exception = None, msg = None):\n\t\tps = Parameters()\n\t\tif exception:\n\t\t\tself.assertRaisesRegex(exception, msg, ps.loadDict, dictVar, show)\n\t\telse:\n\t\t\tps.loadDict(dictVar, show)\n\t\t\tfor dk, dv in dictVar.items():\n\t\t\t\tif '.' in dk: \n\t\t\t\t\tpn, pa = dk.split('.', 2)\n\t\t\t\t\tp = getattr(ps, pn)\n\t\t\t\t\tself.assertIsInstance(p, Parameter)\n\t\t\t\t\tself.assertEqual(p.name, pn)\n\t\t\t\t\tif pa == 'desc':\n\t\t\t\t\t\tself.assertEqual(getattr(p, pa)[0], dv)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.assertEqual(getattr(p, pa), dv)\n\t\t\t\telse:\n\t\t\t\t\tp = getattr(ps, dk)\n\t\t\t\t\tself.assertIsInstance(p, Parameter)\n\t\t\t\t\tself.assertEqual(p.name, dk)\n\t\t\t\t\tself.assertEqual(p.value, dv)\n\t\t\t\t\tself.assertEqual(p.show, show)\n\n\tdef dataProvider_testLoadFile(self):\n\t\tyield self.testdir, False, []\n\n\t\tjsonfile = path.join(self.testdir, 'testLoadFile.json')\n\t\thelpers.writeFile(jsonfile, '\\n'.join([\n\t\t\t'{',\n\t\t\t'\t\"a\": \"2\",',\n\t\t\t'\t\"a.desc\": \"Option a\",',\n\t\t\t'\t\"a.type\": \"int\",',\n\t\t\t'\t\"a.required\": true',\n\t\t\t'}',\n\t\t]))\n\t\tp1 = Parameter('a', 2)\n\t\tp1.desc = \"Option a\"\n\t\tp1.required = True\n\t\tyield jsonfile, True, [p1]\n\n\t\tif helpers.moduleInstalled('yaml'):\n\t\t\tyamlfile = path.join(self.testdir, 'testLoadFile.yaml')\n\t\t\thelpers.writeFile(yamlfile, '\\n'.join([\n\t\t\t\t'a: 2',\n\t\t\t\t'a.desc: Option a',\n\t\t\t\t'a.type: int',\n\t\t\t\t'a.required: false',\n\t\t\t\t'a.show: true',\n\t\t\t\t'',\n\t\t\t]))\n\t\t\tp2 = Parameter('a', 2)\n\t\t\tp2.desc = \"Option a\"\n\t\t\tp2.required = False\n\t\t\tp2.show = True\n\t\t\tyield yamlfile, False, [p2]\n\t\t\t\n\t\tconffile = path.join(self.testdir, 'testLoadFile.conf')\n\t\thelpers.writeFile(conffile, '\\n'.join([\n\t\t\t'[PARAM1]',\n\t\t\t'a = 2',\n\t\t\t'a.desc = Option a',\n\t\t\t'a.type = int',\n\t\t\t'a.required = f',\n\t\t\t'[PARAM2]',\n\t\t\t'a.type = str',\n\t\t\t'b:',\n\t\t\t'\t1',\n\t\t\t'\t2',\n\t\t\t'b.type = list',\n\t\t]))\n\t\tp3 = Parameter('a', '2')\n\t\tp3.desc = \"Option a\"\n\t\tp3.required = False\n\t\tp4 = Parameter('b', ['1','2'])\n\t\tyield conffile, True, [p3, p4]\n\n\tdef testLoadFile(self, cfgfile, show, params, exception = None, msg = None):\n\t\tps = Parameters()\n\t\tif exception:\n\t\t\tself.assertRaisesRegex(exception, msg, ps.loadFile, dictVar, cfgfile)\n\t\telse:\n\t\t\tps.loadFile(cfgfile, show)\n\t\t\tfor param in params:\n\t\t\t\tp = getattr(ps, param.name)\n\t\t\t\tself.assertDictEqual(param._props, p._props)\n\t\nclass 
TestCommands(testly.TestCase):\n\n\tdef testInit(self):\n\t\tcmds = Commands()\n\t\tself.assertEqual(cmds._desc, [])\n\t\tself.assertEqual(cmds._hcmd, 'help')\n\t\tself.assertEqual(cmds._cmds, {})\n\t\tself.assertIsInstance(cmds._assembler, HelpAssembler)\n\t\tself.assertEqual(cmds._assembler.theme, HelpAssembler.THEMES['default'])\n\t\tself.assertIsNone(cmds._helpx)\n\t\tcmds._helpx = lambda a: None\n\t\tself.assertTrue(callable(cmds._helpx))\n\n\tdef test_setDesc(self, indesc, outdesc):\n\t\tcmds = Commands()\n\t\tcmds._setDesc(indesc)\n\t\tcmds._desc = indesc\n\t\tself.assertEqual(cmds._desc, outdesc)\n\n\tdef dataProvider_test_setDesc(self):\n\t\tyield 'a', ['a']\n\t\tyield 'a\\nb', ['a\\nb']\n\t\tyield ['a', 'b'], ['a', 'b']\n\n\tdef test_setHcmd(self, inhcmd, outhcmd):\n\t\tcmds = Commands()\n\t\tcmds._setHcmd(inhcmd)\n\t\tcmds._hcmd = inhcmd\n\t\tself.assertEqual(cmds._hcmd, outhcmd)\n\n\tdef dataProvider_test_setHcmd(self):\n\t\tyield 'h', 'h'\n\t\tyield '?', '?'\n\n\tdef test_setTheme(self, intheme, outtheme):\n\t\tcmds = Commands()\n\t\tcmds._setTheme(intheme)\n\t\tcmds._theme = intheme\n\t\tself.assertDictEqual(cmds._assembler.theme, outtheme)\n\n\tdef dataProvider_test_setTheme(self):\n\t\tyield 'default', HelpAssembler.THEMES['default']\n\t\tyield 'blue', HelpAssembler.THEMES['blue']\n\t\tyield 'plain', HelpAssembler.THEMES['plain']\n\n\tdef testSetGetattr(self):\n\t\tcmds = Commands()\n\t\tcmds['ps1'].a\n\t\tself.assertIsInstance(cmds.ps1, Parameters)\n\t\tself.assertIsInstance(cmds.ps1.a, Parameter)\n\t\tself.assertIsInstance(cmds.ps2, Parameters)\n\n\t\tcmds.ps3 = cmds.ps1\n\t\tself.assertEqual(cmds.ps3._prog, path.basename(sys.argv[0]) + ' ' + 'ps1|ps3')\n\n\t\tcmds.ps4 = 'command ps4'\n\t\tself.assertEqual(cmds.ps4._props['desc'], ['command ps4'])\n\n\tdef testParse(self, cmds, args, retcmd, retps, arbi = False, exception = None):\n\t\tif exception:\n\t\t\twith self.assertStdOE():\n\t\t\t\tself.assertRaises(exception, cmds.parse, args, arbi)\n\t\telse:\n\t\t\tcmd, ps = cmds.parse(args, arbi)\n\t\t\tself.assertEqual(cmd, retcmd)\n\t\t\tself.assertDictEqual(ps, retps)\n\n\tdef dataProvider_testParse(self):\n\t\tcmds1 = Commands()\n\t\tcmds1._cmds['None'] = None\n\t\tyield cmds1, [], '', {}, True\n\n\t\targs1 = ['-a', '1', '-b', '2', '3', '-c:list', '4', '5', '-d']\n\t\tyield cmds1, args1, '-a', {'_': [1, 3], 'b': 2, 'c': [4, 5], 'd': True}, True\n\n\t\targs2 = ['subcmd', '-a', '1', '-b', '2', '3', '-c:list', '4', '5', '-d']\n\t\tyield cmds1, args2, 'subcmd', {'a': 1, '_': [3], 'b': 2, 'c': [4, 5], 'd': True}, True\n\n\t\targs3 = ['None', '1', '2']\n\t\tyield cmds1, args3, 'None', {'_': ['1', '2']}, True\n\n\t\tcmds2 = Commands()\n\t\tcmds2.subcmd = 'A sub command.'\n\t\tyield cmds2, None, '', {}, False, SystemExit\n\t\tyield cmds2, ['help'], '', {}, False, SystemExit\n\t\tyield cmds2, ['help', 'x'], '', {}, False, SystemExit\n\t\tyield cmds2, ['help', 'subcmd'], '', {}, False, SystemExit\n\t\tyield cmds2, ['subcmd', '1'], 'subcmd', {'_': [1]}, False\n\n\tdef dataProvider_testHelp(self):\n\t\tcmds = Commands()\n\t\tcmds._desc = 'Hello world!'\n\t\tyield cmds, [\n\t\t\t\"DESCRIPTION:\",\n\t\t\t\" Hello world!\",\n\t\t\t\"\",\n\t\t\t\"COMMANDS:\",\n\t\t\t\" help - Print help information for the command\",\n\t\t\t\"\"\n\t\t]\n\t\tyield cmds, [\n\t\t\t\"DESCRIPTION:\",\n\t\t\t\" Hello world!\",\n\t\t\t\"\",\n\t\t\t\"COMMANDS:\",\n\t\t\t\" help - Print help information for the command\",\n\t\t\t\"\",\n\t\t\t\"END:\",\n\t\t\t\" -hello - world\",\n\t\t\t\" -good - 
world\",\n\t\t\t\"\"\n\t\t], '', lambda items: items.update({'end': [\n\t\t\t('-hello', ' ', 'world'),\n\t\t\t('-good', 'bye', 'world'),\n\n\t\t]}) or items\n\n\tdef testHelp(self, cmds, outhelp, error = '', helpx = None):\n\t\tcmds._helpx = helpx\n\t\tself.assertEqual(noANSI(cmds.help(error)), '\\n'.join(outhelp) + '\\n')\n\n\nif __name__ == '__main__':\n\ttestly.main(verbosity=2)","sub_path":"tests/testParameters.py","file_name":"testParameters.py","file_ext":"py","file_size_in_byte":25709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"240960934","text":"import logging\nfrom functools import reduce\n\nlog = logging.getLogger(__name__)\n\n\ndef has_bin(arg):\n \"\"\"\n Helper function checks whether args contains binary data\n :param args: list | tuple | bytearray | dict\n :return: (bool)\n \"\"\"\n if isinstance(arg, list) or isinstance(arg, tuple):\n return reduce(\n lambda has_binary,\n item: has_binary or has_bin(item),\n arg,\n False)\n if isinstance(arg, bytearray) or hasattr(arg, 'read'):\n return True\n if isinstance(arg, dict):\n return reduce(\n lambda has_binary, item: has_binary or has_bin(item), [\n v for k, v in arg.items()], False)\n\n return False\n","sub_path":"socketio/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"637126780","text":"strN = input(\"Please enter a number:\")\nN = int(strN)\n\n#N = 10\n\ntotal = 0\nfor current in range(N+1):\n if current % 2 == 0:\n total = total + current\nprint(\"summation 1..\",N,\" for even numbers is\",total)\n","sub_path":"2021/examples-in-class-2021-10-01/summation1toNForEven.py","file_name":"summation1toNForEven.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"411664057","text":"\n# method for hasing strins\n# cool fact: anagrams will always have the same value \ndef hash(str, tableSize):\n sum = 0\n for pos in range(len(str)):\n sum += ord(str[pos])\n\n return sum%tableSize\n\n\nprint(hash('27', 13))","sub_path":"python/hacking/problems_python/sorting/hash.py","file_name":"hash.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"569792191","text":"import logo\nimport markdown\nimport os\nimport re\nimport yaml\n\nfrom datetime import datetime\nfrom flask import Flask, render_template, url_for, Response\nfrom os import listdir\n\n\napp = Flask(__name__)\n\nBLOG_CONTENT_DIR = 'content/blog'\n\nREGEX_SPLIT_FRONTMATTER = re.compile(r'^---$', re.MULTILINE)\n\nWIKI_REDIRECTS = {\n \"chat\": \"Matrix_and_IRC\",\n \"deviceinfo\": \"Deviceinfo_reference\",\n \"devices\": \"Supported_devices\",\n \"irc\": \"Matrix_and_IRC\",\n \"matrix\": \"Matrix_and_IRC\",\n \"troubleshooting\": \"Troubleshooting\",\n \"usbhook\": \"Inspecting_the_initramfs\",\n \"warning-repo\": \"Troubleshooting#Installed_version_newer_than_the_version_in_the_repositories\",\n \"warning-repo2\": \"Troubleshooting#Newer_version_in_binary_package_repositories_than_in_aports_folder\",\n \"wiki\": \"Main_page\",\n}\n\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\ndef reading_time(content):\n content = re.sub('<[^<]+?>', '', content)\n words_per_minute = 200\n words = content.split(\" \")\n return int(len(words) / words_per_minute)\n\n@app.route('/logo.svg')\ndef 
logo_svg():\n    return Response(response=logo.create(phone=False), mimetype=\"image/svg+xml\")\n\ndef parse_post(post):\n    with open(os.path.join(BLOG_CONTENT_DIR, post)) as handle:\n        raw = handle.read()\n    frontmatter, content = REGEX_SPLIT_FRONTMATTER.split(raw, 2)\n\n    data = yaml.safe_load(frontmatter)\n\n    y, m, d, *title = post[:-3].split('-')\n    slug = '-'.join(title)\n\n    data['url'] = url_for('blog_post', y=y, m=m, d=d, slug=slug)\n    data['reading_time'] = reading_time(content)\n\n    return data\n\n\n@app.route('/blog/')\ndef blog():\n    posts = sorted(listdir(BLOG_CONTENT_DIR), reverse=True)\n    posts = map(parse_post, posts)\n    return render_template('blog.html', posts=posts)\n\n\n@app.route('/blog/<y>/<m>/<d>/<slug>/')\ndef blog_post(y, m, d, slug):\n    date_str = '-'.join([y, m, d])\n    post_path = '-'.join([date_str, slug])\n    with open('{}/{}.md'.format(BLOG_CONTENT_DIR, post_path.lower()), 'r') as f:\n        text = f.read()\n    frontmatter, body = REGEX_SPLIT_FRONTMATTER.split(text, 2)\n    data = yaml.safe_load(frontmatter)\n    rt = reading_time(body)\n    date = datetime.strptime(date_str, '%Y-%m-%d')\n    html = markdown.markdown(body, extensions=[\n        'markdown.extensions.extra',\n        'markdown.extensions.codehilite',\n        'markdown.extensions.toc'\n    ])\n    return render_template('blog-post.html', title=data['title'], html=html, reading_time=rt, date=date)\n\n\n@app.route('/<slug>/')\ndef wiki_redirect(slug):\n    \"\"\" WARNING: This must be the last route! \"\"\"\n    return render_template('redirect.html', url='https://wiki.postmarketos.org/wiki/' + WIKI_REDIRECTS[slug])\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"546467254","text":"#!/home/ahmed/anaconda3/envs/Django/bin/python3.9\n# ^ you have to add the path to your specific virtual environment ^\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport numpy as np\nimport time,imageio,sys,pickle\n\n\n# sys.argv[1] is used for taking the video path from the terminal\nstart = time.time()\nvideo = sys.argv[1]\n#passing the video file to ImageIO to be read later in form of frames\nvideo = imageio.get_reader(video)\ndictionary = {}\n#download and extract the model( faster_rcnn/openimages_v4/inception_resnet_v2 or\n# openimages_v4/ssd/mobilenet_v2) in the same folder\nmodule_handle = \"/home/ahmed/Desktop/VODS/SSD/SSD\"\ndetector = hub.load(module_handle).signatures['default']\n#looping over every frame in the video\nfor index, frames in enumerate(video):\n    # converting the images ( video frames ) to tf.float32 which is the only acceptable input format\n    #frames = np.resize(frames, (256, 256, 3))\n    image = tf.image.convert_image_dtype(frames, tf.float32)[tf.newaxis]\n    # passing the converted image to the model\n    detector_output = detector(image)\n    class_names = detector_output[\"detection_class_entities\"]\n    scores = detector_output[\"detection_scores\"]\n    # in case there are multiple objects in the frame\n    for i in range(len(scores)):\n        if scores[i] > 0.3:\n            #converting from bytes to string\n            object = class_names[i].numpy().decode(\"ascii\")\n            #adding the objects that appear in the frames in a dictionary and their frame numbers\n            if object not in dictionary:\n                dictionary[object] = [index]\n            else:\n                dictionary[object].append(index)\n\nwith open('new.pickle', 'wb') as handle:\n    pickle.dump(dictionary, handle, protocol=pickle.HIGHEST_PROTOCOL)\nend = time.time()\nprint(\"The total time to run the detection is \"+str(end - 
start))","sub_path":"SSD/tagroba.py","file_name":"tagroba.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"539927569","text":"from django.test import TestCase\nfrom unittest import TestSuite, TextTestRunner\nfrom games_odds.tests.webScrapingTests.normalTests.testingWilliamHillGame0 import TestingWilliamHillGame0\nfrom games_odds.tests.webScrapingTests.normalTests.testingWilliamHillGame1 import TestingWilliamHillGame1\nfrom games_odds.tests.webScrapingTests.normalTests.testingWilliamHill import TestingWilliamHill\nfrom games_odds.tests.webScrapingTests.normalTests.testingRefreshDateAndTime0 import TestingRefreshDateAndTime0\nfrom games_odds.tests.webScrapingTests.normalTests.testingRefreshDateAndTime1 import TestingRefreshDateAndTime1\n\nclass RunningTests(TestCase):\n    def suite(self):\n        suite = TestSuite()\n        testingWilliamGame_0 = TestingWilliamHillGame0()\n        testingWilliamGame_1 = TestingWilliamHillGame1()\n        testingWilliamHill = TestingWilliamHill()\n        testingRefreshDateAndTime_0 = TestingRefreshDateAndTime0()\n        testingRefreshDateAndTime_1 = TestingRefreshDateAndTime1()\n\n        suite.addTest(testingWilliamGame_0)\n        suite.addTest(testingWilliamGame_1)\n        suite.addTest(testingWilliamHill)\n        suite.addTest(testingRefreshDateAndTime_0)\n        suite.addTest(testingRefreshDateAndTime_1)\n\n        return suite\n\nif __name__ == '__main__':\n    runner = TextTestRunner()\n    test_suite = RunningTests()\n    runner.run(test_suite.suite())\n","sub_path":"games_odds/tests/webScrapingTests/normalTests/runningTests.py","file_name":"runningTests.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"227349055","text":"\"\"\" api.tests.test_logout\n\n    This module implements a unit test to test the process of logging out.\n\"\"\"\nfrom identityAPI.api.lib.apiTestCase import APITestCase\n\nfrom identityAPI.api.models import *\n\n#############################################################################\n\nclass LogoutTest(APITestCase):\n    \"\"\" The unit test to test the process of logging out.\n    \"\"\"\n    def test_logout(self):\n        \"\"\" Test the process of logging out.\n        \"\"\"\n        # Create a random user for testing.\n\n        user = self.create_random_user()\n\n        # Log the user in.\n\n        session_token = self.login_user(user['username'],\n                                        user['password'])\n\n        # Check that the user's session exists.\n\n        try:\n            session = Session.objects.get(token=session_token)\n        except Session.DoesNotExist:\n            session = 
None\n\n        self.assertNotEqual(session, None)\n\n        # Now try logging the user out.\n\n        response = self.client.post(\"/identity/logout\",\n                                    {'session_token' : session_token})\n\n        self.assertEqual(response.status_code, 200)\n\n        # Finally, make sure that the user's session no longer exists.\n\n        try:\n            session = Session.objects.get(token=session_token)\n        except Session.DoesNotExist:\n            session = None\n\n        self.assertEqual(session, None)\n\n","sub_path":"identityAPI/api/tests/test_logout.py","file_name":"test_logout.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"231845999","text":"##animals = [\"ant\", \"bat\", \"cat\"]\n##print animals.index(\"bat\")\n##\n##if animals.index(\"bat\") == 1:\n##    print \"You got it!\"\n##\n##animals.insert(1, \"dog\")\n##print animals\n\n#### Maintaining Order\n\n##animals = [\"aardvark\", \"badger\", \"duck\", \"emu\", \"fennec fox\"]\n##duck_index = animals.index(\"duck\") # Use index() to find \"duck\"\n##\n### Your code here!\n##animals.insert(duck_index, \"cobra\")\n##\n##\n##print animals # Observe what prints after the insert operation\n\n\n##For One and All\n\n##my_list = [1,9,3,8,5,7]\n##\n##for number in my_list: \"\"\"takes all the numbers from my_list\"\"\"\n##    print number * 2\n\n\n##More with 'for'\n\n##start_list = [5, 3, 1, 2, 4]\n##square_list = []\n##\n##for numbers in start_list:\n##    square_list.append(numbers ** 2)\n##\n##square_list.sort()\n##\n##print square_list\n\n\n## This Next Part Is Key\n\n##residents = {'Puffin' : 104, 'Sloth' : 105, 'Burmese Python' : 106}\n##\n##print residents['Puffin'] # Prints Puffin's room number\n##\n##print residents['Sloth']\n##\n##print residents['Burmese Python']\n\n\n#### New Entries into Dictionaries\n##\n##menu = {} # Empty dictionary\n##menu['Chicken Alfredo'] = 14.50 # Adding new key-value pair\n##print menu['Chicken Alfredo']\n##\n### Your code here: Add some dish-price pairs to menu!\n##menu['Daves Hot & Juicy'] = 4.5\n##menu['Fries'] = 2.3\n##menu['Soft Drink'] = 1.8\n##\n##\n##\n##print \"There are \" + str(len(menu)) + \" items on the menu.\"\n##print menu\n\n## Changing Your Mind\n\n##zoo_animals = { 'Unicorn' : 'Cotton Candy House',\n##'Sloth' : 'Rainforest Exhibit',\n##'Bengal Tiger' : 'Jungle House',\n##'Atlantic Puffin' : 'Arctic Exhibit',\n##'Rockhopper Penguin' : 'Arctic Exhibit'}\n##\n##del zoo_animals['Unicorn']\n##del zoo_animals['Sloth']\n##del zoo_animals['Bengal Tiger']\n##\n##zoo_animals['Rockhopper Penguin'] = 'Polar Exhibit'\n##print zoo_animals\n\n## Remove a Few Things\n\n##backpack = ['xylophone', 'dagger', 'tent', 'bread loaf']\n##backpack.remove('dagger')\n\n\n## It's Dangerous to Go Alone! 
Take this\n\ninventory = {\n 'gold' : 500,\n 'pouch' : ['flint', 'twine', 'gemstone'], # Assigned a new list to 'pouch' key\n 'backpack' : ['xylophone','dagger', 'bedroll','bread loaf']\n}\n\ninventory['burlap bag'] = ['apple', 'small ruby', 'three-toed sloth']\ninventory['pocket'] = ['seashell', 'strange berry', 'lint']\n\n\ninventory['pouch'].sort()\ninventory['backpack'].sort()\n\ninventory['backpack'].remove('dagger')\ninventory['gold'] = 550\n\n","sub_path":"Code Academy/Introduction to Lists (1).py","file_name":"Introduction to Lists (1).py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"474112021","text":"import os\nimport tarfile\n\nimport pytest\n\nfrom ..constants import * # NOQA\nfrom ..crypto.key import KeyfileKey\nfrom ..upgrader import AtticRepositoryUpgrader, AtticKeyfileKey\nfrom ..helpers import get_keys_dir\nfrom ..repository import Repository\nfrom . import are_hardlinks_supported\n\n\n# tar with a repo and repo keyfile from attic\nATTIC_TAR = os.path.join(os.path.dirname(__file__), 'attic.tar.gz')\n\n\ndef untar(tarfname, path, what):\n \"\"\"\n extract tar archive to , all stuff starting with .\n\n return path to .\n \"\"\"\n\n def files(members):\n for tarinfo in members:\n if tarinfo.name.startswith(what):\n yield tarinfo\n\n with tarfile.open(tarfname, 'r') as tf:\n tf.extractall(path, members=files(tf))\n\n return os.path.join(path, what)\n\n\ndef repo_valid(path):\n \"\"\"\n utility function to check if borg can open a repository\n\n :param path: the path to the repository\n :returns: if borg can check the repository\n \"\"\"\n with Repository(str(path), exclusive=True, create=False) as repository:\n # can't check raises() because check() handles the error\n return repository.check()\n\n\ndef key_valid(path):\n \"\"\"\n check that the new keyfile is alright\n\n :param path: the path to the key file\n :returns: if the file starts with the borg magic string\n \"\"\"\n keyfile = os.path.join(get_keys_dir(),\n os.path.basename(path))\n with open(keyfile, 'r') as f:\n return f.read().startswith(KeyfileKey.FILE_ID)\n\n\ndef make_attic_repo(dir):\n \"\"\"\n create an attic repo with some stuff in it\n\n :param dir: path to the repository to be created\n :returns: path to attic repository\n \"\"\"\n # there is some stuff in that repo, copied from `RepositoryTestCase.test1`\n return untar(ATTIC_TAR, str(dir), 'repo')\n\n\n@pytest.fixture()\ndef attic_repo(tmpdir):\n return make_attic_repo(tmpdir)\n\n\n@pytest.fixture(params=[True, False])\ndef inplace(request):\n return request.param\n\n\ndef test_convert_segments(attic_repo, inplace):\n \"\"\"test segment conversion\n\n this will load the given attic repository, list all the segments\n then convert them one at a time. 
we need to close the repo before\n conversion otherwise we have errors from borg\n\n :param attic_repo: a populated attic repository (fixture)\n \"\"\"\n repo_path = attic_repo\n with pytest.raises(Repository.AtticRepository):\n repo_valid(repo_path)\n repository = AtticRepositoryUpgrader(repo_path, create=False)\n with repository:\n segments = [filename for i, filename in repository.io.segment_iterator()]\n repository.convert_segments(segments, dryrun=False, inplace=inplace)\n repository.convert_cache(dryrun=False)\n assert repo_valid(repo_path)\n\n\n@pytest.fixture()\ndef attic_key_file(tmpdir, monkeypatch):\n \"\"\"\n create an attic key file from the given repo, in the keys\n subdirectory of the given tmpdir\n\n :param tmpdir: a temporary directory (a builtin fixture)\n :returns: path to key file\n \"\"\"\n keys_dir = untar(ATTIC_TAR, str(tmpdir), 'keys')\n\n # we use the repo dir for the created keyfile, because we do\n # not want to clutter existing keyfiles\n monkeypatch.setenv('ATTIC_KEYS_DIR', keys_dir)\n\n # we use the same directory for the converted files, which\n # will clutter the previously created one, which we don't care\n # about anyways. in real runs, the original key will be retained.\n monkeypatch.setenv('BORG_KEYS_DIR', keys_dir)\n monkeypatch.setenv('ATTIC_PASSPHRASE', 'test')\n\n return os.path.join(keys_dir, 'repo')\n\n\ndef test_keys(attic_repo, attic_key_file):\n \"\"\"test key conversion\n\n test that we can convert the given key to a properly formatted\n borg key. assumes that the ATTIC_KEYS_DIR and BORG_KEYS_DIR have\n been properly populated by the attic_key_file fixture.\n\n :param attic_repo: path to an attic repository (fixture defined above)\n :param attic_key_file: path to an attic key file (fixture defined above)\n \"\"\"\n keyfile_path = attic_key_file\n assert not key_valid(keyfile_path) # not upgraded yet\n with AtticRepositoryUpgrader(attic_repo, create=False) as repository:\n keyfile = AtticKeyfileKey.find_key_file(repository)\n AtticRepositoryUpgrader.convert_keyfiles(keyfile, dryrun=False)\n assert key_valid(keyfile_path)\n\n\n@pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')\ndef test_convert_all(attic_repo, attic_key_file, inplace):\n \"\"\"test all conversion steps\n\n this runs everything. mostly redundant test, since everything is\n done above. 
yet we expect a NotImplementedError because we do not\n convert caches yet.\n\n :param attic_repo: path to an attic repository (fixture defined above)\n :param attic_key_file: path to an attic key file (fixture defined above)\n \"\"\"\n repo_path = attic_repo\n\n with pytest.raises(Repository.AtticRepository):\n repo_valid(repo_path)\n\n def stat_segment(path):\n return os.stat(os.path.join(path, 'data', '0', '0'))\n\n def first_inode(path):\n return stat_segment(path).st_ino\n\n orig_inode = first_inode(repo_path)\n with AtticRepositoryUpgrader(repo_path, create=False) as repository:\n # replicate command dispatch, partly\n os.umask(UMASK_DEFAULT)\n backup = repository.upgrade(dryrun=False, inplace=inplace) # note: uses hardlinks internally\n if inplace:\n assert backup is None\n assert first_inode(repository.path) == orig_inode\n else:\n assert backup\n assert first_inode(repository.path) != first_inode(backup)\n # i have seen cases where the copied tree has world-readable\n # permissions, which is wrong\n if 'BORG_TESTS_IGNORE_MODES' not in os.environ:\n assert stat_segment(backup).st_mode & UMASK_DEFAULT == 0\n\n assert key_valid(attic_key_file)\n assert repo_valid(repo_path)\n\n\n@pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')\ndef test_hardlink(tmpdir, inplace):\n \"\"\"test that we handle hard links properly\n\n that is, if we are in \"inplace\" mode, hardlinks should *not*\n change (ie. we write to the file directly, so we do not rewrite the\n whole file, and we do not re-create the file).\n\n if we are *not* in inplace mode, then the inode should change, as\n we are supposed to leave the original inode alone.\"\"\"\n a = str(tmpdir.join('a'))\n with open(a, 'wb') as tmp:\n tmp.write(b'aXXX')\n b = str(tmpdir.join('b'))\n os.link(a, b)\n AtticRepositoryUpgrader.header_replace(b, b'a', b'b', inplace=inplace)\n if not inplace:\n assert os.stat(a).st_ino != os.stat(b).st_ino\n else:\n assert os.stat(a).st_ino == os.stat(b).st_ino\n with open(b, 'rb') as tmp:\n assert tmp.read() == b'bXXX'\n","sub_path":"src/borg/testsuite/upgrader.py","file_name":"upgrader.py","file_ext":"py","file_size_in_byte":6905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"106638341","text":"import dgl\nfrom dgl.data import DGLDataset\nimport dgl.nn.pytorch as dglnn\nfrom dgl.nn.pytorch import GATConv, GraphConv, SAGEConv\nimport os.path as osp\nfrom sys import getsizeof\nimport argparse\n\nimport torch\ntorch.manual_seed(0)\ndgl.seed(0)\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport time\nimport numpy as np\nfrom tqdm import tqdm\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nclass IGL260MDataset(object):\n def __init__(self, root: str, size: str, in_memory: int, classes: int):\n self.dir = root\n self.size = size\n self.in_memory = in_memory\n self.num_classes = classes\n \n\n def num_features(self) -> int:\n return 1024\n \n\n def num_classes(self, type_of_class: str) -> int:\n if type_of_class == 'small':\n return 19\n else:\n return 2983\n\n @property\n def paper_feat(self) -> np.ndarray:\n path = osp.join(self.dir, self.size, 'processed', 'paper', 'node_feat.npy')\n if self.in_memory:\n return np.load(path)\n else:\n return np.load(path, mmap_mode='r')\n\n @property\n def paper_label(self) -> np.ndarray:\n if self.num_classes == 19:\n path = osp.join(self.dir, self.size, 'processed', 'paper', 'node_label_19.npy')\n else:\n path = osp.join(self.dir, 
self.size, 'processed', 'paper', 'node_label_2K.npy')\n if self.in_memory:\n return np.load(path)\n else:\n return np.load(path, mmap_mode='r')\n\n @property\n def paper_edge(self) -> np.ndarray:\n path = osp.join(self.dir, self.size, 'processed', 'paper__cites__paper', 'edge_index.npy')\n if self.in_memory:\n return np.load(path)\n else:\n return np.load(path, mmap_mode='r')\n\nclass IGL260M(DGLDataset):\n def __init__(self, args):\n self.dir = args.path\n super().__init__(name='IGB260M')\n\n def process(self):\n dataset = IGL260MDataset(root=self.dir, size=args.dataset_size, in_memory=args.in_memory, classes=args.num_classes)\n node_features = torch.from_numpy(dataset.paper_feat)\n node_edges = torch.from_numpy(dataset.paper_edge)\n node_labels = torch.from_numpy(dataset.paper_label).to(torch.long)\n\n self.graph = dgl.graph((node_edges[:, 0],node_edges[:, 1]), num_nodes=node_features.shape[0])\n\n self.graph.ndata['feat'] = node_features\n self.graph.ndata['label'] = node_labels\n \n self.graph = dgl.remove_self_loop(self.graph)\n self.graph = dgl.add_self_loop(self.graph)\n \n n_nodes = node_features.shape[0]\n\n n_train = int(n_nodes * 0.6)\n n_val = int(n_nodes * 0.2)\n \n train_mask = torch.zeros(n_nodes, dtype=torch.bool)\n val_mask = torch.zeros(n_nodes, dtype=torch.bool)\n test_mask = torch.zeros(n_nodes, dtype=torch.bool)\n \n train_mask[:n_train] = True\n val_mask[n_train:n_train + n_val] = True\n test_mask[n_train + n_val:] = True\n \n self.graph.ndata['train_mask'] = train_mask\n self.graph.ndata['val_mask'] = val_mask\n self.graph.ndata['test_mask'] = test_mask\n \n\n def __getitem__(self, i):\n return self.graph\n\n def __len__(self):\n return 1\n\nclass GCN(nn.Module):\n def __init__(self,\n in_feats,\n n_hidden,\n n_classes,\n n_layers,\n activation,\n dropout):\n super(GCN, self).__init__()\n self.layers = nn.ModuleList()\n self.n_layers = n_layers\n self.n_hidden = n_hidden\n self.n_classes = n_classes\n # input layer\n self.layers.append(GraphConv(in_feats, n_hidden, activation=activation))\n # hidden layers\n for i in range(n_layers - 1):\n self.layers.append(GraphConv(n_hidden, n_hidden, activation=activation))\n # output layer\n self.layers.append(GraphConv(n_hidden, n_classes))\n self.dropout = nn.Dropout(p=dropout)\n self.activation = activation\n\n def forward(self, blocks, x):\n h = x\n for l, (layer, block) in enumerate(zip(self.layers, blocks)):\n if l != len(self.layers) - 1:\n # h = self.activation(h)\n h = self.dropout(h)\n h = layer(block, h)\n return h\n\n def inference(self, g, x, batch_size, device):\n \"\"\"\n Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).\n g : the entire graph.\n x : the input of entire node set.\n The inference code is written in a fashion that it could handle any number of nodes and\n layers.\n \"\"\"\n # During inference with sampling, multi-layer blocks are very inefficient because\n # lots of computations in the first few layers are repeated.\n # Therefore, we compute the representation of all nodes layer by layer. 
The nodes\n # on each layer are of course splitted in batches.\n # TODO: can we standardize this?\n for l, layer in enumerate(self.layers):\n y = torch.zeros(g.number_of_nodes(), self.n_hidden if l !=\n len(self.layers) - 1 else self.n_classes)\n\n sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)\n dataloader = dgl.dataloading.NodeDataLoader(\n g,\n torch.arange(g.number_of_nodes()),\n sampler,\n batch_size=batch_size,\n shuffle=True,\n drop_last=False,\n num_workers=4)\n\n for input_nodes, output_nodes, blocks in dataloader:\n block = blocks[0]\n\n block = block.int().to(device)\n h = x[input_nodes].to(device)\n h = layer(block, h)\n if l != len(self.layers) - 1:\n h = self.activation(h)\n h = self.dropout(h)\n\n y[output_nodes] = h.cpu()\n\n x = y\n return y\nclass GAT(nn.Module):\n def __init__(\n self, in_feats, n_hidden, n_classes, n_layers, num_heads, activation\n ):\n super().__init__()\n self.n_layers = n_layers\n self.n_hidden = n_hidden\n self.n_classes = n_classes\n self.layers = nn.ModuleList()\n self.layers.append(\n dglnn.GATConv(\n (in_feats, in_feats),\n n_hidden,\n num_heads=num_heads,\n activation=activation,\n )\n )\n for i in range(1, n_layers - 1):\n self.layers.append(\n dglnn.GATConv(\n (n_hidden * num_heads, n_hidden * num_heads),\n n_hidden,\n num_heads=num_heads,\n activation=activation,\n )\n )\n self.layers.append(\n dglnn.GATConv(\n (n_hidden * num_heads, n_hidden * num_heads),\n n_classes,\n num_heads=num_heads,\n activation=None,\n )\n )\n\n def forward(self, blocks, x):\n h = x\n for l, (layer, block) in enumerate(zip(self.layers, blocks)):\n # We need to first copy the representation of nodes on the RHS from the\n # appropriate nodes on the LHS.\n # Note that the shape of h is (num_nodes_LHS, D) and the shape of h_dst\n # would be (num_nodes_RHS, D)\n h_dst = h[: block.num_dst_nodes()]\n # Then we compute the updated representation on the RHS.\n # The shape of h now becomes (num_nodes_RHS, D)\n if l < self.n_layers - 1:\n h = layer(block, (h, h_dst)).flatten(1)\n else:\n h = layer(block, (h, h_dst))\n h = h.mean(1)\n return h.log_softmax(dim=-1)\n\n def inference(self, g, x, batch_size, device):\n \"\"\"\n Inference with the GAT model on full neighbors (i.e. without neighbor sampling).\n g : the entire graph.\n x : the input of entire node set.\n The inference code is written in a fashion that it could handle any number of nodes and\n layers.\n \"\"\"\n # During inference with sampling, multi-layer blocks are very inefficient because\n # lots of computations in the first few layers are repeated.\n # Therefore, we compute the representation of all nodes layer by layer. 
The nodes\n # on each layer are of course splitted in batches.\n # TODO: can we standardize this?\n # TODO: make thiw into a variable\n num_heads = 2\n for l, layer in enumerate(self.layers):\n if l < self.n_layers - 1:\n y = torch.zeros(\n g.num_nodes(),\n self.n_hidden * num_heads\n if l != len(self.layers) - 1\n else self.n_classes,\n )\n else:\n y = torch.zeros(\n g.num_nodes(),\n self.n_hidden\n if l != len(self.layers) - 1\n else self.n_classes,\n )\n\n sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)\n dataloader = dgl.dataloading.DataLoader(\n g,\n torch.arange(g.num_nodes()),\n sampler,\n batch_size=batch_size,\n shuffle=True,\n drop_last=False,\n num_workers=4,\n )\n\n for input_nodes, output_nodes, blocks in dataloader:\n block = blocks[0].int().to(device)\n\n h = x[input_nodes].to(device)\n h_dst = h[: block.num_dst_nodes()]\n if l < self.n_layers - 1:\n h = layer(block, (h, h_dst)).flatten(1)\n else:\n h = layer(block, (h, h_dst))\n h = h.mean(1)\n h = h.log_softmax(dim=-1)\n\n y[output_nodes] = h.cpu()\n\n x = y\n return y\nclass SAGE(nn.Module):\n def __init__(self,\n in_feats,\n n_hidden,\n n_classes,\n n_layers,\n activation,\n dropout,\n aggregator_type):\n super().__init__()\n self.n_layers = n_layers\n self.n_hidden = n_hidden\n self.n_classes = n_classes\n self.layers = nn.ModuleList()\n self.layers.append(dglnn.SAGEConv(in_feats, n_hidden, aggregator_type))\n for i in range(1, n_layers - 1):\n self.layers.append(dglnn.SAGEConv(n_hidden, n_hidden, aggregator_type))\n self.layers.append(dglnn.SAGEConv(n_hidden, n_classes, aggregator_type))\n self.dropout = nn.Dropout(dropout)\n self.activation = activation\n\n def forward(self, blocks, x):\n h = x\n for l, (layer, block) in enumerate(zip(self.layers, blocks)):\n h = layer(block, h)\n if l != len(self.layers) - 1:\n h = self.activation(h)\n h = self.dropout(h)\n return h\n\n def inference(self, g, x, batch_size, device):\n \"\"\"\n Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).\n g : the entire graph.\n x : the input of entire node set.\n The inference code is written in a fashion that it could handle any number of nodes and\n layers.\n \"\"\"\n # During inference with sampling, multi-layer blocks are very inefficient because\n # lots of computations in the first few layers are repeated.\n # Therefore, we compute the representation of all nodes layer by layer. 
The nodes\n # on each layer are of course splitted in batches.\n # TODO: can we standardize this?\n for l, layer in enumerate(self.layers):\n y = torch.zeros(g.number_of_nodes(), self.n_hidden if l !=\n len(self.layers) - 1 else self.n_classes)\n\n sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)\n dataloader = dgl.dataloading.NodeDataLoader(\n g,\n torch.arange(g.number_of_nodes()),\n sampler,\n batch_size=batch_size,\n shuffle=True,\n drop_last=False,\n num_workers=4)\n\n for input_nodes, output_nodes, blocks in dataloader:\n block = blocks[0]\n\n block = block.int().to(device)\n h = x[input_nodes].to(device)\n h = layer(block, h)\n if l != len(self.layers) - 1:\n h = self.activation(h)\n h = self.dropout(h)\n\n y[output_nodes] = h.cpu()\n\n x = y\n return y\n\ndef compute_acc(pred, labels):\n \"\"\"\n Compute the accuracy of prediction given the labels.\n \"\"\"\n labels = labels.long()\n return (torch.argmax(pred, dim=1) == labels).float().sum() / len(pred)\n\ndef evaluate(model, g, inputs, labels, val_nid, batch_size, device):\n \"\"\"\n Evaluate the model on the validation set specified by ``val_nid``.\n g : The entire graph.\n inputs : The features of all the nodes.\n labels : The labels of all the nodes.\n val_nid : the node Ids for validation.\n batch_size : Number of nodes to compute at the same time.\n device : The GPU device to evaluate on.\n \"\"\"\n model.eval()\n with torch.no_grad():\n pred = model.inference(g, inputs, batch_size, device)\n model.train()\n return compute_acc(pred[val_nid], labels[val_nid])\n\ndef load_subtensor(g, seeds, input_nodes, device):\n \"\"\"\n Copys features and labels of a set of nodes onto GPU.\n \"\"\"\n batch_inputs = g.ndata['features'][input_nodes].to(device)\n batch_labels = g.ndata['labels'][seeds].to(device)\n return batch_inputs, batch_labels\n\ndef track_acc(g, args, model_type):\n train_accuracy = []\n test_accuracy = []\n g.ndata['features'] = g.ndata['feat']\n g.ndata['labels'] = g.ndata['label']\n in_feats = g.ndata['features'].shape[1]\n n_classes = args.num_classes\n\n # Create csr/coo/csc formats before launching training processes with multi-gpu.\n # This avoids creating certain formats in each sub-process, which saves momory and CPU.\n g.create_formats_()\n\n num_epochs = args.epochs\n num_hidden = args.hidden_channels\n num_layers = args.num_layers\n fan_out = args.fan_out\n batch_size = args.batch_size\n lr = args.learning_rate\n dropout = args.dropout\n num_workers = args.num_workers\n\n train_nid = torch.nonzero(g.ndata['train_mask'], as_tuple=True)[0]\n\n # Create PyTorch DataLoader for constructing blocks\n sampler = dgl.dataloading.MultiLayerNeighborSampler(\n [int(fanout) for fanout in fan_out.split(',')])\n \n dataloader = dgl.dataloading.NodeDataLoader(\n g,\n train_nid,\n sampler,\n batch_size=batch_size,\n shuffle=True,\n drop_last=False,\n num_workers=num_workers)\n\n # Define model and optimizer\n if model_type == 'gcn':\n model = GCN(in_feats, num_hidden, n_classes, 1, F.relu, dropout)\n if model_type == 'sage':\n model = SAGE(in_feats, num_hidden, n_classes, num_layers, F.relu, dropout, 'gcn')\n if model_type == 'gat':\n model = GAT(in_feats, num_hidden, n_classes, num_layers, 2, F.relu)\n\n model = model.to(device)\n loss_fcn = nn.CrossEntropyLoss()\n loss_fcn = loss_fcn.to(device)\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=args.decay)\n\n # Training loop\n avg = 0\n best_test_acc = 0\n log_every = 1\n training_start = time.time()\n for epoch in (range(num_epochs)):\n # Loop over 
the dataloader to sample the computation dependency graph as a list of\n # blocks.\n epoch_loss = 0\n gpu_mem_alloc = 0\n epoch_start = time.time()\n for step, (input_nodes, seeds, blocks) in (enumerate(dataloader)):\n # Load the input features as well as output labels\n #batch_inputs, batch_labels = load_subtensor(g, seeds, input_nodes, device)\n blocks = [block.int().to(device) for block in blocks]\n batch_inputs = blocks[0].srcdata['features']\n batch_labels = blocks[-1].dstdata['labels']\n\n # Compute loss and prediction\n batch_pred = model(blocks, batch_inputs)\n loss = loss_fcn(batch_pred, batch_labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n epoch_loss += loss.detach()\n\n gpu_mem_alloc += (\n torch.cuda.max_memory_allocated() / 1000000\n if torch.cuda.is_available()\n else 0\n )\n\n train_g = g\n train_nid = torch.nonzero(\n train_g.ndata['train_mask'], as_tuple=True)[0]\n train_acc = evaluate(\n model, train_g, train_g.ndata['features'], train_g.ndata['labels'], train_nid, batch_size, device)\n \n test_g = g\n test_nid = torch.nonzero(\n test_g.ndata['test_mask'], as_tuple=True)[0]\n test_acc = evaluate(\n model, test_g, test_g.ndata['features'], test_g.ndata['labels'], test_nid, batch_size, device)\n\n if test_acc.item() > best_test_acc:\n best_test_acc = test_acc.item()\n tqdm.write(\n \"Epoch {:05d} | Loss {:.4f} | Train Acc {:.4f} | Test Acc {:.4f} | Time {:.2f}s | GPU {:.1f} MB\".format(\n epoch,\n epoch_loss,\n train_acc.item(),\n test_acc.item(),\n time.time() - epoch_start,\n gpu_mem_alloc\n )\n )\n test_accuracy.append(test_acc.item())\n train_accuracy.append(train_acc.item())\n # torch.save(model.state_dict(), args.modelpath)\n print()\n print(\"Total time taken: \", time.time() - training_start)\n\n return best_test_acc, train_accuracy, test_accuracy\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--path', type=str, default='/mnt/nvme14/IGB260M/')\n\n parser.add_argument('--modelpath', type=str, default='gcn_19.pt')\n\n parser.add_argument('--dataset_size', type=str, default='tiny', choices=['tiny', 'small', 'medium', 'large', 'full'])\n parser.add_argument('--num_classes', type=int, default=19, choices=[19, 2983])\n parser.add_argument('--hidden_channels', type=int, default=16)\n parser.add_argument('--fan_out', type=str, default='5,10')\n parser.add_argument('--num_layers', type=int, default=2)\n parser.add_argument('--learning_rate', type=int, default=0.01)\n parser.add_argument('--decay', type=int, default=0.001)\n parser.add_argument('--num_workers', type=int, default=4)\n parser.add_argument('--batch_size', type=int, default=2048*16)\n parser.add_argument('--dropout', type=float, default=0.2)\n\n\n parser.add_argument('--epochs', type=int, default=20)\n\n parser.add_argument('--model', type=str, default='gcn',\n choices=['gat', 'sage', 'gcn'])\n parser.add_argument('--in_memory', type=int, default=1)\n parser.add_argument('--device', type=str, default='0')\n args = parser.parse_args()\n\n print(\"Dataset_size: \" + args.dataset_size) \n print(\"Model : \" + args.model)\n print(\"Num_classes : \" + str(args.num_classes))\n print()\n \n device = f'cuda:1' if torch.cuda.is_available() else 'cpu'\n\n dataset = IGL260M(args)\n g = dataset[0]\n\n best_test_acc, train_acc, test_acc = track_acc(g, args, model_type=args.model)\n\n print(f\"Train accuracy: {np.mean(train_acc):.2f} \\u00B1 {np.std(train_acc):.2f} \\t Best: {np.max(train_acc) * 100:.4f}%\")\n print(f\"Test accuracy: 
{np.mean(test_acc):.2f} \\u00B1 {np.std(test_acc):.2f} \\t Best: {np.max(test_acc) * 100:.4f}%\")\n print()\n print(\" -------- For debugging --------- \")\n print(\"Parameters: \", args)\n print(g)\n print(\"Train accuracy: \", train_acc)\n print(\"Test accuracy: \", test_acc)\n\n\n","sub_path":"results/IGB_tiny/gnn.py","file_name":"gnn.py","file_ext":"py","file_size_in_byte":20135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"423620409","text":"from random import *\n\n\n# Initializes game.\ndef new_game(size):\n return [[0 for col in range(size)] for row in range(size)]\n\n\n# Adds a two to the board.\ndef add_two(board):\n rand1 = randint(0, len(board) - 1)\n rand2 = randint(0, len(board) - 1)\n while board[rand1][rand2] != 0:\n rand1 = randint(0, len(board) - 1)\n rand2 = randint(0, len(board) - 1)\n board[rand1][rand2] = 2\n return board\n\n\n# Determines state of the game.\n# Returns -1 if loose, 0 if unknown, and 1 if win.\ndef get_game_state(board):\n last_row = len(board) - 1\n last_col = len(board[last_row]) - 1\n\n # Determines if the game is in win.\n for row in range(len(board)):\n for col in range(len(board[row])):\n if board[row][col] == 2048:\n return 1\n\n # Checks if it is possible to condense.\n for row in range(len(board) - 1):\n for col in range(len(board[row]) - 1):\n if (board[row][col] == board[row + 1][col] or\n board[row][col] == board[row][col + 1]):\n return 0\n\n # Checks for any zero entries.\n for row in range(len(board)):\n for col in range(len(board)):\n if board[row][col] == 0:\n return 0\n\n # Checks left/right entries on the last row.\n for col in range(last_col):\n if board[last_row][col] == board[last_row][col + 1]:\n return 0\n\n # Checks up/down on the last column.\n for row in range(last_row):\n if board[row][last_col] == board[row + 1][last_col]:\n return 0\n\n return -1\n\n\ndef reverse(board):\n new_board = []\n for row in range(len(board)):\n new_board.append([])\n for col in range(len(board[row])):\n value = board[row][len(board[row]) - col - 1]\n new_board[row].append(value)\n return new_board\n\n\n# .\ndef transpose(board):\n new_board = []\n for col in range(len(board[0])):\n new_board.append([])\n for row in range(len(board)):\n new_board[col].append(board[row][col])\n return new_board\n\n\n# Shifts everything to the left.\ndef shift_left(board):\n new_board = new_game(len(board))\n made_move = False\n\n for row in range(len(board)):\n count = 0\n for col in range(len(board[row])):\n if board[row][col] != 0:\n new_board[row][count] = board[row][col]\n if col != count:\n made_move = True\n count += 1\n return (new_board, made_move)\n\n\n# Merges blocks.\ndef merge(board):\n made_move = False\n for row in range(len(board)):\n for col in range(len(board[row]) - 1):\n if (board[row][col] == board[row][col + 1] and\n board[row][col] != 0):\n board[row][col] *= 2\n board[row][col + 1] = 0\n made_move = True\n return (board, made_move)\n\n\n# Moves the board.\ndef move_board(game, direction):\n has_shift = False\n has_merge = False\n\n if direction == 'w':\n print('... UP ...')\n game = transpose(game)\n game, has_shift = shift_left(game)\n game, has_merge = merge(game)\n game, _ = shift_left(game)\n game = transpose(game)\n elif direction == 'a':\n print('... LEFT ...')\n game, has_shift = shift_left(game)\n game, has_merge = merge(game)\n game, _ = shift_left(game)\n elif direction == 's':\n print('... 
RIGHT ...')\n        game = reverse(game)\n        game, has_shift = shift_left(game)\n        game, has_merge = merge(game)\n        game, _ = shift_left(game)\n        game = reverse(game)\n    elif direction == 'z':\n        print('... DOWN ...')\n        game = reverse(transpose(game))\n        game, has_shift = shift_left(game)\n        game, has_merge = merge(game)\n        game, _ = shift_left(game)\n        game = transpose(reverse(game))\n    else:\n        print('... INVALID MOVE ...')\n\n    made_move = has_shift or has_merge\n    return (game, made_move)\n\n\n# Prints the board.\ndef print_board(board):\n    for row in board:\n        for col in row:\n            print('{0} '.format(col), end=' ')\n        print()\n    print()\n\n\ndef main():\n    # Initialize board.\n    board = new_game(size=4)\n    add_two(board)\n    add_two(board)\n    print_board(board)\n\n    # Loop until the game ends.\n    game_state = 0\n\n    while game_state == 0:\n        direction = input('Enter direction to move (w = UP, a = LEFT, s = RIGHT, z = DOWN): ')\n        board, made_move = move_board(board, direction)\n        if made_move:\n            add_two(board)\n\n        # Prints the board.\n        print_board(board)\n\n        # Set variables for next loop.\n        game_state = get_game_state(board)\n\n    # Prints the results of the game.\n    if game_state == 1:\n        print('YOU WON!')\n    else:\n        print('YOU LOST')\n\nif __name__ == '__main__':\n    main()\n","sub_path":"src/test_files/2048.py","file_name":"2048.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"42516355","text":"# Go, codeorgs!\n# So: save the number of hits into a list, then run through that list in a loop\n# and wherever a list element equals the existing list, write in an X\nimport os\n\na_list = [x for x in range (1, 26)]\nb_list = [x for x in range (1, 26)]\nb_list_w_hit = []\n\n# clear the whole screen\ndef cls():\n    os.system('cls' if os.name=='nt' else 'clear')\n\n\n# print the board\ndef a_print_board_to_choose():\n    print (\"\"\"Player 'A' can choose five numbers :)\"\"\")\n    for x in range(len(a_list)):\n        if a_list[x] % 5 == 0:\n            print(a_list[x], \"\\n\")\n        else:\n            print (a_list[x], end= \"\\t\")\n\n\ndef a_choose(a_list): \n    y = 0\n    i = 0\n    a_shoot_list = []\n    \n    while (y < 5):\n        a_shoot_num = int(input(\"Where do you want to shoot: \"))\n        # match a_shoot_num against the corresponding element of a_list\n        for x in range(len(a_list)):\n            if (a_shoot_num == a_list[x]):\n                a_list[x] = \"X\"\n                a_shoot_list.append(a_shoot_num) \n            else:\n                continue\n        \n        for item in a_list:\n            if (i == 4 or i == 9 or i == 14 or i == 19 or i == 24):\n                print(a_list[i], \"\\n\")\n                i += 1\n            else:\n                print (a_list[i], end= \"\\t\")\n                i += 1\n        i = 0\n        y += 1\n        \n    return (a_shoot_list)\n\ndef a_hit(shoot_list):\n    # create an arbitrary list and fill it\n    b_list = [1, 2, 3, 4, 5, \"X\", 7, \"X\", 9, 10, 11, 12, \"X\", 14, 15, \"X\", 17, 18, 19, 20, 21, \"X\", 23, 24, 25]\n\n\n    for x, item in enumerate(shoot_list):\n        if (b_list[item - 1] == \"X\"):\n            print (\"Hit: \", item, \"\\n\")\n            b_list[item - 1] = \"Hit\"\n        else:\n            print (\"Miss: \", item, \"\\n\")\n            b_list[item - 1] = \"Miss\"\n    \n    # the problem with this is that it gets written into b_list_w_hit five times\n    return (b_list)\n\ndef b_w_hit_print(b_list_w_hit):\n\n    for x in range(0,5):\n        for index, item in enumerate(b_list_w_hit):\n            if (x == 4 or x == 9 or x == 14 or x == 19 or x == 24):\n                print(b_list_w_hit[x][index], \"\\n\")\n            \n            else:\n                print (b_list_w_hit[x][index], end= \"\\t\")\n\n\ndef main():\n    # print the table\n    a_print_board_to_choose()\n    \n    # create a list and save the return value 
of the a_choose func\n    a_shoot_list = a_choose(a_list)\n    cls()\n    print(\"You chose the following coordinates: \", a_shoot_list, \"\\n\")\n\n    b_list_w_hit = a_hit(a_shoot_list)\n    b_w_hit_print(b_list_w_hit)\n    # print (b_list_w_hit)\n    # b_list_w_hit.append(a_hit(a_shoot_list))\n    \n\n    #a_shoot_num = int(input(\"Where do you want to shoot: \"))\n    #a_shoot_list.append(a_shoot_num)\n    #a_choose(a_shoot_num)\n    #b_list_w_hit.append(a_hit(a_shoot_list))\n\n    # cls()\n    \n    #print (b_list_w_hit)\n    # b_w_hit_print(b_list_w_hit)\n    #print (\"b_list with hits: \", b_list_w_hit, \"\\n\")\n\n\n\nmain()","sub_path":"game8.py","file_name":"game8.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"129959565","text":"#!/usr/bin/env python3\n# vim: set fileencoding=utf-8\n\nimport requests\n\n# address of a test REST API service\nURL = \"https://httpbin.org/image\"\n\n# header sent with the request\nheaders = {\"accept\": \"image/jpeg\"}\n\n# send an HTTP GET request\nresponse = requests.get(URL, headers=headers)\n\n# read the response headers\nheaders = response.headers\n\n# print the internet media type\nprint(\"Data type:\", headers.get(\"content-type\"))\n\n# print the length of the data carried in the body\nprint(\"Data length:\", headers.get(\"content-length\"))\n\nprint(response.raw)\n\nwith open(\"test2.jpg\", \"wb\") as fout:\n    for block in response.iter_content(chunk_size=128):\n        fout.write(block)\n","sub_path":"Python2/examples/requests/15_binary_data_by_header_jpeg.py","file_name":"15_binary_data_by_header_jpeg.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"273127714","text":"def hbb():\n    max_h = 0\n    for i in range(n-2, 0, -1):\n        temp = (total[-1] - honey[n-1] - honey[i]) + (total[i] - honey[i])\n        max_h = max(max_h, temp)\n    return max_h\n\ndef bhb():\n    max_h = 0\n    for i in range(1, n-1):\n        temp = (total[i] - honey[0]) + (total[-1] - total[i-1] - honey[-1])\n        max_h = max(max_h, temp)\n    return max_h\n\ndef bbh():\n    max_h = 0\n    for i in range(1, n-1):\n        temp = (total[-1] - honey[0] - honey[i]) + (total[-1] - total[i])\n        max_h = max(max_h, temp)\n    return max_h\n\nn = int(input())\nhoney = list(map(int, input().split()))\ntotal = []\ntotal.append(honey[0])\nfor i in range(1, n):\n    total.append(total[i-1] + honey[i])\n\nresult = max(hbb(), bhb(), bbh())\nprint(result)","sub_path":"Baekjoon training/Python/Baekjoon - 21758 (꿀 따기).py","file_name":"Baekjoon - 21758 (꿀 따기).py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"425047299","text":"from manimlib.imports import *\r\n\r\n\r\nclass Type(Animation):\r\n    CONFIG = {\r\n        \"suspend_mobject_updating\": False,\r\n        \"int_func\": np.floor,\r\n        \"rate_func\": linear,\r\n        \"run_time\": None,\r\n    }\r\n\r\n    def __init__(self, mobjects, cursor=None, **kwargs):\r\n        self.speed = kwargs.pop('speed', 10)\r\n        self.all_submobs = []\r\n        self.positions = []\r\n        max_width = 0\r\n        # print('mobjects.submobjects', len(mobjects.submobjects))\r\n        for mobject in mobjects.submobjects:\r\n            x = None\r\n            for subobject in mobject.submobjects:\r\n                if isinstance(subobject, VMobjectFromSVGPathstring):\r\n                    max_width = max(max_width, subobject.get_width())\r\n                    x = subobject.get_x()\r\n                    self.positions.append((len(self.all_submobs), x, mobject.get_y()))\r\n                    self.all_submobs.append(subobject)\r\n                    x += subobject.get_width()\r\n            
if x:\r\n self.positions.append((len(self.all_submobs), x, mobject.get_y()))\r\n self.positions.append(self.positions[-1])\r\n self.positions.append(self.positions[-1])\r\n # print('len', len(self.positions))\r\n self.gap = max_width / 3\r\n self.cursor = cursor\r\n super().__init__(mobjects, **kwargs)\r\n self.run_time = self.run_time or len(self.positions)/self.speed\r\n\r\n def interpolate_mobject(self, alpha):\r\n n_submobs = len(self.positions)\r\n index = int(self.int_func(alpha * n_submobs))\r\n self.update_submobject_list(index)\r\n\r\n def update_submobject_list(self, index):\r\n last_idx = max(0, index - 1)\r\n idx, x, y = self.positions[last_idx]\r\n display_objs = self.all_submobs[:idx]\r\n if self.cursor:\r\n self.cursor.set_x(x + self.gap).set_y(y)\r\n display_objs.append(self.cursor)\r\n self.mobject.submobjects = display_objs\r\n","sub_path":"from_rahul/utils/type_animation.py","file_name":"type_animation.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"496106141","text":"#\n# [433] Minimum Genetic Mutation\n#\n# https://leetcode.com/problems/minimum-genetic-mutation/description/\n#\n# algorithms\n# Medium (35.54%)\n# Total Accepted: 14.6K\n# Total Submissions: 41.1K\n# Testcase Example: '\"AACCGGTT\"\\n\"AACCGGTA\"\\n[\"AACCGGTA\"]'\n#\n# A gene string can be represented by an 8-character long string, with choices\n# from \"A\", \"C\", \"G\", \"T\".\n#\n# Suppose we need to investigate about a mutation (mutation from \"start\" to\n# \"end\"), where ONE mutation is defined as ONE single character changed in the\n# gene string.\n#\n# For example, \"AACCGGTT\" -> \"AACCGGTA\" is 1 mutation.\n#\n# Also, there is a given gene \"bank\", which records all the valid gene\n# mutations. A gene must be in the bank to make it a valid gene string.\n#\n# Now, given 3 things - start, end, bank, your task is to determine what is the\n# minimum number of mutations needed to mutate from \"start\" to \"end\". 
If there\n# is no such mutation, return -1.\n#\n# Note:\n#\n#\n# Starting point is assumed to be valid, so it might not be included in the\n# bank.\n# If multiple mutations are needed, all mutations in the sequence must\n# be valid.\n# You may assume the start and end strings are not the same.\n#\n#\n#\n#\n# Example 1:\n#\n#\n# start: \"AACCGGTT\"\n# end: \"AACCGGTA\"\n# bank: [\"AACCGGTA\"]\n#\n# return: 1\n#\n#\n#\n#\n# Example 2:\n#\n#\n# start: \"AACCGGTT\"\n# end: \"AAACGGTA\"\n# bank: [\"AACCGGTA\", \"AACCGCTA\", \"AAACGGTA\"]\n#\n# return: 2\n#\n#\n#\n#\n# Example 3:\n#\n#\n# start: \"AAAAACCC\"\n# end: \"AACCCCCC\"\n# bank: [\"AAAACCCC\", \"AAACCCCC\", \"AACCCCCC\"]\n#\n# return: 3\n#\n#\n#\n#\n#\n\n# Jarron:\n# - BFS over the gene bank, expanding one valid mutation at a time\n\nfrom collections import deque\n\n\nclass Solution(object):\n    def minMutation(self, start, end, bank):\n        \"\"\"\n        :type start: str\n        :type end: str\n        :type bank: List[str]\n        :rtype: int\n        \"\"\"\n        bank = set(bank)\n        bank.discard(start)\n\n        dp = dict()\n        pools = deque([start])\n        dp[start] = 0\n\n        def mutate_gene(gene):\n            # Yield every bank gene exactly one character away from `gene`,\n            # removing it from the bank so each gene is visited only once.\n            for candidate in list(bank):\n                if sum(gene[i] != candidate[i] for i in range(len(gene))) == 1:\n                    bank.discard(candidate)\n                    yield candidate\n\n        while pools and end not in dp:\n            gene = pools.popleft()\n            cost = dp[gene]\n\n            for next_gene in mutate_gene(gene):\n                pools.append(next_gene)\n                dp[next_gene] = cost + 1\n\n        if end in dp:\n            return dp[end]\n        else:\n            return -1\n","sub_path":"src/433.minimum-genetic-mutation.py","file_name":"433.minimum-genetic-mutation.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"242971529","text":"class Solution(object):\n    def canPermutePalindrome(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: bool\n        \"\"\"\n        # A palindrome permutation exists iff at most one character count is odd.\n        import collections\n        word_dict = collections.defaultdict(int)\n        for char in s:\n            word_dict[char] += 1\n        odd_count = 0\n        for key in word_dict.keys():\n            if word_dict[key] % 2 == 1:\n                odd_count += 1\n            if odd_count > 1:\n                return False\n        return True\n","sub_path":"palindrome_permutation/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"619353651","text":"import torchvision.models.resnet as Resnet\n\nimport torch\nimport torch.nn as nn\n\nimport numpy as np\n\nclass Model(nn.Module):\n    def __init__(self, num_classes, pretrained_path):\n        super(Model, self).__init__()\n\n        self.num_classes = num_classes\n        self.pretrained_path = pretrained_path\n\n        self.model = Resnet.resnet50(pretrained=False)\n        if self.pretrained_path != \"\":\n            pth = torch.load(pretrained_path)\n            self.model.load_state_dict(pth)\n            print(\"Loaded pretrained weights successfully.\")\n        num_ftrs = self.model.fc.in_features\n        self.model.fc = nn.Linear(num_ftrs, self.num_classes)\n    \n    def forward(self, x):\n        x = self.model(x)\n\n        return x\n\nif __name__ == \"__main__\":\n    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n    model_path = \"/Users/hanxu/Documents/code/pytorch_model/resnet/resnet50-19c8e357.pth\"\n    model = Model(num_classes=5, pretrained_path=model_path).to(device)\n    model.eval() # evaluation mode\n    # print(model)\n    x = torch.randn(1, 3, 224, 224) # simulate a 224*224 image, batch_size=1, 3 channels\n    pre = model(x.to(device))\n    print(pre.shape, pre)\n\n    \n    ","sub_path":"model/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
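A minimal companion sketch for the resnet.py record above (an editorial illustration, not part of any dataset record): it assumes the common fine-tuning variant of freezing the pretrained ResNet-50 backbone so that only the newly attached classifier head receives gradients. The SGD optimizer and learning rate are assumptions; only the 5-class head size mirrors the record.

import torch
import torch.nn as nn
import torchvision.models.resnet as Resnet

model = Resnet.resnet50(pretrained=False)
for param in model.parameters():
    param.requires_grad = False                    # freeze the pretrained backbone
model.fc = nn.Linear(model.fc.in_features, 5)      # new 5-class head; trainable by default
head_params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(head_params, lr=1e-3)  # only the head is optimized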
+{"seq_id":"271197632","text":"# -*- coding: utf-8 -*-\n\n__author__ = \"Helge Helo Klemetsdal, Adam Julius Olof Kviman\"\n__email__ = \"hegkleme@nmbu.no, juliukvi@nmbu.no\"\n\nimport pytest\nfrom biosim.simulation import BioSim\nimport glob\nimport os\nimport os.path\nimport shutil\n\n\ndef test_simulation_set_animal_parameters():\n \"\"\"Test to see that incorrect species string gives ValueError\"\"\"\n sim = BioSim(island_map=\"OO\\nOO\", ini_pop=[], seed=1)\n with pytest.raises(ValueError):\n sim.set_animal_parameters(\"Omnivore\", {\"w_birth\": 8.0})\n\n\ndef test_simulation_set_landscape_parameters():\n \"\"\"Test to see that incorrect landscape string gives ValueError\"\"\"\n sim = BioSim(island_map=\"OO\\nOO\", ini_pop=[], seed=1)\n with pytest.raises(ValueError):\n sim.set_landscape_parameters(\"D\", {\"fodder\": 8.0})\n\n\ndef test_simulation_make_movie_no_base():\n \"\"\"Test to see that trying to create movies with no img_base raises\n RuntimeError\"\"\"\n sim = BioSim(island_map=\"OO\\nOO\", ini_pop=[], seed=1)\n sim.simulate(5, 1)\n with pytest.raises(RuntimeError):\n sim.make_movie()\n\n\n@pytest.fixture\ndef figfile_root():\n \"\"\"Provide name for figfile root and delete figfiles after test completes\n \"\"\"\n ffroot = os.path.join(\".\", \"dv\")\n yield ffroot\n for f in glob.glob(ffroot + \"_0*.png\"):\n os.remove(f)\n\n\ndef test_simulation_make_movie_mp4(figfile_root):\n \"\"\"Test to see that movie can be made with mp4 format\"\"\"\n # Kommentar: Går greit å feile med tox\n sim = BioSim(\n island_map=\"OO\\nOO\", ini_pop=[], seed=1, img_base=figfile_root\n )\n sim.simulate(5, 1)\n sim.make_movie()\n assert os.path.isfile(figfile_root + \".mp4\")\n\n\ndef test_simulation_make_movie_gif(figfile_root):\n \"\"\"Test to see that movie can be made with gif format\"\"\"\n sim = BioSim(\n island_map=\"OO\\nOO\", ini_pop=[], seed=1, img_base=figfile_root\n )\n sim.simulate(5, 1)\n sim.make_movie(movie_fmt=\"gif\")\n assert os.path.isfile(figfile_root + \".gif\")\n\n\ndef test_simulation_large_island():\n \"\"\"Test to see that a island with column or row length bigger than 23\n initialize self._large_island.\n \"\"\"\n map = \"\"\"\\\n OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO\n ODDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDO\n ODDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDO\n ODDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDO\n ODDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDO\n ODDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDO\n ODDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDO\n ODDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDO\n OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO\"\"\"\n sim = BioSim(island_map=map, ini_pop=[], seed=1)\n sim.simulate(1, 1)\n assert sim._large_island\n","sub_path":"tests/test_simulation.py","file_name":"test_simulation.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"38554807","text":"import mock\n\nfrom promstats import Stats\n\n\nclass AsyncTestMixin:\n \"\"\"Implements tests for py3.5-only functionality.\"\"\"\n\n def test_asynctimed(self):\n\n import asyncio\n loop = asyncio.get_event_loop()\n\n stats = Stats('greasyspoon')\n ret_value = mock.Mock()\n\n @stats.asynctimed(\n 'bacon',\n ['sausage:eggs', 'ham:scrambled_eggs'],\n verbose_name='Number of eggs in a basket.')\n async def measured():\n await asyncio.sleep(1)\n return ret_value\n\n result = loop.run_until_complete(\n asyncio.ensure_future(measured(), loop=loop))\n\n self.assertIs(result, ret_value)\n bacon_metric = stats.metrics.get('bacon')\n\n 
self.assertEqual(bacon_metric._type, 'histogram')\n self.assertSetEqual(set(bacon_metric._labelnames), {'sausage', 'ham'})\n\n # The last entry in samples - a concrete timing.\n self.assertGreater(bacon_metric.collect()[0].samples[-1][-1], 1)\n","sub_path":"tests/_async.py","file_name":"_async.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"499755074","text":"from django.shortcuts import render\nfrom django.views import generic\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom .models import AwGrape\nfrom .forms import AwGrapeForm\nfrom datetime import datetime\nfrom datetime import date\nfrom django.urls import reverse_lazy\nfrom django.core.files.base import ContentFile\nimport base64\n# Create your views here.\n\n\n@method_decorator(login_required , name=\"dispatch\")\nclass ManageGrapeView(SuccessMessageMixin,generic.ListView):\n queryset = AwGrape.objects.all().order_by(\"-id\")\n template_name = 'admin/grape/index.html'\n\n def get_context_data(self, *args,**kwargs):\n context = super(ManageGrapeView,self).get_context_data(*args,**kwargs)\n context['Page_title'] = \"Manage Grape\"\n print(context)\n return context\n\n@method_decorator(login_required , name=\"dispatch\")\nclass CreateGrapeView(SuccessMessageMixin,generic.CreateView):\n form_class = AwGrapeForm\n template_name = 'admin/grape/create.html'\n\n\n def get_success_message(self, cleaned_data):\n print(cleaned_data)\n return \"Grape add successfully.\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['Page_title'] = \"Add Grape\"\n return context\n\n def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.Created_by = self.request.user\n self.object.Updated_by = self.request.user\n # print(\"=================\")\n if self.request.POST[\"Grape_Image\"]:\n format, imgstr = self.request.POST[\"Grape_Image\"].split(';base64,')\n ext = format.split('/')[-1]\n dateTimeObj = datetime.now()\n today_date = date.today()\n set_file_name = str(today_date.day) + \"_\" + str(today_date.month) + \"_\" + str(today_date.year) + \"_\" +str(dateTimeObj.microsecond)\n file_name = set_file_name + \".\" + ext\n data = ContentFile(base64.b64decode(imgstr), name=file_name)\n self.object.Grape_Image = data\n # print(\"===============\")\n self.object.save()\n form.save_m2m()\n return super().form_valid(form)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass GrapeUpdateView(SuccessMessageMixin, generic.UpdateView):\n form_class = AwGrapeForm\n template_name = 'admin/grape/create.html'\n queryset = AwGrape.objects.all()\n\n def get_success_message(self, cleaned_data):\n print(cleaned_data)\n return \"Grape update successfully.\"\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super().get_context_data(**kwargs)\n # Add in a QuerySet of all the books\n context['Page_title'] = \"Edit Grape\"\n\n return context\n\n def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.Updated_by = self.request.user\n # print(\"=================\")\n if self.request.POST[\"Grape_Image\"]:\n format, imgstr = self.request.POST[\"Grape_Image\"].split(';base64,')\n ext = format.split('/')[-1]\n dateTimeObj = datetime.now()\n today_date = date.today()\n set_file_name = 
str(today_date.day) + \"_\" + str(today_date.month) + \"_\" + str(today_date.year) + \"_\" + str(\n dateTimeObj.microsecond)\n file_name = set_file_name + \".\" + ext\n data = ContentFile(base64.b64decode(imgstr), name=file_name)\n self.object.Grape_Image = data\n self.object.Updated_date = datetime.now()\n self.object.save()\n form.save_m2m()\n return super().form_valid(form)\n\n\nclass GrapeDeleteView(SuccessMessageMixin,generic.DeleteView):\n model = AwGrape\n template_name = 'admin/grape/delete.html'\n success_url = reverse_lazy('admin_manage_grape:grape')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['Page_title'] = \"Delete Grape\"\n return context\n\n def get_success_message(self, cleaned_data):\n print(cleaned_data)\n return \"Grape remove successfully.\"","sub_path":"aromawine3-new_update__with_checkout/admin_manage_grape/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"462927878","text":"from django import template\r\nfrom django.contrib.auth.models import Group \r\n\r\nregister = template.Library()\r\n\r\n#using has_group definition to validate users are a member of the correct group\r\n#prior to serving them certain pages and functionality, the \"call\" is in each template\r\n#page as needed.\r\n#more information about how to do this can be found at:\r\n#https://docs.djangoproject.com/en/2.2/howto/custom-template-tags/\r\n\r\n@register.filter(name='has_group')\r\ndef has_group(user, group_name): \r\n group = Group.objects.get(name=group_name) \r\n return True if group in user.groups.all() else False\r\n","sub_path":"risk_registry/risk_registries/templatetags/auth_extras.py","file_name":"auth_extras.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"619256123","text":"from typing import List\n\nclass Solution:\n def lengthOfLastWord(self, s: str) -> int:\n l = 0\n s = s[::-1].strip()\n\n for c in s:\n if c == \" \":\n break\n else:\n l += 1\n\n return(l)\n\ntestcase = 'a '\nmysol = Solution() \nprint(mysol.lengthOfLastWord(testcase))\n ","sub_path":"58_LengthLastWord/3rdRightToLeft.py","file_name":"3rdRightToLeft.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"321902828","text":"# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom textwrap import dedent\n\nfrom pants.backend.helm.util_rules.chart_metadata import DEFAULT_API_VERSION, ChartType\n\n\ndef gen_chart_file(\n name: str,\n *,\n version: str,\n description: str | None = None,\n type: ChartType = ChartType.APPLICATION,\n api_version: str = DEFAULT_API_VERSION,\n icon: str | None = None,\n) -> str:\n metadata_yaml = dedent(\n f\"\"\"\\\n apiVersion: {api_version}\n name: {name}\n version: {version}\n type: {type.value}\n \"\"\"\n )\n if description:\n metadata_yaml += f\"description: {description}\\n\"\n if icon:\n metadata_yaml += f\"icon: {icon}\\n\"\n return metadata_yaml\n\n\nHELM_CHART_FILE = gen_chart_file(\"mychart\", version=\"0.1.0\")\n\nHELM_CHART_WITH_DEPENDENCIES_FILE = dedent(\n \"\"\"\\\n apiVersion: v2\n name: mychart\n description: A Helm chart for Kubernetes\n version: 0.1.0\n icon: 
https://www.example.com/icon.png\n dependencies:\n - name: other_chart\n repository: \"@myrepo\"\n version: \"~0.1.0\"\n alias: dependency_alias\n \"\"\"\n)\n\nHELM_CHART_FILE_V1_FULL = dedent(\n \"\"\"\\\n name: foo\n version: 0.1.0\n kubeVersion: 1.17\n description: The foo chart\n keywords:\n - foo\n - chart\n home: https://example.com\n sources:\n - https://example.com/git\n dependencies:\n - name: bar\n version: 0.2.0\n repository: https://example.com/repo\n condition: bar.enabled\n tags:\n - foo\n - bar\n import-values:\n - data\n alias: bar-alias\n maintainers:\n - name: foo\n email: bar@example.com\n url: https://example.com/foo\n icon: https://example.com/icon.png\n appVersion: 0.1.0\n deprecated: true\n annotations:\n example: yes\n name: foo\n \"\"\"\n)\n\nHELM_CHART_FILE_V2_FULL = dedent(\n \"\"\"\\\n apiVersion: v2\n name: quxx\n version: 0.1.0\n kubeVersion: 1.17\n description: The foo chart\n type: library\n keywords:\n - foo\n - chart\n home: https://example.com\n sources:\n - https://example.com/git\n dependencies:\n - name: bar\n version: 0.2.0\n repository: https://example.com/repo\n condition: bar.enabled\n tags:\n - foo\n - bar\n import-values:\n - data\n alias: bar-alias\n maintainers:\n - name: foo\n email: bar@example.com\n url: https://example.com/foo\n icon: https://example.com/icon.png\n appVersion: 0.1.0\n deprecated: true\n annotations:\n example: yes\n name: quxx\n \"\"\"\n)\n\nK8S_SERVICE_FILE = dedent(\n \"\"\"\\\n apiVersion: v1\n kind: Service\n metadata:\n name: {{ template \"fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n spec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.externalPort }}\n targetPort: {{ .Values.service.internalPort }}\n protocol: TCP\n name: {{ .Values.service.name }}\n selector:\n app: {{ template \"fullname\" . }}\n \"\"\"\n)\n\nK8S_INGRESS_FILE_WITH_LINT_WARNINGS = dedent(\n \"\"\"\\\n apiVersion: extensions/v1beta1\n kind: Ingress\n metadata:\n name: {{ template \"fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n spec:\n rules:\n - host: example.com\n http:\n paths:\n - path: /\n pathType: Prefix\n backend:\n service:\n name: {{ template \"fullname\" . }}\n port:\n name: http\n \"\"\"\n)\n\nK8S_POD_FILE = dedent(\n \"\"\"\\\n apiVersion: v1\n kind: Pod\n metadata:\n name: {{ template \"fullname\" . 
}}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n spec:\n containers:\n - name: myapp-container\n image: busybox:1.28\n initContainers:\n - name: init-service\n image: busybox:1.29\n \"\"\"\n)\n\nHELM_TEMPLATE_HELPERS_FILE = dedent(\n \"\"\"\\\n {{- define \"fullname\" -}}\n {{- if .Values.fullnameOverride }}\n {{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" }}\n {{- else }}\n {{- $name := default .Chart.Name .Values.nameOverride }}\n {{- if contains $name .Release.Name }}\n {{- .Release.Name | trunc 63 | trimSuffix \"-\" }}\n {{- else }}\n {{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" }}\n {{- end }}\n {{- end }}\n {{- end }}\n \"\"\"\n)\n\nHELM_VALUES_FILE = dedent(\n \"\"\"\\\n service:\n name: test\n type: ClusterIP\n externalPort: 80\n internalPort: 1223\n \"\"\"\n)\n","sub_path":"src/python/pants/backend/helm/testutil.py","file_name":"testutil.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"289905424","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 19 09:45:40 2018\n\n@author: felipe\n\"\"\"\n\nimport numpy as np\nimport scipy as sp\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom ERANataf import ERANataf\nfrom ERADist import ERADist\nfrom aBUS_SuS import aBUS_SuS\nmatplotlib.rcParams.update({'font.size': 22})\nmatplotlib.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})\nmatplotlib.rcParams['text.usetex'] = True\nplt.close('all')\n\n# %% prior\nn = 1 # number of random variables (dimensions)\n\n# assign data: 1st variable is normal\nmu_x = 0\nsigma_x = 1\ndist_x1 = ERADist('normal','PAR',[mu_x, sigma_x])\n\n# distributions\ndist_X = [dist_x1]\n\n# correlation matrix\nR = np.eye(n) # independent case\n\n# object with distribution information\nT_nataf = ERANataf(dist_X,R)\n\n# %% likelihood\ny_tilde = 2\nmu_nu = 0\nsigma_nu = 0.5\nlikelihood = lambda x: sp.stats.norm.pdf(y_tilde-x, loc=mu_nu, scale=sigma_nu)\nlog_likelihood = lambda x: np.log(likelihood(x))\n\n# %% c constant\nc = 0.5*np.sqrt(2*np.pi)\nc_hat = 1/c\n\n# %% BUS-SuS\nN = 2000 # number of samples per level\np0 = 0.1 # probability of each subset\n\n# run the BUS_SuS.m function\n[b,samplesU,samplesX,cE,c,lam] = aBUS_SuS(N,p0,log_likelihood,T_nataf)\n \n# %% results\nmu_exact = 1.6 # if sigma_nu = 0.5\nsigma_exact = 0.45 # if sigma_nu = 0.5\nmu_xp = np.mean(samplesX[-1][0,:])\nsigma_xp = np.std(samplesX[-1][0,:])\nprint('\\nExact mean',mu_exact,'\\nExact std:',sigma_exact,'\\n')\nprint('Sample mean',mu_xp,'\\nSample std:',sigma_xp,'\\n')\n\n# %% END","sub_path":"03_aBUS/python/main_example_1.py","file_name":"main_example_1.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"452192054","text":"#######################################################\n#\n# FileName:normalize.py\n# date:2016/4/30\n# author: Taiki Kadomae\n#\n#######################################################\n# encoding: utf8\nfrom __future__ import unicode_literals\nimport re, sys, gzip\nimport unicodedata\n\ndef unicode_normalize(cls, s):\n pt = re.compile('([{}]+)'.format(cls))\n\n def norm(c):\n return unicodedata.normalize('NFKC', c) if pt.match(c) else c\n\n s = ''.join(norm(x) for x in re.split(pt, s))\n return s\n\ndef remove_extra_spaces(s):\n s = re.sub('[  ]+', ' ', s)\n blocks = ''.join(('\\u4E00-\\u9FFF', # CJK 
UNIFIED IDEOGRAPHS\n '\\u3040-\\u309F', # HIRAGANA\n '\\u30A0-\\u30FF', # KATAKANA\n '\\u3000-\\u303F', # CJK SYMBOLS AND PUNCTUATION\n '\\uFF00-\\uFFEF' # HALFWIDTH AND FULLWIDTH FORMS\n ))\n basic_latin = '\\u0000-\\u007F'\n\n def remove_space_between(cls1, cls2, s):\n p = re.compile('([{}]) ([{}])'.format(cls1, cls2))\n while p.search(s):\n s = p.sub(r'\\1\\2', s)\n return s\n\n s = remove_space_between(blocks, blocks, s)\n s = remove_space_between(blocks, basic_latin, s)\n s = remove_space_between(basic_latin, blocks, s)\n return s\n\ndef normalize_neologd(s):\n s = s.strip()\n s = unicode_normalize('0-9A-Za-z。-゚', s)\n\n def maketrans(f, t):\n return {ord(x): ord(y) for x, y in zip(f, t)}\n\n s = s.translate(\n maketrans('!\"#$%&\\'()*+,-./:;<=>?@[\\]^_`{|}~。、・「」',\n '!”#$%&’()*+,?./:;<=>?@[¥]^_`{|}?。、・「」'))\n s = re.sub('[??‐???????]+', '-', s) # normalize hyphens\n s = re.sub('[?-ー?―─━ー]+', 'ー', s) # normalize choonpus\n s = re.sub('[~????~]', '', s) # remove tildes\n s = remove_extra_spaces(s)\n s = unicode_normalize('!”#$%&’()*+,?./:;<>?@[¥]^_`{|}?', s) # keep =,・,「,」\n s = re.sub('[’]', '\\'', s)\n s = re.sub('[”]', '\"', s)\n return s\n\ndef main():\n\tgzfile = gzip.open(sys.argv[1], 'rt')\n\tline = gzfile.readline()\n\twhile line:\n\t\tprint(normalize_neologd(line))\n\t\tline = gzfile.readline() \n\t \nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"assignment_2_7/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"404936903","text":"import logging\nimport pymysql\n\n\nclass KDCdb(object):\n def __init__(self):\n self.host = '127.0.0.1'\n self.user = 'username'\n self.password = 'yourpwd'\n self.database = 'dbname'\n self.charset = 'utf8'\n\n def connect_kdc_db(self):\n try:\n return pymysql.connect(host=self.host, user=self.user,\n password=self.password, database=self.database,\n charset=self.charset)\n except (ConnectionRefusedError, pymysql.err.OperationalError):\n logging.error('Connect operation error')\n return False\n\n def create_table_tb(self):\n if self.connect_kdc_db():\n conn = self.connect_kdc_db()\n cursor = conn.cursor()\n try:\n sql = \"\"\"CREATE TABLE kdc_tb (\n id INT auto_increment PRIMARY KEY,\n u_name VARCHAR(20) NOT NULL UNIQUE,\n pwd VARCHAR(40) NOT NULL);\"\"\"\n\n cursor.execute(sql)\n\n except (pymysql.err.OperationalError, pymysql.err.InternalError):\n logging.error('Create operation error - Table already exists')\n\n finally:\n cursor.close()\n conn.close()\n else:\n return 0\n\n def create_table_lg(self):\n if self.connect_kdc_db():\n conn = self.connect_kdc_db()\n cursor = conn.cursor()\n try:\n sql = \"\"\"CREATE TABLE kdc_login (\n id INT auto_increment PRIMARY KEY,\n u_name VARCHAR(20) NOT NULL,\n pwd VARCHAR(40) NOT NULL);\"\"\"\n\n cursor.execute(sql)\n\n except (pymysql.err.OperationalError, pymysql.err.InternalError):\n logging.error('Create operation error - Table already exists')\n\n finally:\n cursor.close()\n conn.close()\n else:\n return 0\n\n def insert_data(self, user, pwd, tb_name):\n if self.connect_kdc_db():\n conn = self.connect_kdc_db()\n cursor = conn.cursor()\n\n try:\n sql = \"INSERT INTO \" + tb_name + \" (u_name, pwd) VALUES ('%s', '%s')\" % (user, pwd)\n\n cursor.execute(sql)\n conn.commit()\n\n except pymysql.err.IntegrityError:\n conn.rollback()\n logging.error('Insert operation error - Duplicate entry')\n\n except pymysql.err.DataError:\n conn.rollback()\n logging.error('Insert 
operation error - Data too long')\n\n else:\n print('[+] Insert success')\n\n finally:\n cursor.close()\n conn.close()\n else:\n return 0\n\n def query_data(self, user, tb_name):\n if self.connect_kdc_db():\n conn = self.connect_kdc_db()\n cursor = conn.cursor()\n\n try:\n sql = \"SELECT pwd FROM \" + tb_name + \" where u_name = '%s'\" % user\n\n cursor.execute(sql)\n pwd = cursor.fetchall()[-1] # 选择最近插入的数据\n\n return pwd\n\n except pymysql.err.InternalError:\n logging.error('Query operation error - Unknown column')\n\n finally:\n cursor.close()\n conn.close()\n\n else:\n return 0\n\n def delete_data(self, user):\n if self.connect_kdc_db():\n conn = self.connect_kdc_db()\n cursor = conn.cursor()\n\n try:\n sql = \"DELETE FROM kdc_tb where u_name = '%s'\" % user\n\n cursor.execute(sql)\n conn.commit()\n\n except pymysql.err.OperationalError:\n conn.rollback()\n logging.exception('Delete operation error')\n\n else:\n print('[+] Delete success')\n\n finally:\n cursor.close()\n conn.close()\n else:\n return 0\n\n\nif __name__ == '__main__':\n kdb = KDCdb()\n kdb.create_table_lg()\n","sub_path":"Database/OptDatabase.py","file_name":"OptDatabase.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"204323366","text":"import wpilib.command\nimport ctre\nfrom oi import getJoystick\n\nclass Grabber(wpilib.command.Command):\n def __init__(self):\n super().__init__(\"Grabber\")\n self.intake = self.getRobot().intake\n self.requires(self.intake)\n\n\n def execute(self):\n joystick = getJoystick()\n closeArm_trigger = joystick.getRawAxis(3) #Right Trigger\n openArm_trigger = joystick.getRawAxis(2) #Left Trigger\n if (closeArm_trigger < 0): # right trigger triggered\n self.intake.motor_closeOpen_set(-1) #set to full reverse power\n elif (openArm_trigger < 0): #left trigger triggered\n self.intake.motor_closeOpen_set(1) #set to full forward power\n else:\n self.intake.motor_closeOpen_set(0) #turn off motor","sub_path":"src/commands/grabber.py","file_name":"grabber.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"56305741","text":"from optparse import OptionParser\nfrom classDef import ReadData\nfrom classDef import Parameters\n\n\ndef opt_parser(argv):\n parser = OptionParser()\n parser.add_option(\n \"-p\", \"--parameter-file\", action=\"store\", type=\"string\",\n dest=\"parameter\", default=\"\",\n help=\"provide a file that contain the parameters\",\n metavar=\"PARAMETER-FILE\")\n parser.add_option(\n \"-c\", \"--chip1\", action=\"store\", type=\"string\", dest=\"chip1\",\n default=\"\", help=\"chip1 file names separated by comma\",\n metavar=\"CHIP1\")\n parser.add_option(\n \"-i\", \"--input1\", action=\"store\",\n type=\"string\", dest=\"input1\", default=\"\",\n help=\"input1 file names separated by comma\",\n metavar=\"INPUT1\")\n parser.add_option(\n \"--chip2\", action=\"store\", type=\"string\", dest=\"chip2\",\n default=\"\", help=\"chip2 file names separated by comma\",\n metavar=\"CHIP2\")\n parser.add_option(\n \"--input2\", action=\"store\", type=\"string\", dest=\"input2\",\n default=\"\", help=\"input2 file names separated by comma\",\n metavar=\"INPUT2\")\n parser.add_option(\n \"-f\", \"--file-format\", action=\"store\", type=\"string\",\n dest=\"file_format\",\n help=\"bed, sam, bam, eland_multi, eland_extended, bowtie...\",\n metavar=\"FORMAT\")\n parser.add_option(\n \"-s\", 
\"--shiftsize\", action=\"store\",\n type=\"string\", dest=\"shift_size\", default=\"-1\",\n help=\"Half the fragment size.\", metavar=\"SHIFTSIZE\")\n parser.add_option(\n \"-w\", \"--windowsize\", action=\"store\",\n type=\"int\", dest=\"window_size\", default=-1,\n help=\"Window sizes\",\n metavar=\"WINDOWSIZE\")\n parser.add_option(\n \"--diff\", action=\"store_true\",\n dest = \"difftest\", default=False,\n help=\"Perform differential binding instead of peak-calling\")\n parser.add_option(\n \"-n\", \"--name\", action = \"store\",\n type=\"string\", dest=\"name\", default = \"NA\",\n help = \"the experimental name. NA if none provided\",\n metavar=\"NAME\")\n parser.add_option(\n \"-r\", \"--remove_duplicate\", action =\"store_true\",\n dest = \"remove_redundant\", default=False,\n help=\"Remove duplicated reads\")\n parser.add_option(\n \"--threshold\", action =\"store\",\n type='float', dest=\"threshold\", default=1e-5,\n help=\"p-value threshold. Default 1e-5.\")\n parser.add_option(\n \"--peaktype\", action=\"store\",\n type=\"string\", dest=\"peaktype\", default=\"broad\",\n help=\"sharp or broad. Default broad.\")\n parser.add_option(\n \"--remove_artefacts\", action=\"store_true\",\n dest=\"remove_artefacts\", default=False,\n help = 'Remove PCR-duplication peaks in post-procesing')\n parser.add_option(\n \"--narrow_peak_width\", action=\"store_true\",\n dest =\"narrow_peak_width\", default=False,\n help = '''Narrow peak width to contain the most\n enriched regions. Only available for SHARP peak type''')\n parser.add_option(\n \"--custom_normalization\", action=\"store\",\n type=\"string\", dest=\"normalization\", default=\"YES\", \n help='''You can choose not to normalize the reads by \n specifying 'NO' or provide your own normalization \n constants separated by commas.''')\n parser.add_option(\n \"--no_log\", action=\"store_true\",\n dest = \"unsave_log\", default=False,\n help = \"Disable saving the log files\")\n (opt, args)=parser.parse_args(argv)\n if len(argv)==1:\n parser.print_help()\n exit(1)\n return opt\n\ndef process_opt(opt):\n ''' validate the parameters that the user specified'''\n # initial process the filenames.\n opt.chip1 = opt.chip1.strip().split(',')\n opt.chip2 = opt.chip2.strip().split(',')\n opt.input1 = opt.input1.strip().split(',')\n opt.input2 = opt.input2.strip().split(',')\n parameter = Parameters(opt)\n\n ## initialize the data structure\n read_data = ReadData(\n opt.chip1, opt.input1,\n opt.chip2, opt.input2,\n parameter.difftest\n )\n #add shift size validations\n return parameter, read_data\n","sub_path":"PePr/optParser.py","file_name":"optParser.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"115631617","text":"\"\"\"\nScript for converting nii to 2D slices in .jpg format in python\n\n\n\n\nAuthor: Xujuan Sun / Yucheng Tang\nData: July 4, 2018\n\n\"\"\"\n\n\nimport os\nimport os.path as path\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom multiprocessing.pool import Pool\nimport nibabel as nib\nimport scipy\nimport scipy.misc\n\nparser = argparse.ArgumentParser(\n description=\"Generate 2D slices from nii(nifity)\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n)\nparser.add_argument(\n \"--volume_dir\", default='',\n help=\"The folder that contains all the nii volumes\"\n)\nparser.add_argument(\n \"--image_dir\", default='',\n help=\"The folder that contains all 2D 
slices\"\n)\nparser.add_argument(\n \"--num_workers\", type=int, default=11,\n help=\"Number of processing workers\"\n)\nargs = parser.parse_args()\n\nnii_files = [\n d for d in os.listdir(args.volume_dir) if d.endswith('.nii')\n]\ncount = 0\ntotal_len = len(nii_files)\nfor nii in nii_files:\n count += 1\n print(\"[{}/{}] Converting {}\".format(count, total_len, nii))\n output_dir = os.path.join(args.image_dir, nii)\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n nii_path = os.path.join(args.volume_dir, nii)\n img = nib.load(nii_path)\n img_array = np.array(img.get_fdata())\n for i in range(img_array.shape[2]):\n cur_slice = img_array[:,:,i]\n cur_slice_file = os.path.join(output_dir, \"slice_{}.png\".format(str(i).zfill(4)))\n cur_slice_image = scipy.misc.toimage(cur_slice, high = np.max(cur_slice), \n low=np.min(cur_slice),mode='L') \n cur_slice_image.save(cur_slice_file)\nprint(\"Done\")\n\n","sub_path":"nii2jpgslices.py","file_name":"nii2jpgslices.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"115526216","text":"\"\"\"Add metaproject parameters\n\nRevision ID: 544c251b39b3\nRevises: 0a220a216d4a\nCreate Date: 2019-09-09 10:57:26.570000\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"544c251b39b3\"\ndown_revision = \"0a220a216d4a\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n conn = op.get_bind()\n conn.execute(\"ALTER TABLE `PROJECT` ADD COLUMN `PARQUET_PATH` varchar(1000);\")\n conn.execute(\"ALTER TABLE `PROJECT` ADD COLUMN `PARQUET_SIZE` BIGINT UNSIGNED;\")\n\n\ndef downgrade():\n conn = op.get_bind()\n conn.execute(\n \"ALTER TABLE `PROJECT` DROP COLUMN `PARQUET_PATH`, DROP COLUMN `PARQUET_SIZE`;\"\n )\n","sub_path":"alembic/versions/544c251b39b3_add_metaproject_parameters.py","file_name":"544c251b39b3_add_metaproject_parameters.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"451925798","text":"import json\nimport operator\n\nfrom src.reversi_zero.lib import elo as elo_lib\nfrom logging import getLogger\n\nimport time\n\nfrom src.reversi_zero.config import Config\nfrom src.reversi_zero.lib import elo as elo_lib\nfrom src.reversi_zero.lib.pipe_helper import PipeFilesManager, reverse_in_out\nfrom src.reversi_zero.lib.proc_helper import build_child_cmd, start_child_proc\n\nlogger = getLogger(__name__)\n\n\ndef start(config):\n return LeagueWorker(config).start()\n\nfrom collections import namedtuple\nPlayer = namedtuple('Player', 'name, config weight n_sims')\n\n\ndef get_player(generation, n_sims):\n return Player(\n name=f'{generation}_{n_sims}',\n config=f'/fds/exp/alpha-zero/reversi/data/model/generation_models/model_{generation}-steps/model_config.json',\n weight=f'/fds/exp/alpha-zero/reversi/data/model/generation_models/model_{generation}-steps/model_weight.h5',\n n_sims=n_sims\n )\n\nPLAYERS = [\n # get_player(0, 100),\n # get_player(0, 800),\n # get_player(0, 2000),\n # get_player(56800, 100),\n # get_player(56800, 800),\n # get_player(56800, 2000),\n # get_player(116000, 100),\n # get_player(116000, 800),\n # get_player(116000, 2000),\n # get_player(200800, 100),\n # get_player(200800, 800),\n # get_player(200800, 2000),\n # get_player(263200, 100),\n # get_player(263200, 800),\n # get_player(263200, 2000),\n # get_player(302400, 40),\n # get_player(302400, 100),\n # 
get_player(302400, 800),\n # get_player(302400, 2000),\n # get_player(304800, 40),\n # get_player(304800, 100),\n # get_player(304800, 800),\n # get_player(304800, 2000),\n # get_player(312000, 40),\n # get_player(312000, 100),\n # get_player(312000, 800),\n # get_player(312000, 2000),\n # get_player(314400, 40),\n # get_player(314400, 100),\n # get_player(314400, 400),\n # get_player(314400, 800),\n # get_player(314400, 2000),\n # get_player(336000, 40),\n # get_player(336000, 100),\n # get_player(336000, 400),\n get_player(350400, 40),\n get_player(350400, 100),\n get_player(350400, 400),\n # get_player(360000, 40),\n # get_player(360000, 100),\n # get_player(360000, 400),\n get_player(386400, 40),\n get_player(386400, 100),\n get_player(386400, 400),\n get_player(391200, 40),\n get_player(391200, 100),\n get_player(391200, 400),\n]\n\n\nclass LeagueWorker:\n def __init__(self, config: Config):\n self.config = config\n self.players = PLAYERS\n self.pipe_files = PipeFilesManager.new_one(self.config)\n self.n_games = 4\n self.result_file = self.config.opts.league_result\n\n def start(self):\n import itertools\n games = itertools.combinations(reversed(self.players), 2)\n results = {}\n try:\n import os\n if os.path.exists(self.result_file):\n with open(self.result_file, 'rt') as f:\n results = json.load(f)\n except Exception as e:\n logger.debug(e)\n\n self.print_result(results)\n for p1, p2 in games:\n if not p1.name < p2.name:\n p1, p2 = p2, p1\n key = f'{p1.name}_vs_{p2.name}'\n if key in results:\n continue\n logger.info(key)\n r = self.vs(p1, p2, self.n_games)\n results[key] = r\n\n self.print_result(results)\n with open(self.result_file, 'wt') as f:\n json.dump(results, f)\n\n limited_results = {}\n names = [p.name for p in self.players]\n for k in results:\n p1, p2 = k.split('_vs_')\n if p1 in names and p2 in names:\n limited_results[k] = results[k]\n results = limited_results\n\n elo = {}\n expected = {}\n actual = {}\n for k in results:\n p1, p2 = k.split('_vs_')\n elo[p1] = 0\n elo[p2] = 0\n expected[p1] = 0\n expected[p2] = 0\n actual[p1] = 0\n actual[p2] = 0\n\n for k in results:\n p1, p2 = k.split('_vs_')\n w,d,l = int(results[k][0]), int(results[k][1]), int(results[k][2])\n\n expected[p1] += elo_lib.expected(elo[p1], elo[p2]) * (w+d+l)\n expected[p2] += elo_lib.expected(elo[p2], elo[p1]) * (w+d+l)\n actual[p1] += w+d*0.5\n actual[p2] += l+d*0.5\n\n for p in elo:\n elo[p] = elo_lib.elo(elo[p], expected[p], actual[p], self.config.opts.elo_k)\n\n self.print_result(results)\n self.print_elo(elo)\n\n\n @staticmethod\n def print_elo(elo):\n logger.info('===========ELO==============')\n elo = [x for x in reversed(sorted(elo.items(), key=operator.itemgetter(1)))]\n for k in elo:\n logger.info(f'{k[0]:15} : {k[1]}')\n logger.info('============================')\n\n @staticmethod\n def print_result(result):\n logger.info('=========VS RESULTS=========')\n for k in result:\n logger.info(f'{k:30} : {result[k]}')\n logger.info('============================')\n\n def vs(self, player1 : Player, player2 : Player, n_games):\n\n pipe_pairs = self.pipe_files.make_pipes(1)\n cmd = build_child_cmd(type='versus_n_games', config=self.config, pipe_pairs=reverse_in_out(pipe_pairs))\n cmd.extend([\n '--n-games', f'{n_games}',\n '--n-workers', f'{self.config.opts.n_workers}',\n '--p1-n-sims', f'{player1.n_sims}',\n \"--p1-model-config-path\", player1.config,\n \"--p1-model-weight-path\", player1.weight,\n '--p2-n-sims', f'{player2.n_sims}',\n \"--p2-model-config-path\", player2.config,\n 
\"--p2-model-weight-path\", player2.weight,\n ])\n\n pipe_pairs[0].open_read_nonblock()\n p = start_child_proc(cmd=cmd).wait()\n\n result = pipe_pairs[0].read_no_empty()\n assert result\n result = result.decode()\n result = result.split(',')\n result = [int(x) for x in result]\n\n pipe_pairs[0].close_read()\n self.pipe_files.clear_pipes()\n\n return result\n\n\n","sub_path":"src/reversi_zero/worker/league.py","file_name":"league.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"50492787","text":"from Tiled.TiledMap import TiledObjectItem\n#from Utils.ViewModelBase2 import ViewModelBase2\n\nclass MapObjectBase(object):\n \"\"\"The map object base class.\"\"\"\n def __init__(self):\n self._parent = None\n self._name = None\n self._properties = None\n self._x = None\n self._y = None\n self._width = None\n self._height = None\n \n\n def configure(self, configuration):\n \"\"\"Configure the object from tmx object layer configuration.\"\"\"\n assert isinstance(configuration, TiledObjectItem), \"Expected config to be TiledObjectItem.\"\n\n self._name = configuration.name\n self._x = configuration.x\n self._y = configuration.y\n self._width = configuration.width\n self._height = configuration.height\n self._properties = configuration.properties\n pass\n\n def initializeObject(self, parent):\n #assert isinstance(parent, ViewModelBase2), \"Expected parent to be of type ViewModelBase2.\"\n self._parent = parent\n\n\n\n","sub_path":"SimpleGame/SimpleGame/Src/Utils/MapObjectBase.py","file_name":"MapObjectBase.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"211298248","text":"def binary_search(list,item):\n \"\"\"\n binary_search\n receive a sorted array and the key item, ouput the index of that item\n roseau\n 2019/9/23\n \"\"\"\n low = 0\n high = len(list) - 1\n\n while low <= high:\n mid = int((low + high) / 2) # int() to Round down\n guess = list[mid]\n\n if guess == item:\n return mid\n if guess > item:\n high = mid - 1\n else:\n low = mid + 1\n return 'Not fonud'\n\n\nif __name__ == '__main__':\n mylist = list(range(1,20,2))\n print(\"input string is:\" , mylist)\n print('19 is ' , binary_search(mylist,19))\n print('9 is ' , binary_search(mylist,9))\n print('200 is ' , binary_search(mylist,200))\n\n\n","sub_path":"Algorithm/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"405953637","text":"#Calculadora de números de un digito\na = input('Ingresa números enteros a sumar de un solo digito con su signo>>')\n\np = 0 # suma de numeros negativos\nu = 0 #suma de numeros positivos\nd =-1\n\n\"\"\"\nSi el usuario ingresa una lista de números\ny el primer número es positivo se ejecuta la \nlinea del 14 al 23\n\nSi el usuario ingresa una lista de números\ny el primer número es negativo se ejecuta la \nlinea del 29 al 39\n\"\"\"\n\nif a[0] != '-':\n\tfor i in range(1,len(a)): \n\t\tif a[i].isdigit():\n\t\t\tb = a[i-1]\n\t\t\tk = int(a[i])\n\t\t\t\n\t\t\tif b == '-':\n\t\t\t\tp+=k\n\t\t\telif b:\n\t\t\t\tu+=k + int(a[0])\nelse:\n\tfor i in a:\n\t\td+=1\n\t\tif i.isdigit():\n\t\t\tb = a[d-1] #idexación de signos desde el 0\n\t\t\tk = int(a[d]) #indexación de numeros desde el 1\n\t\t\t\n\t\t\tif b == '-':\n\t\t\t\tp+=k\n\t\t\telif b:\n\t\t\t\tu+=k\n\nr = u - p #resultado 
final mediante el concepto de valor absoluto\nif u > p:\n\tprint(r)\nelse:\n\tprint(r)\n\n\n","sub_path":"Scripts/Math/clcnumundigito.py","file_name":"clcnumundigito.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"420480329","text":"#!/usr/bin/python\n\n\ndef get_number_term(phone):\n '''\n Returns wildcarded version of phonenumber.\n Strips +/00 off of the beginning, and the next\n two digits to account for country codes\n '''\n\n if (phone.startswith('0') and not phone.startswith('00')):\n stripTwo = False\n else:\n stripTwo = True\n\n number = phone.lstrip('+')\n number = number.lstrip('00')\n if stripTwo:\n number = number[2:len(number)]\n term = '%'\n for digit in number:\n term += (digit + \"%\")\n return term\n","sub_path":"app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"459558576","text":"import paho.mqtt.client as mqtt\nimport re\n\n# The callback for when the client receives a CONNACK response from the server.\nsessionId = 0\nsessionId_set = False\nstartedListening = False\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n #client.subscribe(\"hermes/hotword/#\")\n client.subscribe('hermes/hotword/default/#')\n client.subscribe('hermes/asr/#')\n\n # Hotword bypass\n client.publish('hermes/hotword/default/detected','{\"siteId\":\"default\",\"modelId\":\"hey_snips\",\"modelVersion\":\"hey_snips_3.1_2018-04-13T15:27:35_model_0019\",\"modelType\":\"universal\",\"currentSensitivity\":0.5}', retain=True)\n client.publish('hermes/asr/toggleOff')\n client.publish('hermes/asr/stopListening')\n print(\"Sent hotword\")\n #client.publish('hermes/dialogManager/default/startSession')\n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, msg):\n print(\"Received message: {0} {1}\".format(msg.topic, msg.payload))\n #print(\"Received message: {0}\".format(msg.topic))\n\n # To bypass opening ANSWER\n startedListening = 'textCaptured' in msg.topic\n print(msg.topic)\n print(\"startedListening: {}\".format(startedListening))\n\n # To capture sessionId\n has_id = '\"sessionId\"' in str(msg.payload)\n #print(\"Has ID: {}\".format(has_id))\n\n if startedListening:\n print(\"Publishing txtCaptured update with Retrieved sessionId: {}\".format(sessionId))\n client.publish('hermes/asr/textCaptured', '{\"text\":\"start lesson\",\"likelihood\":0.7031782,\"tokens\":[{\"value\":\"start\",\"confidence\":0.92768437,\"range_start\":0,\"range_end\":5,\"time\":{\"start\":0.0,\"end\":2.22}},{\"value\":\"lesson\",\"confidence\":0.53300416,\"range_start\":6,\"range_end\":12,\"time\":{\"start\":2.221435,\"end\":3.36}}],\"seconds\":3.0,\"siteId\":\"default\",\"sessionId\":\"{0}\"}'.format(sessionId))\n startedListening = False\n\n #print(\"--- BEYOND ID\")\n elif has_id and not sessionId_set:\n sessionId = re.search('(?<=sessionId\":\")[a-z,0-9,-]*', str(msg.payload))\n sessionId_set = True\n # Try to hijack text being captured\n #print(\"Stopping the listening real quick\")\n #client.publish('hermes/asr/stopListening', '{\"siteId\":\"default\",\"sessionId\":\"{0}\"}'.format(sessionId))\n\n #print(msg.topic+\" \"+str(msg.payload))\n print(\"### BEYOND 
LISTENING\")\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(\"snips-london-2.local\", 1883, 60)\n#client.startSession()\n# Blocking call that processes network traffic, dispatches callbacks and\n# handles reconnecting.\n# Other loop*() functions are available that give a threaded interface and a\n# manual interface.\nclient.loop_forever()\n","sub_path":"tryout/fakeClient.py","file_name":"fakeClient.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"501791717","text":"import os\nimport re\nimport hashlib\nimport base64\nimport daemon\nimport tornado.web\nimport tornado.ioloop\nimport tornado.options\nimport tornado.httpserver\nimport tornado.escape\n\nfrom tornado.options import define, options\n\ndefine(\"port\", default=80, help=\"run on the given port\", type=int)\n\nclass HandleUnknown(tornado.web.RequestHandler):\n def get(self):\n self.write(\"Hello World!\")\n\nclass HandlePing(tornado.web.RequestHandler):\n def get(self):\n gw_id = self.get_argument(\"gw_id\", \"\")\n sys_uptime = self.get_argument(\"sys_uptime\", \"\")\n print(gw_id)\n print(sys_uptime)\n self.write(\"Pong\")\n\nclass HandleLogin(tornado.web.RequestHandler):\n def get(self):\n print(self.request.arguments)\n args={}\n args[\"gw_address\"] = self.get_argument(\"gw_address\", \"\")\n args[\"gw_port\"] = self.get_argument(\"gw_port\", \"2060\")\n args[\"gw_id\"] = self.get_argument(\"gw_id\", \"\")\n args[\"mac\"] = self.get_argument(\"mac\", \"\")\n args[\"url\"] = self.get_argument(\"url\", \"#\")\n print(args)\n\n # check if url is wx auth.\n m = re.search(r\"wx(.+)wx\", args[\"url\"])\n if m != None:\n wxuser = m.group(1)\n print(\"detect a wx pair! wx=<%s>, mac=<%s>\"%(wxuser, args[\"mac\"]))\n\n self.render(\"login.htm\", args=args)\n\nclass HandleAuth(tornado.web.RequestHandler):\n def post(self):\n username = self.get_argument(\"username\",\"\")\n password = self.get_argument(\"password\",\"\")\n gw_address = tornado.escape.url_unescape(self.get_argument(\"gw_address\",\"\"))\n gw_port = tornado.escape.url_unescape(self.get_argument(\"gw_port\",\"\"))\n print(username)\n print(password)\n m = hashlib.md5()\n m.update(username.encode(\"utf8\"))\n m.update(password.encode(\"utf8\"))\n token = base64.b64encode(m.digest())\n print(token)\n\n self.redirect(\"http://%s:%s/wifidog/auth?token=%s\"%(gw_address, gw_port, token))\n\n def get(self):\n arguments = self.request.arguments\n print(arguments)\n self.set_header(\"Auth\",\"1\")\n\nclass HandlePortal(tornado.web.RequestHandler):\n def get(self):\n print(self.request.arguments)\n self.render(\"success.htm\", url=None)\n\ndef main():\n tornado.options.parse_command_line()\n log = open('tornado.' 
+ str(options.port) + '.log', 'a+')\n ctx = daemon.DaemonContext(stdout=log, stderr=log, working_directory='.')\n ctx.open()\n app = tornado.web.Application(\n handlers = [(r\"/\", HandleUnknown),\n (r\"/ping/?\", HandlePing),\n (r\"/login/?\", HandleLogin),\n (r\"/auth/?\", HandleAuth),\n (r\"/portal/?\", HandlePortal)\n ],\n static_path =os.path.join(os.path.dirname(__file__), \"static\"),\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"))\n server = tornado.httpserver.HTTPServer(app)\n server.listen(options.port)\n try:\n tornado.ioloop.IOLoop.instance().start()\n except KeyboardInterrupt:\n tornado.ioloop.IOLoop.instance().stop()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"web/tornado/wifigod/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"405642505","text":"import matplotlib\nfrom visualization.grill.config import (\n output_dir,\n ashvin_base_dir,\n format_func,\n our_method_name,\n configure_matplotlib,\n)\nimport matplotlib.pyplot as plt\nfrom rlkit.visualization import plot_util as plot\n\nconfigure_matplotlib(matplotlib)\n\n\ndirs = [\n ashvin_base_dir + 's3doodad/ashvin/vae/fixed3/sawyer-pusher/vae-dense-multi3/run1',\n ]\nf = plot.filter_by_flat_params({'algo_kwargs.num_updates_per_env_step': 4, 'rdim': 16, 'replay_kwargs.fraction_goals_are_rollout_goals': 0.2})\nher = plot.load_exps(dirs, f, suppress_output=True)\n\ndirs = [\n ashvin_base_dir + 's3doodad/ashvin/vae/fixed3/sawyer-pusher/vae-dense-multi3/run1',\n ]\nf = plot.filter_by_flat_params({'algo_kwargs.num_updates_per_env_step': 4, 'rdim': 16, 'replay_kwargs.fraction_goals_are_rollout_goals': 1.0, 'replay_kwargs.fraction_goals_are_env_goals': 0.0})\nnorelabel = plot.load_exps(dirs, f, suppress_output=True)\n\ndirs = [\n ashvin_base_dir + 's3doodad/ashvin/vae/fixed3/sawyer-pusher/vae-dense-multi3-fullrelabel/run1',\n]\nfullrelabel = plot.load_exps(dirs, suppress_output=True)\nplot.comparison(her + fullrelabel + norelabel, \"Final total_distance Mean\",\n [\"replay_kwargs.fraction_goals_are_rollout_goals\", \"replay_kwargs.fraction_goals_are_env_goals\", ],\n# [\"training_mode\", \"replay_kwargs.fraction_goals_are_env_goals\", \"replay_kwargs.fraction_goals_are_rollout_goals\", \"rdim\"],\n default_vary={\"replay_strategy\": \"future\"},\n smooth=plot.padded_ma_filter(10), figsize=(7.5, 4),\n xlim=(0, 500000), ylim=(0.15, 0.35),\n method_order=[1, 2, 0, 3])\nplt.gca().xaxis.set_major_formatter(plt.FuncFormatter(format_func))\nplt.ylabel(\"\")\nplt.xlabel(\"Timesteps\")\nplt.title(\"Visual Multi-object Pusher\")\nleg = plt.legend([our_method_name, \"None\", \"Future\", \"VAE\", ],\n bbox_to_anchor=(1.0, 0.5), loc=\"center left\",)\n# leg.get_frame().set_alpha(0.9)\nplt.tight_layout()\nplt.savefig(output_dir + \"multiobj_pusher_relabeling_ablation.pdf\")\nprint(\"File saved to\", output_dir + \"multiobj_pusher_relabeling_ablation.pdf\")\n","sub_path":"visualization/grill/multiobj_pusher_relabeling_ablation.py","file_name":"multiobj_pusher_relabeling_ablation.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"379022205","text":"import time\nimport socket\nimport sys\n\nsAddr = \"192.168.1.201\"\n\n# connect to socket\ndef connect_socket(addr):\n s = socket.socket()\n print(\"Socket created\")\n s.connect(addr)\n time.sleep(1)\n print(\"Socket connected\")\n\n return 
s\n\n\ndef sendData(sData):\n    # set socket address\n    addr = socket.getaddrinfo(sAddr, 33733)[0][-1]\n    try:\n        s = connect_socket(addr)\n        # make sure server is ready\n        msg = s.recv(100).decode('utf-8')\n        if msg == \"ready\":\n            # server reported ready, send the payload\n            s.send(sData.encode('utf-8'))\n        s.close()\n\n    except:\n        print('exception occurred', sys.exc_info()[0])\n        # GoDeepSleep(5)","sub_path":"SimpleClient.py","file_name":"SimpleClient.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"468508039","text":"def solution(heights): \n    answer = []\n    tot = 
len(heights)\n    for m in range(0,tot):\n        me = heights.pop()\n        flag = 0\n        for i, x in enumerate(list(reversed(heights))):\n            if x > me:\n                answer.append(tot-1 - m -i)\n                flag = 1\n                break\n        if not flag:\n            answer.append(0)\n    \n    return list(reversed(answer))\n","sub_path":"programmers/level2/탑/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"269742726","text":"#\n# andre@corp.insite.com.br\n# 2017-10-10\n# Code that does a simple regression and finds the embeddings\n#\n# the idea here is the following:\n# - load the MovieLens data\n# - initialize the embeddings at random\n# - find the movie and user embeddings that produce the smallest possible error\n# t8: removes the per-movie and per-user biases and replaces them with a single global bias\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom time import gmtime, strftime, localtime\n\nimport math\nimport time\nimport sys\nimport os\n#from pylab import *\nfrom scipy import sparse\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport random\n\nfrom tensorflow.python import debug as tf_debug\n\nNUM_USERS = 247754\nNUM_MOVIES = 151712\nNUM_FEATURES = 11\nbatch_size = 9999\nnum_steps = 2000001\nbase_lbda = 0.01\ncount = 1\n# Regularization\nalpha = 0.00001\ndecay = 0.9999\nINPUT_FILE=\"ratings.csv\"\nprefix = \"t8-r{0:d}-l{1}-a{2}-{3}-\".format(NUM_FEATURES, base_lbda, alpha, INPUT_FILE)\n\nsys.stdout = open(prefix + \"out\", \"w\", 1)\n\ntf.set_random_seed(1)\n\nt0 = time.perf_counter()\n\ndef loga(msg):\n    now = time.perf_counter()\n    print(\"%6.2f: %s\" % (now - t0, msg))\n\ndef load_data(fname):\n    print(\"Loading data from {}\".format(fname))\n    full_train_data = pd.read_csv(INPUT_FILE, sep=\",\").sample(frac=1)\n    train_data = np.array(full_train_data[[\"userId\", \"movieId\"]])\n    train_labels = np.array(full_train_data[[\"rating\"]])\n    NUM_USERS = np.amax(train_data[:,0]) + 1\n    NUM_MOVIES = np.amax(train_data[:,1]) + 1\n    num_ratings = train_data.shape[0]\n    loga(\"NUM_USERS = {}\".format(NUM_USERS))\n    loga(\"NUM_MOVIES = {}\".format(NUM_MOVIES))\n    loga(\"num ratings = {}\".format(num_ratings))\n    loga(\"batch_size = {}\".format(batch_size))\n    loga(\"num_steps = {}\".format(num_steps))\n    return train_data, train_labels\n\n\nif sys.argv[1].isdigit():\n    NUM_FEATURES = int(sys.argv[1])\nelse:\n    raise Exception(\"parameter NUM_FEATURES is required\")\n\ntrain_data, train_labels = load_data(INPUT_FILE)\n\ngraph = tf.Graph()\nwith graph.as_default():\n    tf_train_data = tf.placeholder(tf.int32, shape=(batch_size, 2))\n    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, 1))\n    tf_lr = tf.placeholder(tf.float32)\n\n    tf_count = tf.get_variable(\"count\", dtype=tf.int32, initializer=tf.constant(count))\n    if (NUM_FEATURES > 0):\n        ones = tf.constant(1., shape=(NUM_FEATURES,1))\n        user_embeddings = tf.get_variable(\"user_embeddings\", [NUM_USERS, NUM_FEATURES], initializer=tf.random_normal_initializer(0,1*math.sqrt(1/NUM_FEATURES)))\n        movie_embeddings = tf.get_variable(\"movie_embeddings\", [NUM_MOVIES, NUM_FEATURES], initializer=tf.random_normal_initializer(0,1*math.sqrt(1/NUM_FEATURES)))\n        tf_user_embeddings = tf.gather(user_embeddings, tf_train_data[:,0])\n        tf_movie_embeddings = tf.gather(movie_embeddings, tf_train_data[:,1])\n    else:\n        user_embeddings = tf.get_variable(\"user_embeddings\", initializer = tf.constant(0.0))\n        movie_embeddings = tf.get_variable(\"movie_embeddings\", initializer = tf.constant(0.0))\n    #bias = tf.get_variable(\"bias\", dtype=tf.float32, initializer=tf.constant(3.5))\n    user_bias = tf.get_variable(\"user_bias\", [NUM_USERS, 1], initializer=tf.random_normal_initializer(0.0))\n    movie_bias = tf.get_variable(\"movie_bias\", [NUM_MOVIES, 1], initializer=tf.random_normal_initializer(3.5))\n    tf_user_bias = tf.gather(user_bias, tf_train_data[:,0])\n    tf_movie_bias = tf.gather(movie_bias, tf_train_data[:,1])\n\n    #train_prediction = tf.tensordot(tf_user_embeddings, tf_movie_embeddings, axes=1)\n    if (NUM_FEATURES > 0):\n        train_prediction = tf.matmul(tf.multiply(tf_user_embeddings, tf_movie_embeddings), ones) + tf_movie_bias\n        #train_prediction = tf.matmul(tf.multiply(tf_user_embeddings, tf_movie_embeddings), ones) + tf_movie_bias + bias\n    else:\n        #train_prediction = tf_user_bias + tf_movie_bias + bias \n        train_prediction = tf_user_bias + tf_movie_bias\n    error = tf.subtract(train_prediction, tf_train_labels)\n    sse = tf.reduce_sum(tf.square(error))\n    if (NUM_FEATURES > 0):\n        regularization = tf.reduce_sum(tf.square(tf_user_embeddings))/NUM_FEATURES + tf.reduce_sum(tf.abs(tf_movie_embeddings))/NUM_FEATURES\n    else:\n        regularization = tf.reduce_sum(tf.square(tf_movie_bias)) + tf.reduce_sum(tf.square(tf_user_bias))\n\t# There's no need to regularize the biases\n\t# + tf.reduce_sum(tf.square(tf_movie_bias))*batch_size/NUM_MOVIES + tf.reduce_sum(tf.square(tf_user_bias)) * batch_size / NUM_USERS\n    loss = sse + alpha * regularization\n    mse = sse / batch_size\n    optimizer = tf.train.GradientDescentOptimizer(tf_lr).minimize(loss)\n    histogram = tf.histogram_fixed_width(error, [-4.5, 4.5], nbins=10)\n\n\nwith tf.Session(graph=graph) as session:\n    tf.global_variables_initializer().run()\n    print(\"Initialized\")\n    uemb, memb = session.run([user_embeddings, movie_embeddings])\n    print(\"user embeddings: {}\\n\",uemb)\n    print(\"movie embeddings: {}\\n\",memb)\n    acccount = acctot = 0.0\n    old_loss = 1e20\n    lr = base_lbda\n    for step in range(num_steps):\n        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n        batch_data = train_data[offset:(offset + batch_size), :]\n        batch_labels = train_labels[offset:(offset + batch_size), :]\n        feed_dict = {tf_train_data : batch_data, tf_train_labels : batch_labels, tf_lr: lr}\n        _, l, predictions, uemb, memb, _mse, hist, ubias, mbias = session.run(\n            [optimizer, loss, train_prediction, user_embeddings, movie_embeddings, mse, histogram, user_bias, movie_bias], feed_dict=feed_dict)\n        acccount = acccount * 0.9999 + 1\n        acctot = acctot * 0.9999 + _mse\n        exploss = acctot/acccount\n        if (step % 2000 == 0):\n            if (exploss > old_loss):\n                lr = lr * 0.1\n            else:\n                lr = lr * 1.02\n            old_loss = exploss\n\t    #\n            loga(\"Minibatch loss at step %d: %f (%f)\" % (step, l, l/batch_size))\n            print(\" Mean Square Error: %f - exp=%f\" % (_mse, acctot/acccount))\n            print(\" Learning Rate: %f\" % (lr))\n            if (NUM_FEATURES > 0):\n                print(\"user embeddings: %f: %s\" % (np.linalg.norm(uemb)/uemb.size, np.mean(uemb, 0)))\n                print(\"movie embeddings: %f: %s\" % (np.linalg.norm(memb)/memb.size, np.mean(memb, 0)))\n            print(\"user bias: %f: %f\" % (np.linalg.norm(ubias)/ubias.size, np.mean(ubias, 0)))\n            print(\"movie bias: %f: %f\" % (np.linalg.norm(mbias)/mbias.size, np.mean(mbias, 0)))\n            #print(\"bias: %f\" % (_bias))\n            print(\"error: %s\" % (hist))\n            #print(\"user embeddings: %f\" % (user_embeddings))\n            #print(\"embeddings: {}\".format(emb))\n        #print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n        #print(\"Validation accuracy: %.1f%%\" % 
accuracy(\n #valid_prediction.eval(), valid_labels))\n #print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))\n if lr < 1e-12:\n break\n print(\"steps done: {}\".format(step))\n if (NUM_FEATURES > 0):\n print(\"user_embeddings:\\n{}\".format(np.around(uemb, 3)))\n print(\"movie_embeddings:\\n{}\".format(np.around(memb, 3)))\n np.savetxt(prefix + \"user_embeddings.csv.gz\", uemb, delimiter=',', fmt=\"%.7f\")\n np.savetxt(prefix + \"movie_embeddings.csv.gz\", memb, delimiter=',', fmt=\"%.7f\")\n else:\n print(\"NO EMBEDDINGS\")\n np.savetxt(prefix + \"user_bias.csv.gz\", ubias, delimiter=',', fmt=\"%.7f\")\n np.savetxt(prefix + \"movie_bias.csv.gz\", mbias, delimiter=',', fmt=\"%.7f\")\n \n\n","sub_path":"t8.py","file_name":"t8.py","file_ext":"py","file_size_in_byte":7498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"525570499","text":"import sys\nimport matplotlib.pyplot as plt\n\ndef MovePoints(x, y, delta_x, delta_y):\n return x - delta_x, y - delta_y\n\ndef SymmetricReflection(x, y, x_mul, y_mul):\n return x * x_mul, y * y_mul\n\ndef DrawRectangle(x1, y1, x2, y2, clr):\n plt.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1], color=clr)\n\ndef Yes():\n sys.stdout.write('Yes\\n')\n exit(0)\n\ndef No():\n sys.stdout.write('No\\n')\n exit(0)\n\ndef Solve():\n x1, y1, x2, y2 = map(int, raw_input().split(' '))\n x3, y3, x4, y4 = map(int, raw_input().split(' '))\n w, h = map(int, raw_input().split(' '))\n\n## plt.clf()\n## DrawRectangle(x1, y1, x2, y2, clr='R')\n## DrawRectangle(x3, y3, x4, y4, clr='B')\n## plt.savefig('image1.png')\n\n # Make transformations to bring corner left to the origin.\n x, y = MovePoints(x2, y2, x1, y1)\n x3, y3 = MovePoints(x3, y3, x1, y1)\n x4, y4 = MovePoints(x4, y4, x1, y1)\n\n # Make symmetric reflection to bring graveyard into the first quadrant.\n x_mul, y_mul = 1, 1\n if x < 0: x_mul = -1\n if y < 0: y_mul = -1\n\n x, y = SymmetricReflection(x, y, x_mul, y_mul)\n x3, y3 = SymmetricReflection(x3, y3, x_mul, y_mul)\n x4, y4 = SymmetricReflection(x4, y4, x_mul, y_mul)\n\n## plt.clf()\n## DrawRectangle(0, 0, x, y, clr='R')\n## DrawRectangle(x3, y3, x4, y4, clr='B')\n## plt.savefig('image2.png')\n\n if x3 >= x or x4 <= 0 or y3 >= y or y4 <= 0:\n if w <= x and h <= y:\n Yes()\n else:\n No()\n\n # Place the rectangle on the left or the right of the chappel.\n if h <= y and (x4 + w <= x or w <= x3):\n Yes()\n\n # Place the rectangle above or below the chappel.\n if w <= x and (y4 + h <= y or h <= y3):\n Yes()\n No()\n\nif __name__ == \"__main__\":\n Solve()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"NEERC/NORTHERN_QUARTERFINAL/G.py","file_name":"G.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"498467081","text":"import os\nimport numpy as np\nfrom urllib import request, parse\nimport gzip\nimport pickle\nimport collections\n\ndic_files = collections.OrderedDict([\n(\"training_images\", \"train-images-idx3-ubyte.gz\"),\n(\"training_labels\", \"train-labels-idx1-ubyte.gz\"),\n(\"test_images\", \"t10k-images-idx3-ubyte.gz\"),\n(\"test_labels\", \"t10k-labels-idx1-ubyte.gz\")])\n\npath_dir = 'mnist'\npath_pkl = os.path.join(path_dir, 'mnist.pkl')\n\ndef download():\n if not os.path.exists(path_dir):\n os.makedirs(path_dir)\n url_base = \"http://yann.lecun.com/exdb/mnist/\"\n for name, file in dic_files.items():\n url = parse.urljoin(url_base, 
file)\n        path_file = os.path.join(path_dir, file)\n        if not os.path.isfile(path_file):\n            print(\"Downloading \" + name + \"...\")\n            request.urlretrieve(url, path_file)\n\ndef save():\n    download()\n    if os.path.isfile(path_pkl):\n        # the pickle already exists, no need to rebuild it\n        return\n    mnist = collections.OrderedDict()\n    for name, file in dic_files.items():\n        path_file = os.path.join(path_dir, file)\n        f = gzip.open(path_file, 'rb')\n        if 'images' in name:\n            mnist[name] = np.frombuffer(f.read(), np.uint8, offset=16).reshape(-1,28*28)\n        if 'labels' in name:\n            mnist[name] = np.frombuffer(f.read(), np.uint8, offset=8)\n    with open(path_pkl, 'wb') as f:\n        pickle.dump(mnist,f)\n\ndef load():\n    save()\n    with open(path_pkl,'rb') as f:\n        mnist = pickle.load(f)\n    return list(mnist.values())\n","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"352494816","text":"import time, pytest\nimport sys,os\nfrom _ast import Num\nsys.path.insert(1,os.path.abspath(os.path.join(os.path.dirname( __file__ ),'..','..','lib')))\nfrom clsCommon import Common\nimport clsTestService\nfrom localSettings import *\nimport localSettings\nfrom utilityTestFunc import *\nimport enums\n\n\nclass Test:\n\n    #================================================================================================================================\n    # @Author: Horia Cus\n    # Test Name : Filter by Ownership - no search - co publisher - Category Page - pending tab\n    # Test description:\n    # Verify that proper entries are displayed while filtering them by ownership\n    #================================================================================================================================\n    testNum = \"4595\"\n\n    supported_platforms = clsTestService.updatePlatforms(testNum)\n\n    status = \"Pass\"\n    timeout_accured = \"False\"\n    driver = None\n    common = None\n\n    searchPage = \"Category Page - pending tab - no search\"\n    filterMenuName = \"Filter by Ownership\"\n    userType = \"Co Publisher\"\n    categoryName = \"category for eSearch moderator\"\n\n    @pytest.fixture(scope='module',params=supported_platforms)\n    def driverFix(self,request):\n        return request.param\n\n    def test_01(self,driverFix,env):\n\n        #write to log we started the test\n        logStartTest(self,driverFix)\n        try:\n            ########################### TEST SETUP ###########################\n            #capture test start time\n            self.startTime = time.time()\n            #initialize all the basic vars and start playing\n            self,self.driver = clsTestService.initializeAndLoginAsUser(self, driverFix)\n            self.common = Common(self.driver)\n            # Entries and dictionaries\n            self.entryNameOwner = \"Filter by Ownership - pending owner\"\n            self.entryNameEditor = \"Filter by Ownership - pending co-editor\"\n            self.entryNamePublisher = \"Filter by Ownership - pending co-publisher\"\n            self.entryNameBoth = \"Filter by Ownership - pending both\"\n\n            self.listAnyOwner = {self.entryNameOwner:True, self.entryNameEditor:True, self.entryNamePublisher:True, self.entryNameBoth:True}\n            self.listAllInvalid = {self.entryNameOwner:False, self.entryNameEditor:False, self.entryNamePublisher:False, self.entryNameBoth:False}\n\n            self.listOwner = {self.entryNameOwner:True, self.entryNameEditor:True, self.entryNamePublisher:True, self.entryNameBoth:True} \n            self.listEditor = {self.entryNameOwner:False, self.entryNameEditor:True, self.entryNamePublisher:False, self.entryNameBoth:False} \n            self.listPublisher = {self.entryNameOwner:False, self.entryNameEditor:False, 
self.entryNamePublisher:True, self.entryNameBoth:False}\n self.listBoth = {self.entryNameOwner:False, self.entryNameEditor:False, self.entryNamePublisher:False, self.entryNameBoth:True}\n \n self.enumAnyOwner = enums.Ownership.ANY_OWNER\n self.enumMediaOwn = enums.Ownership.MEDIA_OWN\n self.enumMediaEdit = enums.Ownership.MEDIA_EDIT\n self.enumMediaPublish = enums.Ownership.MEDIA_PUBLISH\n\n self.entriesMap = {self.enumAnyOwner:[self.listAnyOwner, enums.Ownership.ANY_OWNER.value, False], self.enumMediaOwn:[self.listAllInvalid, enums.Ownership.MEDIA_OWN.value, True], self.enumMediaEdit:[self.listAllInvalid, enums.Ownership.MEDIA_EDIT.value, True], self.enumMediaPublish:[self.listPublisher, enums.Ownership.MEDIA_PUBLISH.value, False]}\n ##################### TEST STEPS - MAIN FLOW #####################\n i = 1\n writeToLog(\"INFO\",\"Step \" + str(i) + \": Going to navigate to \" + self.searchPage)\n if self.common.channel.navigateToPendingaTab(self.categoryName, location=enums.Location.CATEGORY_PAGE) == False:\n self.status = \"Fail\"\n writeToLog(\"INFO\",\"Step \" + str(i) + \": FAILED to navigate to \" + self.searchPage)\n return\n else:\n i = i + 1\n i = i\n\n for entry in self.entriesMap:\n i = i\n writeToLog(\"INFO\", \"Step \" + str(i) + \": Going to filter \" + self.searchPage + \" entries by: \" + self.entriesMap[entry][1] + \"'\")\n if self.common.myMedia.SortAndFilter(enums.SortAndFilter.OWNERSHIP, entry) == False:\n self.status = \"Fail\"\n writeToLog(\"INFO\", \"Step\" + str(i) + \": FAILED to filter \" + self.searchPage + \" entries by '\" + self.entriesMap[entry][1] + \"'\")\n return\n else:\n i = i + 1\n\n writeToLog(\"INFO\", \"Step \" + str(i) + \": Going to verify filter \" + self.searchPage + \" entries by: \" + self.entriesMap[entry][1] + \" while using a \" + self.userType + \" user\")\n if self.common.channel.verifyFiltersInPendingTab(self.entriesMap[entry][0], self.entriesMap[entry][2]) == False:\n self.status = \"Fail\"\n writeToLog(\"INFO\", \"Step\" + str(i) + \": FAILED to verify filter \" + self.searchPage + \" entries by '\" + self.entriesMap[entry][1] + \" while using a \" + self.userType + \" user\")\n return\n else:\n i = i + 1\n\n writeToLog(\"INFO\", \"Step \" + str(i) + \": Going to clear the filter search menu\")\n if self.common.myMedia.filterClearAllWhenOpened() == False:\n self.status = \"Fail\"\n writeToLog(\"INFO\", \"Step\" + str(i) + \": Failed to clear the search menu\")\n return\n else:\n i = i + 1\n i = i\n ##################################################################\n writeToLog(\"INFO\",\"TEST PASSED: All the entries are properly displayed in \" + self.searchPage + \" while using \" + self.filterMenuName + \" filter with \" + self.userType + \" user\")\n # if an exception happened we need to handle it and fail the test\n except Exception as inst:\n self.status = clsTestService.handleException(self,inst,self.startTime)\n\n ########################### TEST TEARDOWN ###########################\n def teardown_method(self,method):\n try:\n self.common.handleTestFail(self.status)\n writeToLog(\"INFO\",\"**************** Starting: teardown_method ****************\")\n writeToLog(\"INFO\",\"**************** Ended: teardown_method *******************\")\n except:\n pass\n clsTestService.basicTearDown(self)\n #write to log we finished the test\n logFinishedTest(self,self.startTime)\n assert (self.status == \"Pass\")\n\n pytest.main('test_' + testNum + '.py 
--tb=line')","sub_path":"web/tests/eSearch/test_4595.py","file_name":"test_4595.py","file_ext":"py","file_size_in_byte":6903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"526587849","text":"#!/usr/bin/env python3\nimport sys\n\n\ndef solve(N: int, W: int, w: \"List[int]\", v: \"List[int]\"):\n dp = []\n for i in range(N+1):\n dp.append([0]*(W+1))\n\n for i in range(N):\n for cur_W in range(W+1):\n # i番目を選ばない時(選べない)\n if cur_W < w[i]:\n # cur_Wつまり仮の重さ限界を、その品物の重さが超えてればそもそも選べない\n dp[i+1][cur_W] = max(dp[i+1][cur_W],dp[i][cur_W])\n print('Not Select')\n else:#その品物単品で選んだ場合?\n dp[i+1][cur_W] = max(dp[i][cur_W],dp[i][cur_W - w[i]] + v[i])\n #iを選んだ場合の価値と、選ばない場合の価値の大きい方で更新\n print('Select')\n print(f'i+1,cur+W,w[i],v[i]={i+1},{cur_W},{w[i]},{v[i]}')\n for x in dp:\n print(x)\n\n print(dp[N][W]) \n return\n\n\n# Generated by 2.4.0 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)\ndef main():\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens)) # type: int\n W = int(next(tokens)) # type: int\n w = [int()] * (N) # type: \"List[int]\"\n v = [int()] * (N) # type: \"List[int]\"\n for i in range(N):\n w[i] = int(next(tokens))\n v[i] = int(next(tokens))\n solve(N, W, w, v)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dp/D/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"394198576","text":"#coding=utf-8\nfrom handle.action import Action\nfrom selenium import webdriver\nfrom handle.excel_handle import Excel_handle\nimport time\n\n\n\"\"\"PC添加线索\"\"\"\nclass Clue_action:\n def __init__(self,driver):\n self.ha=Action(driver)\n self.driver=driver\n self.he = Excel_handle()\n def add_clue(self):\n rows = self.he.get_rows(10)\n try:\n for i in range(1, int(rows) + 1):\n is_run = self.he.get_value(i, 4, 10)\n if is_run == \"yes\":\n action_ways = self.he.get_value(i, 5, 10)\n input_data = self.he.get_value(i, 6, 10)\n # print(input_data)\n page = self.he.get_value(i, 7, 10)\n element = self.he.get_value(i, 8, 10)\n Expect_page = self.he.get_value(i, 9, 10)\n Expect_element = self.he.get_value(i, 10, 10)\n EXpect_result = self.he.get_value(i, 11, 10)\n element_number = self.he.get_value(i, 13, 10)\n pre_condition = self.he.get_value(i, 14, 10)\n pre_page = self.he.get_value(i, 15, 10) # 前置页面\n pre_element = self.he.get_value(i, 16, 10) # 前置元素\n pre_element_number = self.he.get_value(i, 17, 10) # 前置元素位置\n if action_ways == \"open_browser\":\n self.ha.open_url(input_data)\n self.driver.maximize_window() # 最大化屏幕\n elif action_ways == \"input_action\":\n if pre_condition == \"日期\": # 输入日期判断\n self.ha.data_input(page, element, element_number)\n input_data = input_data.strftime(\"%Y-%m-%d\")\n if input_data == \"姓名\":\n input_data = self.ha.create_name()\n name = input_data\n elif input_data == \"手机号码\":\n input_data = self.ha.createPhone()\n elif input_data == \"证件号码\":\n input_data = self.ha.ident_generator()\n elif input_data == \"学籍号\":\n input_data = self.ha.registration_number()\n self.ha.input_action(page, element, input_data, element_number)\n elif action_ways == \"click_action\":\n if pre_condition == \"日期\": # 输入日期判断\n self.ha.scroll_page(page, element, element_number)\n else:\n self.ha.click_action(page, element, element_number)\n elif 
action_ways == \"clear_action\":\n self.ha.clear_action(page, element, element_number)\n elif action_ways == \"wait\":\n self.ha.wait_action(int(input_data))\n elif action_ways == \"wait_element\":\n self.ha.wait_element_show(page, element)\n elif action_ways == \"wait_elemnt_click\":\n self.ha.wait_element_click(page, element)\n elif action_ways == \"page_scroll\":\n self.ha.page_scroll(page, element, element_number)\n elif action_ways == \"scroll_page\":\n self.ha.scroll_page(page, element, element_number)\n elif action_ways == \"father_son_click\":\n self.ha.father_son_click(page, element, pre_page, pre_element, number1=element_number,\n number2=pre_element_number)\n elif action_ways == \"element_text\":\n result = self.ha.element_text(page, element, element_number)\n print(result)\n if result == EXpect_result:\n flag = True\n self.he.write_cell_value(i, 12, \"Success\", \"add_clue_pc\")\n else:\n flag = False\n self.ha.save_screenshot_action(\"../screenshot/login.png\")\n self.he.write_cell_value(i, 12, \"Fail\", \"add_clue_pc\")\n if Expect_element != None: # 如果期待元素为空,则不执行\n try:\n flag = True\n self.ha.wait_element_show(Expect_page, Expect_element)\n self.he.write_cell_value(i, 12, \"Success\", \"add_clue_pc\")\n except Exception as e:\n flag = False\n self.ha.save_screenshot_action(\"../screenshot/\" + Expect_element + \".png\")\n self.he.write_cell_value(i, 12, \"Fail\", \"add_clue_pc\")\n except Exception as e:\n flag=False\n self.he.write_cell_value(i, 12, \"Fail\",\"add_clue_pc\")\n self.ha.save_screenshot_action(\"../screenshot/\"+element+\".png\")\n return flag\nif __name__==\"__main__\":\n driver = webdriver.Chrome()\n if(Clue_action(driver).add_clue()):\n print(\"执行成功\")\n else:\n print(\"执行失败\")\n driver.quit()","sub_path":"venv/bussiness/add_clue_pc.py","file_name":"add_clue_pc.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"438985173","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import Select\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport glob\n\n\ndata = []\nurls = ['http://140.112.161.154/regquery/Chinese.aspx',\n 'http://140.112.161.154/regquery/Foreign.aspx',\n 'http://140.112.161.154/regquery/Reqcou.aspx',\n 'http://140.112.161.154/regquery/FreshmanSeminar.aspx',\n 'http://140.112.161.154/regquery/Physical.aspx',\n 'http://140.112.161.154/regquery/MilTr.aspx',]\n\noptions = Options()\noptions.add_argument('--headless')\ndriver = webdriver.Chrome('./chromedriver', options=options)\n\nfor url in urls:\n print(url)\n driver.get(url)\n while True:\n try:\n WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.CLASS_NAME, 'main')))\n except Exception as e:\n print(e)\n\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n\n table_raw = soup.find('table', id='MainContent_GridView1')\n\n if table_raw is None:\n print('None')\n break\n\n table_row = table_raw.select('tr')\n\n for tr in table_row[1:-2]:\n td = tr.find_all('td')\n data.append([t.text.strip() for t in td])\n\n try:\n next_page = driver.find_element_by_link_text('下一頁').click()\n except NoSuchElementException:\n 
break\n\ndriver.get('http://140.112.161.154/regquery/Dept.aspx')\ntry:\n WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.CLASS_NAME, 'main')))\nexcept Exception as e:\n print(e)\n\nsoup = BeautifulSoup(driver.page_source, 'html.parser')\n\ncolleges = soup.find('select', id=\"MainContent_ddCollege\")\ncolleges = colleges.select('option')\n# print(colleges)\n\nfor college in colleges[1:]:\n print(college['value'])\n\n colleges_select = Select(driver.find_element_by_id(\"MainContent_ddCollege\"))\n colleges_select.select_by_value(college['value'])\n try:\n WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.ID, \"MainContent_ddDptcode\")))\n except Exception as e:\n print(e)\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n\n depts = soup.find('select', id=\"MainContent_ddDptcode\")\n depts = depts.select('option')\n for dept in reversed(depts):\n print('-', dept['value'])\n dept_select = Select(driver.find_element_by_id(\"MainContent_ddDptcode\"))\n dept_select.select_by_value(dept['value'])\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n while True:\n try:\n WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.CLASS_NAME, 'main')))\n except Exception as e:\n print(e)\n\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n\n table_raw = soup.find('table', id='MainContent_GridView1')\n\n if table_raw is None:\n print('None')\n break\n\n table_row = table_raw.select('tr')\n\n for tr in table_row[1:-2]:\n td = tr.find_all('td')\n data.append([t.text.strip() for t in td])\n\n try:\n next_page = driver.find_element_by_link_text('下一頁').click()\n except NoSuchElementException:\n try:\n next_page = driver.find_element_by_link_text('第一頁').click()\n except NoSuchElementException:\n pass\n break\n\ndriver.quit()\n\ndf_new = pd.DataFrame(data, columns=['流水號', '課號', '課程識別碼', '班次', '課程名稱', '學分', '授課教師', '通識領域', '加選方式', '上課時間',\n '限制條件', '人數上限', '外系上限', '外校上限', '已選上人數', '已選上外系人數', '登記人數', '剩餘名額'])\ndf_new = df_new[['流水號', '人數上限', '外系上限', '外校上限', '已選上人數', '已選上外系人數', '登記人數', '剩餘名額']]\nprint(df_new)\ndf_new = df_new.drop_duplicates(subset=['流水號'], keep='last')\ndf_new.to_csv('./save_csv/popularity.csv', encoding='utf-8-sig', index=False)\n\nfor file in glob.iglob('./save_csv/' + '**/*' + '.csv', recursive=True):\n if str(file) == './save_csv/popularity.csv':\n continue\n print(file)\n df_old = pd.read_csv(file)\n df_old['流水號'] = df_old['流水號'].fillna(0).astype(int)\n # print(df_old.head())\n\n df_new = df_new.astype(int)\n\n df = pd.merge(df_old, df_new, on='流水號', how='left')\n print(df)\n file_name = file.split('/')[-1]\n df = df.sort_values(by=['登記人數'], ascending=False)\n df.to_csv(f'./new_csv/{file_name}', encoding='utf-8-sig', index=False)","sub_path":"popularity.py","file_name":"popularity.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"427364744","text":"import sqlite3\nimport tornado\nimport tornado.web\nimport tornado.ioloop\nimport tornado.httpserver\nimport os\n\nBASEDIR = os.path.dirname(__file__)\nTEMPLATES = os.path.join(BASEDIR, 'templates')\n\n\n# Just for caution\nclass DatabaseSingleton(type):\n def __call__(cls, *args, **kwargs):\n if not hasattr(cls, 'db'):\n cls.db = super(DatabaseSingleton, cls).__call__(*args, **kwargs)\n return cls.db\n\n\n# Model\nclass DatabaseConnection(metaclass=DatabaseSingleton):\n def __init__(self):\n self._status = None\n self._connection = None\n self._cursor = None\n\n def 
set_status(self, new_status):\n        self._status = new_status\n\n    def get_status(self):\n        return self._status\n\n    def connect(self, name):\n        self._connection = sqlite3.connect(name)\n        self.get_cursor()\n\n    def get_cursor(self):\n        self._cursor = self._connection.cursor()\n\n    def execute(self, query):\n        return self._cursor.execute(query)\n\n    def commit_and_close(self):\n        self._connection.commit()\n        self._connection.close()\n\n\n# Controllers\nclass IndexHandler(tornado.web.RequestHandler):\n    def get(self):\n        query = \"select * from task\"\n        todos = db_execute(query)\n        self.render('index.html', todos=todos)\n\n\nclass NewHandler(tornado.web.RequestHandler):\n    def post(self):\n        name = self.get_argument('name', None)\n        query = \"create table if not exists task (id INTEGER \\\n        PRIMARY KEY, name TEXT, status NUMERIC)\"\n        db_execute(query)\n        query = f\"insert into task (name, status) values {name, 1}\"\n        db_execute(query)\n        self.redirect('/')\n\n    def get(self):\n        self.render('new.html')\n\n\nclass UpdateHandler(tornado.web.RequestHandler):\n    def get(self, id, status):\n        query = f\"update task set status={int(status)} where id={id}\"\n        db_execute(query)\n        self.redirect('/')\n\n\nclass DeleteHandler(tornado.web.RequestHandler):\n    def get(self, id):\n        query = f\"delete from task where id={id}\"\n        db_execute(query)\n        self.redirect('/')\n\n\nclass RunApp(tornado.web.Application):\n    def __init__(self):\n        handlers = [\n            (r'/', IndexHandler),\n            (r'/todo/new', NewHandler),\n            (r'/todo/update/(\\d+)/(\\d+)', UpdateHandler),\n            (r'/todo/delete/(\\d+)', DeleteHandler)\n        ]\n        settings = {\n            'debug': True,\n            'template_path': TEMPLATES,\n            'static_path': 'static'\n        }\n        tornado.web.Application.__init__(self, handlers, **settings)\n\n\ndef db_execute(query):\n    return db.execute(query)\n\n\nif __name__ == '__main__':\n    db = DatabaseConnection()\n    db.connect(os.path.join(BASEDIR, \"task.db\"))\n    # FIXME: works but is ugly\n    try:\n        http_server = tornado.httpserver.HTTPServer(RunApp())\n        http_server.listen(5000)\n        tornado.ioloop.IOLoop.instance().start()\n    finally:\n        db.commit_and_close()\n","sub_path":"mvc/examples/webapp_example.py","file_name":"webapp_example.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"362786874","text":"from __future__ import unicode_literals\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, JsonResponse, HttpResponseRedirect\nfrom django.template import loader\nfrom rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom twilio.rest import Client\nfrom flask import *\nfrom pprint import pprint\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import *\nimport json\nimport requests\nimport random\nimport urllib.request\nimport urllib.parse\n# Create your views here.\n\nclass UserRegister(APIView):\n    def post(self,request):\n        name=request.POST['name']\n        password=request.POST['password']\n        userId=request.POST['userid']\n        phone = request.POST['phone']\n        interest = request.POST['interest']\n        userObjects = 
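The DatabaseSingleton metaclass above caches the first instance on the class object, so every later construction returns the same connection wrapper. A quick check of that behaviour (names are from the source):

conn_a = DatabaseConnection()
conn_b = DatabaseConnection()
assert conn_a is conn_b  # both names refer to the single cached instance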
User.objects.all()\n success = 1\n responseDictionary = dict()\n for userObject in userObjects:\n if userObject.userId == userId:\n success = 0\n\n if success==1:\n userObj = User.objects.create(\n name=name,\n userId = userId,\n password = password,\n contact = phone,\n verificationCode = 000000,\n location = \"26.884570, 80.995931\",\n fieldOfInterest = interest,\n message = \"Joined abhay\",\n )\n userObj.save()\n responseDictionary['success'] = 1\n if success==0:\n return HttpResponse('registration failed')\n else :\n return HttpResponse(json.dumps(responseDictionary))\n\n\nclass UserLogin(APIView):\n def post(self,request):\n userId = request.POST['id']\n password = request.POST['password']\n userObjects = User.objects.all()\n success = 0\n responseDictionary = dict()\n for userObject in userObjects:\n if userObject.userId == userId and userObject.password == password:\n success = 1\n responseDictionary['name'] = userObject.name\n responseDictionary['interest'] = userObject.fieldOfInterest\n responseDictionary['id'] = userObject.userId\n if success == 1:\n return HttpResponse(json.dumps(responseDictionary))\n else:\n return HttpResponse(\"unsuccessful\")\n\n\n\nclass LocationUpdate(APIView):\n def post(self,request):\n userId = request.POST['id']\n lat = request.POST['currentlat']\n long = request.POST['currentlong']\n userObjects = User.objects.all()\n for userObject in userObjects:\n if userObject.userId == userId:\n location = str(lat)+\",\"+str(long)\n userObject.location = location\n userObject.save()\n temp = dict()\n temp['status']='1'\n return HttpResponse(json.dumps(temp))\n\n\n\nclass RequestOtp(APIView):\n def post(self,request):\n\n userId = request.POST['id']\n verification = random.randint(100000, 999999)\n userObjects = User.objects.all()\n responseDictionary = dict()\n success = 0\n for userObject in userObjects:\n if userObject.userId == userId:\n userObject.verificationCode = verification\n responseDictionary['otp'] = verification\n success = 1\n userObject.save()\n if success == 1:\n return HttpResponse(json.dumps(responseDictionary))\n else:\n return HttpResponse(\"failed\")\n\n\nclass Message(APIView):\n def post(self,request):\n userId = request.POST['id']\n message = request.POST['message']\n date = request.POST['date']\n time = request.POST['time']\n userObjects = User.objects.all()\n for userObject in userObjects:\n if userObject.userId == userId:\n messageObject = Messages.objects.create(\n message=message,\n time = time,\n date = date,\n user = userObject,\n\n )\n messageObject.save()\n temp = dict()\n temp['status']='1'\n return HttpResponse(json.dumps(temp))\n\n\nclass Sos(APIView):\n def post(self,request):\n userId = request.POST['id']\n\n parentObjects = Parent.objects.all()\n for parentObject in parentObjects:\n if parentObject.user.userId == userId:\n no = '91'+str(parentObject.user.contact)\n print(no)\n msg = str(parentObject.user.name)+\" needs your help, last known location \"+str(parentObject.user.location)+\". Contact no \"+str(parentObject.user.contact)\n contactNo = parentObject.contact\n\n try:\n account_sid = \"ACbd2fe3dca93a5716fa8207d0d56ce5b1\"\n auth_token = \"3c9dcbe20fc79b6d7e3f14316c5867a9\"\n client = Client(account_sid, auth_token)\n client.messages.create(\n to=(\"+\" + str(parentObject.user.name)),\n from_=\"+18165216110\",\n body=(str(parentObject.user.name)+\" needs your help, last known location \"+str(parentObject.user.location)+\". 
Contact no \"+str(parentObject.user.contact)))\n client.messages.create(\n to=(\"+91\" + str(no)),\n from_=\"+18165216110\",\n body=(\"Follow this link to track your order http://165.227.97.128:8000//request/getdriverlocation/\"\" .\"),\n )\n except:\n pass\n\n temp = dict()\n temp['status']='1'\n return HttpResponse(json.dumps(temp))\n\n\n\ndef home(request):\n template = loader.get_template('home/index.html')\n context = {\n 'registrationfailed': 0,\n 'loginfailed': 0,\n }\n return HttpResponse(template.render(context, request))\n\n\n\n\n\ndef register(request):\n name = request.POST['name']\n username = request.POST['username']\n password = request.POST['password']\n userId = request.POST['userId']\n verification = request.POST['verification']\n verification = int(verification)\n contact = request.POST['contact']\n userObjects = User.objects.all()\n\n exists = 0\n\n for userObject in userObjects:\n if userObject.userId == userId and int(userObject.verificationCode) == verification:\n exists = 1\n parentObject = Parent.objects.create(\n name = name,\n userId = username,\n password = password,\n user = userObject,\n contact = contact,\n )\n parentObject.save()\n template = loader.get_template('home/index.html')\n context = {\n 'registrationfailed': 0,\n 'loginfailed': 0,\n }\n\n\n if(exists == 0):\n template = loader.get_template('home/index.html')\n context = {\n 'registrationfailed': 1,\n 'loginfailed': 0,\n }\n\n return HttpResponse(template.render(context,request))\n\n\ndef login(request):\n userId = request.POST['username']\n password = request.POST['password']\n parentObjects = Parent.objects.all()\n success=0\n for parentObject in parentObjects:\n if parentObject.userId == userId and parentObject.password == password:\n lat,long = parentObject.user.location.split(',')\n messageObjects = Messages.objects.all()\n messages = []\n for messageObject in messageObjects:\n if messageObject.user.userId == parentObject.user.userId:\n message = dict()\n message['message']=messageObject.message\n message['date']=messageObject.date\n message['time']=messageObject.time\n messages.append(message)\n success=1\n template = loader.get_template('home/dashboard.html')\n context = {\n 'registrationfailed': 0,\n 'loginfailed': 0,\n 'username' : parentObject.name,\n 'messages' : messages,\n 'lat' : lat,\n 'long' : long,\n }\n if success==0:\n template = loader.get_template('home/index.html')\n context = {\n 'registrationfailed': 0,\n 'loginfailed': 1,\n }\n return HttpResponse(template.render(context,request))\n","sub_path":"womensecurity/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"390725464","text":"#Given an unsorted integer array A, print the contents of the array in the\n# given format: {arrayindex:value, arrayindex:value}.\n# Note that there is no comma after the last value. 
\n\ndef Dictionary(Array):\n\n Dictionary={}\n X=len(Array)\n index=0\n\n while (index!=X):\n Dictionary[index]=Array[index]\n index+=1\n\n return Dictionary\n\n#N=input(\"Enter the number of elements: \")\n\n#Array=[]\n\n#for a in range(0,N):\n\n #Elements=input(\"Enter the Elements in Array: \")\n\n #Array.append(Elements)\n\n#print(\"The given Array is\", Array)\n\n#print(\"The Array in the format\", Dictionary(Array))\n","sub_path":"Index and Value/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"338787016","text":"'''\nCreated on Jun 22, 2019\n\n@author: asharda\n'''\n\nimport numpy as np,matplotlib.pyplot as plt\nnum_points=5\nx_min,x_max=0,4\nx_values=np.linspace(x_min,x_max,num_points)\ny_values=x_values**2\nplt.plot(x_values,y_values)\nplt.show()\n","sub_path":"pyplotex.py","file_name":"pyplotex.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"614221536","text":"from logger import *\nimport logging\n\n__all__ = ['Fn_ControlValves']\n\nclass Fn_ControlValves:\n\n\n def __init__(self,com,df,idxNo,logger):\n self._idxNo =idxNo\n self.gen = com\n self.devicename = df.iloc[self._idxNo, 0]\n self.logger = logger\n self.df = df\n self.setup()\n self.controlvalveinitilization()\n\n\n def setup(self):\n try:\n self.sp = self.df.iloc[self._idxNo, 3]\n self.pv = str(self.df.iloc[self._idxNo, 4])\n self.gen.writegeneral.writenodevalue(self.pv, 0)\n self.delaytime = self.df.iloc[self._idxNo, 5]\n self.highpvvalue = self.df.iloc[self._idxNo,6]\n self.lowpvvalue = self.df.iloc[self._idxNo, 7]\n\n\n except Exception as e:\n print(\"exception raise\", e.args)\n log_exception(e)\n\n def controlvalveinitilization(self):\n self.process()\n\n def process(self):\n\n try:\n\n rawspvalue = self.gen.readgeneral.readnodevalue(self.sp)\n self.currentvalue = self.gen.readgeneral.readtagvalue(self.pv)\n if rawspvalue > 0.0:\n if rawspvalue > self.currentvalue:\n diff = rawspvalue - self.currentvalue\n self.currentvalue = self.currentvalue + (diff / self.delaytime)\n\n if rawspvalue < self.currentvalue:\n diff = self.currentvalue - rawspvalue\n self.currentvalue = self.currentvalue - (diff / self.delaytime)\n else:\n self.currentvalue = self.gen.readgeneral.readnodevalue(self.pv)\n\n self.gen.writegeneral.writenodevalue(self.pv, self.currentvalue)\n\n level1 = logging.WARNING\n messege1 = self.devicename + \":\" + self.pv + \" value is \" + str(self.currentvalue)\n self.logger.log(level1, messege1)\n\n except Exception as e:\n log_exception(e)\n\n def scalingconvtoraw(self, val, highlimit, lowlimit):\n rawvalue = int((val * 27648) / (highlimit - lowlimit))\n\n return rawvalue\n\n @property\n def processval(self):\n pv = float((self.currentvalue/27648)*(self.highpvvalue - self.lowpvvalue))\n return pv\n\n @property\n def setpoint(self):\n return self.sp\n\n\n def readalltags(self):\n n = 3\n row, col = self.df.shape\n print(col)\n while n < col:\n data = self.df.iloc[self._idxNo, n]\n yield data,n\n n = n + 1\n","sub_path":"CASTERSIMULATION/Drive/fn_controlvalve_V1.py","file_name":"fn_controlvalve_V1.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"189952298","text":"import pytsk3\nimport sys\nimport json\nimport struct\nimport binascii\nimport datetime\nimport csv\n\nclass 
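The Dictionary helper in dictionary.py builds an index-to-value mapping with a manual while loop; dict(enumerate(...)) produces the same result in one expression:

def Dictionary(Array):
    # Equivalent to the while loop above: map each index to its value.
    return dict(enumerate(Array))

assert Dictionary(['a', 'b', 'c']) == {0: 'a', 1: 'b', 2: 'c'}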
ObjectId(object):\n    def __init__(self, buf):\n        self._buffer = buf\n\n    @property\n    def rawtimestamp(self):\n        # http://computerforensics.parsonage.co.uk/downloads/TheMeaningofLIFE.pdf\n        # The file ObjectID is a time based version which means it is created using a system time.\n        # The time is a 60 bit time value, a count of 100 nanosecond intervals of UTC since midnight\n        # at the start of 15th October 1582.\n\n        # Get le uint64 and mask off the four version bits, leaving the 60 bit tick count\n        le_timestamp = struct.unpack(\"<Q\", self._buffer[0:8])[0]\n        return le_timestamp & 0x0FFFFFFFFFFFFFFF\n\n    @property\n    def timestamp(self):\n        # Rebase the 100 ns ticks from the 1582-10-15 epoch onto the Unix epoch\n        unix_seconds = (self.rawtimestamp - 0x01B21DD213814000) / 10**7\n        return datetime.datetime.utcfromtimestamp(unix_seconds).strftime('%Y-%m-%d %H:%M:%S')\n\n    @property\n    def version(self):\n        high_order = struct.unpack(\">H\", self._buffer[6:8])[0]\n        return high_order & 0x000f\n\n    @property\n    def variant(self):\n        field = struct.unpack(\">H\", self._buffer[8:10])[0]\n        return field >> 14\n\n    @property\n    def sequence(self):\n        field = struct.unpack(\">H\", self._buffer[8:10])[0]\n        return field & 0x3FFF\n\n    @property\n    def mac(self):\n        return binascii.hexlify(self._buffer[10:16])\n\n\n\ndef printOBJID(file_entry, fullpath):\n    for attribute in file_entry:\n        if attribute.info.type == pytsk3.TSK_FS_ATTR_TYPE_NTFS_OBJID:\n            rawoid = file_entry.read_random(0, 16, attribute.info.type, attribute.info.id)\n            print(binascii.hexlify(rawoid), fullpath)\n            object_id = ObjectId(rawoid)\n            wr.writerow([fullpath,object_id.rawtimestamp, object_id.timestamp, datetime.datetime.fromtimestamp(file_entry.info.meta.crtime).strftime('%Y-%m-%d %H:%M:%S'),object_id.mac,object_id.version, object_id.variant, object_id.sequence,file_entry.info.meta.addr,file_entry.info.meta.seq ])\n    return\n\n\ndef directoryRecurse(directoryObject, parentPath):\n    for entryObject in directoryObject:\n        #print (\"entry \", entryObject.info.name.name.decode(\"utf-8\"))\n        if entryObject.info.name.name.decode(\"utf-8\") in [\".\", \"..\"]:\n            continue\n\n        try:\n            f_type = entryObject.info.meta.type\n        except:\n            #print(\"Cannot retrieve type of\", entryObject.info.name.name.decode(\"utf-8\"))\n            continue\n\n        try:\n\n            filepath = '/%s/%s' % ('/'.join(parentPath),entryObject.info.name.name.decode(\"utf-8\"))\n            #print(\"path \", filepath)\n            if f_type == pytsk3.TSK_FS_META_TYPE_DIR:\n                sub_directory = entryObject.as_directory()\n                parentPath.append(entryObject.info.name.name.decode(\"utf-8\"))\n                printOBJID(entryObject, filepath)\n                directoryRecurse(sub_directory, parentPath)\n                parentPath.pop(-1)\n\n            elif f_type == pytsk3.TSK_FS_META_TYPE_REG and entryObject.info.meta.size != 0:\n                printOBJID(entryObject, filepath)\n\n            elif f_type == pytsk3.TSK_FS_META_TYPE_REG and entryObject.info.meta.size == 0:\n                printOBJID(entryObject, filepath)\n\n\n        except IOError as e:\n            print(e)\n            continue\n\n\nimg = pytsk3.Img_Info('\\\\\\\\.\\\\C:')\nfs = pytsk3.FS_Info(img)\noutfile = open('objectids.csv','w')\noutfile.write('\"Full Path\",\"ObjectID Raw Timestamp\", \"ObjectID Timestamp\",\"Filesystem creation\",\"ObjectID MAC\",\"ObjectID Version\",\"ObjectID Variant\",\"ObjectID Sequence\",\"File Entry\", \"Sequence\"\\n')\nwr = csv.writer(outfile, quoting=csv.QUOTE_ALL)\n\ndirectoryObject = fs.open_dir(path=\"/\")\ndirectoryRecurse(directoryObject, [])\nfileobject = fs.open(\"/$MFT\")\nprint(\"File Inode:\",fileobject.info.meta.addr)\nprint(\"File Name:\",fileobject.info.name.name)\nprint(\"File Creation Time:\",datetime.datetime.fromtimestamp(fileobject.info.meta.crtime).strftime('%Y-%m-%d %H:%M:%S'))\noutFileName = fileobject.info.name.name\nprint(outFileName)\noutfile = open(outFileName, 'wb')\nfiledata = 
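The constant 0x01B21DD213814000 used in the timestamp conversion above is the number of 100-nanosecond ticks between the GUID epoch (1582-10-15) and the Unix epoch (1970-01-01); it can be verified with integer arithmetic:

import datetime

delta = datetime.datetime(1970, 1, 1) - datetime.datetime(1582, 10, 15)
# delta.seconds is zero here, so days alone carry the whole offset
assert delta.days * 86400 * 10**7 == 0x01B21DD213814000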
fileobject.read_random(0,fileobject.info.meta.size)\noutfile.write(filedata)\noutfile.close()\n","sub_path":"ObjectIDScannerV4.py","file_name":"ObjectIDScannerV4.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"183872034","text":"import logging\nimport threading\nimport time\n\nlogging.basicConfig(level=logging.DEBUG,\n                    format='[%(levelname)s] (%(threadName)-10s) %(message)s')\n\ndef worker():\n    logging.debug('Starting')\n    time.sleep(2)\n    logging.debug('exiting')\n\ndef my_service():\n    logging.debug('Starting')\n    time.sleep(3)\n    logging.debug('exiting')\n\nx = threading.Thread(name='my_service', target=my_service)\ny = threading.Thread(name='worker', target=worker)\nz = threading.Thread(target=worker)\n\ny.start()\nz.start()\nx.start()","sub_path":"Advance Python/Multithreading/ThreadLogging.py","file_name":"ThreadLogging.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"559380563","text":"from igeopy.extension import (get_geoserver_extensions, select_extensions, \n                              get_geoserver_extensions_community)\nfrom igeopy.image import create_install_script, create_dockerfile\n\n# Retrieve link and show selection menu\nextensions = get_geoserver_extensions(g_version=\"2.15.0\")\nextensions_community = get_geoserver_extensions_community(g_version=\"2.15.x\")\nselected_links = select_extensions(extensions)\nselected_links_community = select_extensions(extensions_community, min_selection_count=0)\n\n# Join selected links\nselected_links.extend(selected_links_community)\n\ninstall_path = create_install_script(selected_links, \"./install.sh\", \n                                     \"/usr/local/tomcat/webapps/geoserver/WEB-INF/lib\")\ncreate_dockerfile(install_path)\n","sub_path":"geoserver/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"510086422","text":"import logging\nimport os.path\nimport pickle\nfrom googleapiclient.errors import HttpError\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nfrom typing import Dict, List, Optional\n\n# If modifying these scopes, delete the file token.pickle.\nSCOPES: List[str] = [\n    \"https://www.googleapis.com/auth/spreadsheets\",\n    \"https://www.googleapis.com/auth/gmail.readonly\",\n    \"https://www.googleapis.com/auth/gmail.send\",\n]\n\n\ndef build_service_spreadsheet() -> build:\n    creds: build.credentials = None\n    # The file token.pickle stores the user's access and refresh tokens, and is\n    # created automatically when the authorization flow completes for the first\n    # time.\n    if os.path.exists(\"google_api/token.pickle\"):\n        with open(\"google_api/token.pickle\", \"rb\") as token:\n            creds = pickle.load(token)\n    # If there are no (valid) credentials available, let the user log in.\n    if not creds or not creds.valid:\n        if creds and creds.expired and creds.refresh_token:\n            creds.refresh(Request())\n        else:\n            flow = InstalledAppFlow.from_client_secrets_file(\n                \"google_api/credentials.json\", SCOPES\n            )\n            creds = flow.run_local_server(port=0)\n        # Save the credentials for the next run\n        with open(\"google_api/token.pickle\", \"wb\") as token:\n            pickle.dump(creds, token)\n\n    serv = build(\"sheets\", \"v4\", credentials=creds, cache_discovery=False)\n    return serv\n\n\ndef 
create_sheet_template() -> Optional[Dict[str, str]]:\n try:\n service: build = build_service_spreadsheet()\n except Exception as e:\n logging.error(f'Can not make service for sheets {e}', exc_info=True)\n return None\n\n try:\n if not os.path.exists(\"google_api/sprsh_link.txt\"):\n with open(\"google_api/sprsh_link.txt\", \"w\") as file:\n file.write(\n f\"Spreadsheet id | Sheet name | Spreadsheet_link\\n\\n\"\n )\n spreadsheet = (\n service.spreadsheets().create(\n body={\n \"properties\": {\n \"title\": \"Telegram_bot_applicatons\",\n \"locale\": \"ru_RU\",\n },\n \"sheets\": [\n {\n \"properties\": {\n \"sheetType\": \"GRID\",\n \"sheetId\": 0,\n \"title\": \"bot_appl\",\n }\n }\n ],\n }\n ).execute()\n )\n\n service.spreadsheets().values().append(\n spreadsheetId=spreadsheet[\"spreadsheetId\"],\n valueInputOption=\"USER_ENTERED\",\n range=spreadsheet[\"sheets\"][0][\"properties\"][\"title\"],\n insertDataOption=\"INSERT_ROWS\",\n body={\n \"values\": [\n [\n \"Telegram_id\",\n \"Имя пользователя\",\n \"Телефон\",\n \"email\",\n \"Желаемый курс\",\n \"Время создания заявки\",\n ]\n ],\n },\n ).execute()\n\n service.spreadsheets().batchUpdate(\n spreadsheetId=spreadsheet[\"spreadsheetId\"],\n body={\n \"requests\": [\n {\n \"repeatCell\": {\n \"cell\": {\n \"userEnteredFormat\": {\n \"horizontalAlignment\": \"CENTER\",\n \"textFormat\": {\"fontSize\": 12},\n }\n },\n \"range\": {\n \"sheetId\": 0,\n \"startRowIndex\": 0,\n \"endRowIndex\": 1,\n \"startColumnIndex\": 0,\n \"endColumnIndex\": 6,\n },\n \"fields\": \"userEnteredFormat\",\n }\n },\n {\n \"updateDimensionProperties\": {\n \"range\": {\n \"sheetId\": 0,\n \"dimension\": \"COLUMNS\",\n \"startIndex\": 0,\n \"endIndex\": 1,\n },\n \"properties\": {\"pixelSize\": 140},\n \"fields\": \"pixelSize\",\n }\n },\n {\n \"updateDimensionProperties\": {\n \"range\": {\n \"sheetId\": 0,\n \"dimension\": \"COLUMNS\",\n \"startIndex\": 1,\n \"endIndex\": 3,\n },\n \"properties\": {\"pixelSize\": 170},\n \"fields\": \"pixelSize\",\n }\n },\n {\n \"updateDimensionProperties\": {\n \"range\": {\n \"sheetId\": 0,\n \"dimension\": \"COLUMNS\",\n \"startIndex\": 3,\n \"endIndex\": 5,\n },\n \"properties\": {\"pixelSize\": 200},\n \"fields\": \"pixelSize\",\n }\n },\n {\n \"updateDimensionProperties\": {\n \"range\": {\n \"sheetId\": 0,\n \"dimension\": \"COLUMNS\",\n \"startIndex\": 5,\n \"endIndex\": 6,\n },\n \"properties\": {\"pixelSize\": 250},\n \"fields\": \"pixelSize\",\n }\n },\n ]\n },\n ).execute()\n\n with open(\"google_api/sprsh_link.txt\", \"a\") as file:\n file.write(\n f'{spreadsheet[\"spreadsheetId\"]} | '\n f'{spreadsheet[\"sheets\"][0][\"properties\"][\"title\"]} | '\n f'{spreadsheet[\"spreadsheetUrl\"]}\\n'\n )\n logging.info('Successfully created spreadsheet')\n return {\n \"spreadsheet_id\": spreadsheet[\"spreadsheetId\"],\n \"sheet_name\": spreadsheet[\"sheets\"][0][\"properties\"][\"title\"],\n }\n except HttpError as e:\n logging.error(f'Can not make sheet template {e}', exc_info=True)\n return None\n\n\ndef add_data_to_sprsh(\n user: Dict[str, str], spreadsheet: Optional[Dict[str, str]]) -> bool:\n try:\n service: build = build_service_spreadsheet()\n except Exception as e:\n logging.error(f'Can not make service for sheets {e}', exc_info=True)\n return False\n try:\n service.spreadsheets().values().append(\n spreadsheetId=spreadsheet[\"spreadsheet_id\"],\n valueInputOption=\"USER_ENTERED\",\n range=spreadsheet[\"sheet_name\"],\n insertDataOption=\"INSERT_ROWS\",\n body={\n \"values\": [[item for item in user.values()]],\n 
},\n ).execute()\n logging.info('Successfully added data to spreadsheet')\n return True\n except HttpError as e:\n logging.error(f'Can not add data to sheet {e}', exc_info=True)\n return False\n","sub_path":"google_api/spreadsheets.py","file_name":"spreadsheets.py","file_ext":"py","file_size_in_byte":7923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"653456700","text":"#!/bin/env python3\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.dates import (MONTHLY, WEEKLY, DateFormatter, rrulewrapper, RRuleLocator)\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom datetime import date\nimport sys\n\ndata = np.genfromtxt(\"/scratch/dknapp4/Western_Hawaii/Moorea/moorea_sample_coral_output_rb_20190716.csv\", \n dtype=[('date', 'S8'), ('b1', float), ('b2', float), ('b3', float),\n ('b4', float), ('b5', float), ('b6', float), ('b7', float), ('b8', float),\n ('g1', float), ('g2', float), ('g3', float), ('g4', float), ('g5', float), \n ('g6', float), ('g7', float), ('g8', float), ('b1sd', float), ('b2sd', float), \n ('b3sd', float), ('b4sd', float), ('b5sd', float), ('b6sd', float), ('b7sd', float),\n ('b8sd', float), ('g1sd', float), ('g2sd', float), ('g3sd', float), ('g4sd', float),\n ('g5sd', float), ('g6sd', float), ('g7sd', float), ('g8sd', float)], skip_header=1, delimiter=',')\n\nthedates = []\n\nfor thisdate in data:\n yr = int(thisdate[0][0:4])\n month = int(thisdate[0][4:6])\n day = int(thisdate[0][6:])\n thedates.append(date(yr, month, day))\n\nthedates = np.asarray(thedates)\n\nrule = rrulewrapper(WEEKLY, interval=1)\nloc = RRuleLocator(rule)\nformatter = DateFormatter('%m/%d/%y')\n\nthedates = thedates[-7:]\ndata = data[-7:]\n\nwith PdfPages('coral_change_moorea_rb_short_rev20190811.pdf') as pdf:\n ## Page 1, Red\n fig = plt.figure(figsize=(8,10))\n\n ax = plt.subplot(2, 1, 1)\n ax.set_title('')\n for samp in range(1,9):\n bname = 'b'+(\"%1d\" % samp)\n temp = data[bname]\n good = np.not_equal(temp, -9.)\n plt.plot_date(thedates[good], temp[good], 'b')\n ax.xaxis.set_major_locator(loc)\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_tick_params(rotation=30, labelsize=10)\n ax.set_ylabel('Bottom Reflectance')\n \n ax = plt.subplot(2, 1, 2)\n ax.set_title('')\n for samp in range(9,17):\n bname = 'g'+('%1d' % (samp-8))\n temp = data[bname]\n good = np.not_equal(temp, -9.)\n plt.plot_date(thedates[good], temp[good], 'g')\n ax.xaxis.set_major_locator(loc)\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_tick_params(rotation=30, labelsize=10)\n ax.set_ylabel('Reflectance')\n pdf.savefig(fig)\n fig.savefig(\"slide_time_series_short_rb.png\", format='png')\n plt.close()\n\n ## d = pdf.infodict()\n ## d['Title'] = 'Coral Brightening at Moorea (July 2018 - July 2019)'\n ## d['Author'] = 'David Knapp'\n ## d['Subject'] = 'Allen Coral Atlas'\n ## d['CreationDate'] = date(2019, 7, 3)\n ## d['ModDate'] = date.today()\n\n","sub_path":"make_vulcan_plots4.py","file_name":"make_vulcan_plots4.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"620081925","text":"import tarantool\nimport logging\n\nHOST = \"localhost\"\nPORT = 33013\nSOCKET_TIMEOUT = 1\nRECONNECT_MAX_ATTEMPTS = 2\n\n\nclass Store(object):\n def __init__(self, log=True):\n self.connection = None\n self.host = HOST\n self.port = PORT\n self.socket_timeout = SOCKET_TIMEOUT\n self.reconnect_max_attempts = RECONNECT_MAX_ATTEMPTS\n self.log = log\n 
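For reference, the row-append call that create_sheet_template and add_data_to_sprsh build up above reduces to this shape (spreadsheet id is a placeholder; the range is the sheet name used by the source, and `service` is what build_service_spreadsheet returns):

service.spreadsheets().values().append(
    spreadsheetId="SPREADSHEET_ID",   # placeholder
    range="bot_appl",                 # sheet title created by the source
    valueInputOption="USER_ENTERED",
    insertDataOption="INSERT_ROWS",
    body={"values": [["one", "row", "of", "cells"]]},
).execute()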
self.connect()\n\n    def connect(self):\n        for i in range(self.reconnect_max_attempts):\n            try:\n                self.connection = tarantool.Connection(\n                    host=self.host,\n                    port=self.port,\n                    socket_timeout=self.socket_timeout)\n                break\n            except Exception:\n                self.connection = None\n\n    def disconnect(self):\n        self.connection = None\n        self.port = 100000\n        self.connect()\n\n    def get(self, cid):\n        try:\n            self.connection.ping()\n        except Exception:\n            self.connect()\n        if not self.connection:\n            raise tarantool.DatabaseError(\"Store not connected!\")\n        tt_int = self.connection.call(\"get_interests\", cid)\n        clients_interests = str(tt_int[0])\n        clients_interests = clients_interests.replace(\"\\'\", \"\\\"\")\n        return clients_interests\n\n    def cache_get(self, uid):\n        try:\n            self.connection.ping()\n        except Exception:\n            self.connect()\n        if not self.connection:\n            return None\n        tt_score = self.connection.call(\"cache_get_score\", uid)\n        if tt_score[0]:\n            score = \"%.1f\" % tt_score[0]\n        else:\n            return None\n        return float(score)\n\n    def cache_set(self, *args):\n        try:\n            self.connection.ping()\n        except Exception:\n            self.connect()\n        if self.connection:\n            try:\n                fresh_values = (args[0], args[1], args[2])\n                self.connection.call(\"cache_set_score\", fresh_values)\n                tt_score = self.connection.call(\"cache_get_score\", args[0])\n                return tt_score\n            except Exception:\n                if self.log:\n                    logging.warning(\"Store error!\")\n                return None\n        else:\n            if self.log:\n                logging.warning(\"Store not connected!\")\n            return None\n\n    def cache_delete(self, uid):\n        try:\n            self.connection.ping()\n        except Exception:\n            self.connect()\n        if self.connection:\n            tt = self.connection.space(\"score\")\n            tt.delete(uid)\n        return self\n","sub_path":"api/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"303854984","text":"from Bio import SeqIO\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport logging\n\n# setup module logger\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\ndef plot_qualities_along_read(fqin, plotout):\n    \"\"\"Make a boxplot with quality distribution along the read.\n\n    :param fqin: input reads in fastq file\n    :type fqin: file handle or path\n    :param plotout: file with generated plot\n    :type plotout: file handle or path\n    \"\"\"\n\n    # get read qualities\n    try:\n        read_quality_strs = [r.letter_annotations['phred_quality']\n                             for r in SeqIO.parse(fqin, 'fastq')]\n    except FileNotFoundError:\n        log.error('Input %s fastq file not found' % fqin)\n        raise\n\n    # preallocate the list of length of longest read\n    max_rlen = max(len(r) for r in read_quality_strs)\n    quals_per_pos = [[] for _ in range(max_rlen)]\n\n    for read_qualities in read_quality_strs:\n        for pos, base_quality in enumerate(read_qualities):\n            quals_per_pos[pos].append(base_quality)\n\n    # make a plot\n    ## make ticks every 20 nucleotides\n    xticks = np.arange(0, len(quals_per_pos) + 1, 20)\n    plt.boxplot(quals_per_pos)\n    plt.xticks(xticks)\n    plt.title('Phred quality distribution along the read')\n    plt.xlabel('Position in the read')\n    plt.ylabel('Phred quality score')\n    try:\n        plt.savefig(plotout)\n    except FileNotFoundError:\n        log.error('Output filepath \"%s\" is not valid.' 
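Each Store method above repeats the same ping-then-reconnect preamble. One way to factor it out, sketched against the class as written (the helper name is hypothetical, not part of the source):

def ensure_connected(store):
    # Ping the live connection; on any failure fall back to a reconnect attempt.
    try:
        store.connection.ping()
    except Exception:
        store.connect()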
% plotout)\n raise\n\n\ndef plot_mean_quality_distribution(fqin, plotout):\n \"\"\"Make a histogram mean quality of the reads\n\n :param fqin: input reads in fastq file\n :type fqin: file handle or path\n :param plotout: file with generated plot\n :type plotout: file handle or path\n \"\"\"\n\n # get read qualities\n try:\n mean_read_qualities = [np.mean(r.letter_annotations['phred_quality'])\n for r in SeqIO.parse(fqin, 'fastq')]\n except FileNotFoundError:\n log.error('Input %s fastq file not found' % fqin)\n raise\n\n nbins = np.ceil(max(mean_read_qualities) - min(mean_read_qualities))\n plt.hist(mean_read_qualities, nbins)\n plt.title('Mean read phred quality distribution.')\n plt.xlabel('Mean read quality')\n plt.ylabel('Number of reads')\n try:\n plt.savefig(plotout)\n except FileNotFoundError:\n log.error('Output filepath \"%s\" is not valid.' % plotout)\n raise\n\n\ndef plot_rlen_distribution(fqin, plotout):\n \"\"\"Make a histogram of read lenghts\n\n :param fqin: input reads in fastq file\n :type fqin: file handle or path\n :param plotout: file with generated plot\n :type plotout: file handle or path\n \"\"\"\n\n # get read qualities\n try:\n rlens = [len(r) for r in SeqIO.parse(fqin, 'fastq')]\n except FileNotFoundError:\n log.error('Input %s fastq file not found' % fqin)\n raise\n\n nbins = max(rlens) - min(rlens)\n plt.hist(rlens, nbins)\n plt.title('Read length distribution.')\n plt.xlabel('Read length')\n plt.ylabel('Number of reads')\n try:\n plt.savefig(plotout)\n except FileNotFoundError:\n log.error('Output filepath \"%s\" is not valid.' % plotout)\n raise\n","sub_path":"farad/qc.py","file_name":"qc.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"466887364","text":"import numpy as np\nfrom scipy.optimize import minimize\n\nclass BradleyTerry:\n def __init__(self, comparisons, parsefunc = None):\n \"\"\"\n Constructor\n :param comparisons: list of comparisons\n :param parsefunc: optionally pass a custom parsign function to cope with different data formats\n \"\"\"\n parsefunc = parsefunc if parsefunc is not None else self.__parsefunc__\n self.items, self.comparisons, self.merits = parsefunc(comparisons)\n\n @staticmethod\n def __parsefunc__(comparisons) -> tuple:\n \"\"\"\n Function to parse supplied comparison data to the format needed by the model\n :param comparisons: comparison data\n :return\n \"\"\"\n items = list(set([x[0] for x in comparisons]+[x[1] for x in comparisons]))\n \n # Mapping\n items_parsed = {x: i for i, x in enumerate(items)}\n\n # Mapped comparisons\n comparisons_parsed = []\n for arg1_id, arg2_id, tie in comparisons:\n comparisons_parsed.append([\n items_parsed[arg1_id],\n items_parsed[arg2_id],\n tie\n ])\n\n # Initialize zero-vector for merits\n merits = np.zeros(len(items))\n\n return (items_parsed, comparisons_parsed, merits)\n\n @staticmethod\n def __pfunc__(i: float, j: float, t: float) -> float:\n \"\"\"\n Function to compute pairwise comparison probabilities of non-ties\n :param i: merit of the winning item\n :param j: merit of the loosing item\n :param s: annotation quality score\n :param t: difference threshold\n :return: propability of item i beating item j\n \"\"\"\n p = np.exp(i) / (np.exp(i) + np.exp(j) * np.exp(t))\n return np.log10(p)\n\n @staticmethod\n def __tfunc__(i: float, j: float, t: float) -> float:\n \"\"\"\n Function to compute pairwise comparison probabilities of ties\n :param i: merit of the winning item\n :param j: 
merit of the loosing item\n :param t: difference threshold\n :return: propability of item i beating item j\n \"\"\"\n f1 = np.exp(i) * np.exp(j) * (np.square(np.exp(t)) - 1)\n f2 = (np.exp(i) + np.exp(j) * np.exp(t)) * (np.exp(i) * np.exp(t) + np.exp(j))\n p = f1 / f2\n return np.log10(p)\n\n def __rfunc__(self, i: float, l: float) -> float:\n \"\"\"\n Function to compute regularized probability\n :param i: item merit\n :param l: regularization factor\n :return: value of __pfunc__ for matches with dummy item weighted by l\n \"\"\"\n return l * (self.__pfunc__(i, 1, 0) + self.__pfunc__(1, i, 0))\n\n def __log_likelihood__(self, merits: np.ndarray) -> float:\n \"\"\"\n Log-Likelihood Function\n :param merits: merit vector\n :return: log-likelihood value\n \"\"\"\n k: float = 0 # Maximization sum\n\n # Summing Edge Probabilities\n for arg1, arg2, tie in self.comparisons: \n if tie:\n k += self.__tfunc__(merits[arg1], merits[arg2], self.threshold)\n else:\n k += self.__pfunc__(merits[arg1], merits[arg2], self.threshold)\n\n # Regularization\n for x in range(len(self.items)): \n k += self.__rfunc__(merits[x], self.regularization)\n\n return -1 * k\n\n def fit(self, regularization: float = 0, threshold: float = 0) -> None:\n \"\"\"\n Optimize the model for merits\n :param regularization: regularization parameter\n :param threshold: difference threshold\n \"\"\"\n self.merits = np.ones(len(self.items))\n self.threshold = threshold\n self.regularization = regularization\n \n res = minimize(self.__log_likelihood__, self.merits, method='BFGS', options={\"maxiter\": 100})\n self.merits = res.x\n\n def get_merits(self, normalize=False) -> list:\n \"\"\"\n Returns the merits mapped to items\n :param normalize: if true, returns normalized merit vector to 0-1 range instead of original scores\n :return: dict in the form of {argument_id: merit} sorted by merits\n :exception: Exception if model was not fitted\n \"\"\"\n if not self.merits.any():\n raise Exception('Model has to be fitted first!')\n else:\n d = {argument_id: self.merits[index] for argument_id, index in self.items.items()}\n if normalize:\n mi = min(d.values())\n ma = max(d.values())\n normalize = lambda mi, ma, v: (v-mi)/(ma-mi)\n d.update({k: normalize(mi,ma,v) for k, v in d.items()})\n return sorted(d.items(), key=lambda kv: kv[1])\n","sub_path":"Webis-ArgQuality-20-Model/bradleyterry.py","file_name":"bradleyterry.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"652906166","text":"from __future__ import print_function\nimport numpy as np\nimport random\nfrom tqdm import tqdm\nimport os, sys, pdb, math\nimport networkx as nx\nimport argparse\nimport scipy.io as sio\nimport scipy.sparse as ssp\nimport torch\nfrom torch_geometric.data import Data, Dataset, InMemoryDataset\nimport warnings\nwarnings.simplefilter('ignore', ssp.SparseEfficiencyWarning)\ncur_dir = os.path.dirname(os.path.realpath(__file__))\n#sys.path.append('%s/../../pytorch_DGCNN' % cur_dir)\n#sys.path.append('%s/software/node2vec/src' % cur_dir)\n#import node2vec\n\n\nclass MyDataset(InMemoryDataset):\n def __init__(self, data_list, root, transform=None, pre_transform=None):\n self.data_list = data_list\n super(MyDataset, self).__init__(root, transform, pre_transform)\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def raw_file_names(self):\n return []\n\n @property\n def processed_file_names(self):\n return ['data.pt']\n\n def download(self):\n # 
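Written out, __pfunc__ and __tfunc__ above implement a Bradley-Terry model with a tie threshold t (the Rao-Kupper form, if we call the merits theta; the code returns log10 of these probabilities):

P(i \succ j) = \frac{e^{\theta_i}}{e^{\theta_i} + e^{\theta_j} e^{t}},
\qquad
P(i \sim j) = \frac{e^{\theta_i} e^{\theta_j}\left((e^{t})^{2} - 1\right)}
{\left(e^{\theta_i} + e^{\theta_j} e^{t}\right)\left(e^{\theta_i} e^{t} + e^{\theta_j}\right)}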
Download to `self.raw_dir`.\n pass\n\n def process(self):\n # Read data into huge `Data` list.\n data_list = self.data_list\n\n if self.pre_filter is not None:\n data_list = [data for data in data_list if self.pre_filter(data)]\n\n if self.pre_transform is not None:\n data_list = [self.pre_transform(data) for data in data_list]\n\n data, slices = self.collate(data_list)\n torch.save((data, slices), self.processed_paths[0])\n del self.data_list\n\n\ndef nx_to_PyGGraph(g, graph_label, node_labels, node_features, max_node_label, class_values):\n # convert networkx graph to pytorch_geometric data format\n y = torch.FloatTensor([class_values[graph_label]])\n if len(g.edges()) == 0:\n i, j = [], []\n else:\n i, j = zip(*g.edges())\n edge_index = torch.LongTensor([i+j, j+i])\n edge_type_dict = nx.get_edge_attributes(g, 'type')\n edge_type = torch.LongTensor([edge_type_dict[(ii, jj)] for ii, jj in zip(i, j)])\n edge_type = torch.cat([edge_type, edge_type], 0)\n edge_attr = torch.FloatTensor(class_values[edge_type]).unsqueeze(1) # continuous ratings, num_edges * 1\n x = torch.FloatTensor(one_hot(node_labels, max_node_label+1))\n if node_features is not None:\n x2 = torch.FloatTensor(node_features)\n x = torch.cat([x, x2], 1)\n data = Data(x, edge_index, edge_attr=edge_attr, y=y)\n data.edge_type = edge_type\n return data\n \n\ndef PyGGraph_to_nx(data):\n edges = list(zip(data.edge_index[0, :].tolist(), data.edge_index[1, :].tolist()))\n g = nx.from_edgelist(edges)\n edge_types = {(u, v): data.edge_type[i].item() for i, (u, v) in enumerate(edges)} # transform r back to rating label\n nx.set_edge_attributes(g, name='type', values=edge_types)\n node_types = dict(zip(range(data.num_nodes), torch.argmax(data.x, 1).tolist()))\n nx.set_node_attributes(g, name='type', values=node_types)\n g.graph['rating'] = data.y.item()\n return g\n\n\ndef links2subgraphs(\n A,\n train_indices, \n val_indices, \n test_indices, \n train_labels, \n val_labels, \n test_labels, \n h=1, \n max_nodes_per_hop=None, \n u_features=None, \n v_features=None, \n max_node_label=None, \n class_values=None, \n testing=False):\n # extract enclosing subgraphs\n if max_node_label is None: # if not provided, infer from graphs\n max_n_label = {'max_node_label': 0}\n\n def helper(A, links, g_labels):\n g_list = []\n with tqdm(total=len(links[0])) as pbar:\n for i, j, g_label in zip(links[0], links[1], g_labels):\n g, n_labels, n_features = subgraph_extraction_labeling((i, j), A, h, max_nodes_per_hop, u_features, v_features, class_values)\n if max_node_label is None:\n max_n_label['max_node_label'] = max(max(n_labels), max_n_label['max_node_label'])\n g_list.append((g, g_label, n_labels, n_features))\n else:\n g_list.append(nx_to_PyGGraph(g, g_label, n_labels, n_features, max_node_label, class_values))\n pbar.update(1)\n return g_list\n\n print('Enclosing subgraph extraction begins...')\n train_graphs = helper(A, train_indices, train_labels)\n if not testing:\n val_graphs = helper(A, val_indices, val_labels)\n else:\n val_graphs = []\n test_graphs = helper(A, test_indices, test_labels)\n\n if max_node_label is None:\n train_graphs = [nx_to_PyGGraph(*x, **max_n_label, class_values=class_values) for x in train_graphs]\n val_graphs = [nx_to_PyGGraph(*x, **max_n_label, class_values=class_values) for x in val_graphs]\n test_graphs = [nx_to_PyGGraph(*x, **max_n_label, class_values=class_values) for x in test_graphs]\n \n return train_graphs, val_graphs, test_graphs\n\n\ndef subgraph_extraction_labeling(ind, A, h=1, max_nodes_per_hop=None, 
u_features=None, v_features=None, class_values=None):\n # extract the h-hop enclosing subgraph around link 'ind'\n dist = 0\n u_nodes, v_nodes = [ind[0]], [ind[1]]\n u_dist, v_dist = [0], [0]\n u_visited, v_visited = set([ind[0]]), set([ind[1]])\n u_fringe, v_fringe = set([ind[0]]), set([ind[1]])\n for dist in range(1, h+1):\n v_fringe, u_fringe = neighbors(u_fringe, A, True), neighbors(v_fringe, A, False)\n u_fringe = u_fringe - u_visited\n v_fringe = v_fringe - v_visited\n u_visited = u_visited.union(u_fringe)\n v_visited = v_visited.union(v_fringe)\n if max_nodes_per_hop is not None:\n if max_nodes_per_hop < len(u_fringe):\n u_fringe = random.sample(u_fringe, max_nodes_per_hop)\n if max_nodes_per_hop < len(v_fringe):\n v_fringe = random.sample(v_fringe, max_nodes_per_hop)\n if len(u_fringe) == 0 and len(v_fringe) == 0:\n break\n u_nodes = u_nodes + list(u_fringe)\n v_nodes = v_nodes + list(v_fringe)\n u_dist = u_dist + [dist] * len(u_fringe)\n v_dist = v_dist + [dist] * len(v_fringe)\n subgraph = A[u_nodes, :][:, v_nodes]\n # remove link between target nodes\n subgraph[0, 0] = 0\n # construct nx graph\n g = nx.Graph()\n g.add_nodes_from(range(len(u_nodes)), bipartite='u')\n g.add_nodes_from(range(len(u_nodes), len(u_nodes)+len(v_nodes)), bipartite='v')\n u, v, r = ssp.find(subgraph) # r is 1, 2... (rating labels + 1)\n r = r.astype(int)\n v += len(u_nodes)\n #g.add_weighted_edges_from(zip(u, v, r))\n g.add_edges_from(zip(u, v))\n\n edge_types = dict(zip(zip(u, v), r-1)) # transform r back to rating label\n nx.set_edge_attributes(g, name='type', values=edge_types)\n\n # get structural node labels\n node_labels = [x*2 for x in u_dist] + [x*2+1 for x in v_dist]\n # get node features\n if u_features is not None:\n u_features = u_features[u_nodes]\n if v_features is not None:\n v_features = v_features[v_nodes]\n \n\n node_features = None\n if False:\n if u_features is not None and v_features is not None:\n u_extended = np.concatenate([u_features, np.zeros([u_features.shape[0], v_features.shape[1]])], 1)\n v_extended = np.concatenate([np.zeros([v_features.shape[0], u_features.shape[1]]), v_features], 1)\n node_features = np.concatenate([u_extended, v_extended], 0)\n\n # get identity features (one-hot encodings of node idxes)\n u_ids = one_hot(u_nodes, A.shape[0]+A.shape[1])\n v_ids = one_hot([x+A.shape[0] for x in v_nodes], A.shape[0]+A.shape[1])\n node_ids = np.concatenate([u_ids, v_ids], 0)\n\n #node_features = np.concatenate([node_features, node_ids], 1)\n node_features = None\n #node_features = node_ids\n #node_labels = [1] * len(labels)\n #node_labels = u_nodes + [x+A.shape[0] for x in v_nodes]\n\n return g, node_labels, node_features\n\n\ndef neighbors(fringe, A, row=True):\n # find all 1-hop neighbors of nodes in fringe from A\n res = set()\n for node in fringe:\n if row:\n _, nei, _ = ssp.find(A[node, :])\n else:\n nei, _, _ = ssp.find(A[:, node])\n nei = set(nei)\n res = res.union(nei)\n return res\n\n\ndef one_hot(idx, length):\n idx = np.array(idx)\n x = np.zeros([len(idx), length])\n x[np.arange(len(idx)), idx] = 1.0\n return x\n\n\ndef node_label(subgraph):\n # an implementation of the proposed double-radius node labeling (DRNL)\n K = subgraph.shape[0]\n subgraph_wo0 = subgraph[1:, 1:]\n subgraph_wo1 = subgraph[[0]+range(2, K), :][:, [0]+range(2, K)]\n dist_to_0 = ssp.csgraph.shortest_path(subgraph_wo0, directed=False, unweighted=True)\n dist_to_0 = dist_to_0[1:, 0]\n dist_to_1 = ssp.csgraph.shortest_path(subgraph_wo1, directed=False, unweighted=True)\n dist_to_1 = dist_to_1[1:, 
0]\n d = (dist_to_0 + dist_to_1).astype(int)\n d_over_2, d_mod_2 = np.divmod(d, 2)\n labels = 1 + np.minimum(dist_to_0, dist_to_1).astype(int) + d_over_2 * (d_over_2 + d_mod_2 - 1)\n labels = np.concatenate((np.array([1, 1]), labels))\n labels[np.isinf(labels)] = 0\n labels[labels>1e6] = 0 # set inf labels to 0\n labels[labels<-1e6] = 0 # set -inf labels to 0\n return labels\n\n \ndef generate_node2vec_embeddings(A, emd_size=128, negative_injection=False, train_neg=None):\n if negative_injection:\n row, col = train_neg\n A = A.copy()\n A[row, col] = 1 # inject negative train\n A[col, row] = 1 # inject negative train\n nx_G = nx.from_scipy_sparse_matrix(A)\n G = node2vec.Graph(nx_G, is_directed=False, p=1, q=1)\n G.preprocess_transition_probs()\n walks = G.simulate_walks(num_walks=10, walk_length=80)\n walks = [map(str, walk) for walk in walks]\n model = Word2Vec(walks, size=emd_size, window=10, min_count=0, sg=1, \n workers=8, iter=1)\n wv = model.wv\n embeddings = np.zeros([A.shape[0], emd_size], dtype='float32')\n sum_embeddings = 0\n empty_list = []\n for i in range(A.shape[0]):\n if str(i) in wv:\n embeddings[i] = wv.word_vec(str(i))\n sum_embeddings += embeddings[i]\n else:\n empty_list.append(i)\n mean_embedding = sum_embeddings / (A.shape[0] - len(empty_list))\n embeddings[empty_list] = mean_embedding\n return embeddings\n\n\ndef AA(A, test_pos, test_neg):\n # Adamic-Adar score\n A_ = A / np.log(A.sum(axis=1))\n A_[np.isnan(A_)] = 0\n A_[np.isinf(A_)] = 0\n sim = A.dot(A_)\n return CalcAUC(sim, test_pos, test_neg)\n \n \ndef CN(A, test_pos, test_neg):\n # Common Neighbor score\n sim = A.dot(A)\n return CalcAUC(sim, test_pos, test_neg)\n\n\ndef CalcAUC(sim, test_pos, test_neg):\n pos_scores = np.asarray(sim[test_pos[0], test_pos[1]]).squeeze()\n neg_scores = np.asarray(sim[test_neg[0], test_neg[1]]).squeeze()\n scores = np.concatenate([pos_scores, neg_scores])\n labels = np.hstack([np.ones(len(pos_scores)), np.zeros(len(neg_scores))])\n fpr, tpr, _ = metrics.roc_curve(labels, scores, pos_label=1)\n auc = metrics.auc(fpr, tpr)\n return auc\n\n\n","sub_path":"util_functions.py","file_name":"util_functions.py","file_ext":"py","file_size_in_byte":11100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"37102548","text":"fname = input('Enter the file name: ')\ntry:\n\tfhand = open(fname)\nexcept:\n\tprint('File cannot be opened:',fname)\n#\tquit()\n\ncount = 0\nfor line in fhand:\n\tif line.startswith('my'):\n\t\tcount = count + 1\nprint('There were', count, '\\\"my\\\" lines in',fname)\n","sub_path":"py4e/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"541496618","text":"import smtplib\nimport os\nimport requests as reqs\nimport json\nimport urllib3\nfrom email.mime.text import MIMEText\ninput = os.getenv(\"bitbucket_url\")\nhostname = str(input)\ninput2 = os.getenv(\"project_name\")\nproject_key = str(input2)\ndef get_project_admin(hostname, project_key):\n url = '{hostname}/rest/api/1.0/projects/{project_key}/permissions/users?limit=99'.format(hostname=hostname,project_key=project_key)\n headers = {'Content-Type': 'application/json'}\n # params = {}\n # if limit:\n # params['limit'] = limit\n r = reqs.get(url, auth=('smohammed', 'Mommyd@d786'), headers=headers, verify=False)\n users_dump = r.json()\n user_email = []\n files = []\n all_values = users_dump['values']\n for index in range(len(all_values)):\n 
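The one_hot helper above returns one indicator row per index, which is how the node identity features are encoded; for example:

import numpy as np  # one_hot builds on numpy as in the source

print(one_hot([0, 2], 4))
# [[1. 0. 0. 0.]
#  [0. 0. 1. 0.]]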
admin = users_dump['values'][index]['permission']\n        if admin == 'PROJECT_ADMIN':\n            user_email.append(users_dump['values'][index]['user']['emailAddress'])\n    return user_email\nadmin_list = get_project_admin(hostname, project_key)\nmsg = MIMEText('Bitbucket $projectkey branches to be deleted.csv has been generated. For status and output, please see https://jktools.tools.tsys.aws/job/getbranchesbyproject/$BUILD_NUMBER/console')\nrecipients = admin_list\nmsg['Subject'] = 'SCANNING for old branches in $project_name which are $no_day old in $bitbucket_url'\nmsg['From'] = 'digitaldevops@tsys.com'\nmsg['To'] = \", \".join(recipients)\nmsg['X-Priority'] = '2'\ns = smtplib.SMTP('mailrelay.qa.tpp.tsysecom.com:25')\ns.sendmail('digitaldevops@tsys.com', recipients, msg.as_string())\ns.quit()","sub_path":"sendemail.py","file_name":"sendemail.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"585971436","text":"import pyautogui\nimport uts\nimport random\nfrom time import sleep, time\nfrom datetime import datetime\npyautogui.FAILSAFE = True\nlocoal_commd = [1300, 695] #[1315,660,225,60 ]\n\n\nround1_aim = [(1135,575), ]\nround2_aim = [(905,525),(1280,380), (1313,145)]\nround3_aim = [(905,850),(773,640), (685,470),(473,365),]\nround3_aim2 = [ (685,470),(473,365)]\n\n\n\ndef clickairport():\n    pyautogui.moveTo(*uts.random_cyclic(airport, maxr=50))\n    pyautogui.click()\n\n\ndef clicklocoal_commd():\n    pyautogui.moveTo(*uts.random_cyclic(locoal_commd, maxr=50))\n    pyautogui.click()\n\ndef entry01():\n    local = [596,347,410,200]\n    uts.randomclick(local)\n    sleep(0.63+random.random()/2)\n    enrtylocal = (875,795,210,100)\n    uts.randomclick(enrtylocal)\n\n\ndef emeryroundend():\n    x = uts.get_color(pos=(1525,950))\n    if x != 436990:\n        return False\n    return True\n\ndef planend(pos):\n    x = uts.get_color(pos)\n    if x != 13360495:\n        return False\n    return True\n\n\ndef mistake(error_coor,correct_coor):\n    \"\"\" \n    para: error_coor,correct_coor\n    \"\"\"\n    if random.random()>random.normalvariate(0.80,0.05):\n        print(\"mistake\")\n        uts.click_aim(error_coor)\n        sleep(0.456+abs(random.normalvariate(0.2, 0.1)))\n        uts.click_aim(correct_coor)\n        sleep(0.456+abs(random.normalvariate(0.2, 0.1)))\n        uts.click_aim(error_coor)\n\n\n\ndef battle():\n    entry01()\n    sleep(3+2*random.random())\n\n    clicklocoal_commd()\n    sleep(0.431+abs(random.normalvariate(0.2, 0.1)))\n    uts.checkamry()\n    sleep(1.331+abs(random.normalvariate(0.2, 0.1)))\n    uts.start_mission()\n\n    #uts.supply(clickairport)\n    sleep(1.431+abs(random.normalvariate(0.2, 0.1)))\n    \n    isload = False\n    while not isload:\n        isload = uts.checkisload()\n        print(\"supplying\")\n        sleep(0.3)\n\n    print(\"round 1\")\n\n    sleep(0.431+abs(random.normalvariate(0.2, 0.1)))\n\n    sleep(0.583+abs(random.normalvariate(0.2, 0.1)))\n    clicklocoal_commd()\n    sleep(0.583+abs(random.normalvariate(0.2, 0.1)))\n    uts.click_aim(round1_aim[0])\n    sleep(2+random.random()/2)\n    uts.addamry(locoal_commd)\n    uts.start_mission() # end mission\n\n    print(\"emeary turn\")\n    sleep(5)\n    while 1:\n        if emeryroundend():\n            print(\"Yes\")\n            break\n        if uts.checkinbattle():\n            sleep(0.3)\n            while uts.checkinbattle():\n                sleep(0.3)\n        else:\n            sleep(2)\n        for i in range(4):\n            x = random.normalvariate(900, 300)\n            y = random.normalvariate(550, 200)\n            pyautogui.moveTo(x, y, duration=0.25)\n            pyautogui.click()\n            sleep((0.383+abs(random.normalvariate(0.2, 0.1))))\n    sleep(3.5)\n\n    print(\"round 2\")\n    uts.planTask()\n    \n    
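A self-contained version of the notification mail above (relay host and sender are taken from the source, the recipient is a placeholder). Note the $-prefixed tokens in the source strings look like Jenkins-style placeholders; Python itself does not expand them:

import smtplib
from email.mime.text import MIMEText

recipients = ["admin@example.com"]  # placeholder recipient
msg = MIMEText("The branch report has been generated.")
msg['Subject'] = 'Old-branch scan results'
msg['From'] = 'digitaldevops@tsys.com'
msg['To'] = ", ".join(recipients)
with smtplib.SMTP('mailrelay.qa.tpp.tsysecom.com', 25) as s:
    s.sendmail(msg['From'], recipients, msg.as_string())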
sleep(0.583+abs(random.normalvariate(0.2, 0.1)))\n uts.click_aim(round1_aim[0])\n sleep(0.583+abs(random.normalvariate(0.2, 0.1)))\n uts.click_aim(round2_aim[0])\n sleep(0.583+abs(random.normalvariate(0.2, 0.1)))\n clicklocoal_commd()\n sleep(0.583+abs(random.normalvariate(0.2, 0.1)))\n uts.click_aim(round2_aim[1])\n sleep(0.583+abs(random.normalvariate(0.2, 0.1)))\n uts.click_aim(round2_aim[2])\n sleep(0.583+abs(random.normalvariate(0.2, 0.1)))\n uts.start_plan()\n sleep(10)\n\n while 1:\n print(\"check_out_plan\")\n if planend((1460, 333)) :\n print(\"yes\")\n break\n elif planend((1441, 163)):\n print(\"yes\")\n sroll_y = 200\n while sroll_y > 0: \n x = random.normalvariate(1300, 100)\n y = random.normalvariate(300, 30)\n pyautogui.moveTo(x, y, duration=0.25)\n dx = random.normalvariate(50, 20)\n dy = abs(random.normalvariate(sroll_y, 50))\n pyautogui.dragRel(-dx,dy,duration=1+random.random())\n sroll_y -= dy\n break\n sleep(1)\n\n uts.start_mission() # end mission\n sleep(5)\n print(\"emeary turn\")\n while 1:\n if emeryroundend():\n print(\"Yes\")\n break\n if uts.checkinbattle():\n sleep(0.3)\n while uts.checkinbattle():\n sleep(0.3)\n else:\n sleep(2)\n for i in range(4):\n x = random.normalvariate(900, 300)\n y = random.normalvariate(550, 200)\n pyautogui.moveTo(x, y, duration=0.25)\n pyautogui.click()\n sleep((0.383+abs(random.normalvariate(0.2, 0.1))))\n sleep(0.5)\n # round 2\n print(\"round 3\")\n uts.planTask()\n \n sleep(0.583+abs(random.normalvariate(0.2, 0.1)))\n uts.click_aim(round3_aim[0])\n sleep(0.583+abs(random.normalvariate(0.2, 0.1)))\n uts.click_aim(round3_aim[1])\n sleep(0.583+abs(random.normalvariate(0.2, 0.1)))\n uts.click_aim(round3_aim[2])\n sleep(0.583+abs(random.normalvariate(0.2, 0.1)))\n uts.click_aim(round3_aim[3])\n sleep(0.583+abs(random.normalvariate(0.2, 0.1)))\n uts.start_plan()\n sleep(20)\n while 1:\n print(\"check_out_plan\")\n if planend((560,223)):\n print(\"yes\")\n break\n sleep(1)\n\n uts.start_mission() # end mission\n sleep(12+random.normalvariate(3, 1))\n print(\"end mission click\")\n for i in range(3):\n uts.end_click()\n sleep(0.6+abs(random.normalvariate(2, 2)/5))\n\n\ndef roundgap():\n num = 0\n is_return = uts.is_returnbattlemeun()\n sleep(abs(random.normalvariate(3, 1)))\n\n while not is_return:\n num+=1\n if num%10==0:\n uts.start_mission() # end mission\n is_return = uts.is_returnbattlemeun()\n print(\"ending_screen\")\n uts.end_click()\n sleep(abs(random.normalvariate(0.5, 0.2)))\n sleep(1.5+abs(random.normalvariate(2, 5)))\n\n\ndef autobattle(num,):\n sleep(2)\n for i in range(num):\n print(i+1,\"turn\")\n battle()\n sleep(3+random.normalvariate(2, 2)/5)\n roundgap()\n \n\n\n\n \n\nif __name__ == '__main__':\n import sys\n loop_num= 1#int(sys.argv[1])\n t = time()\n autobattle(loop_num)\n print(time()-t)\n print(datetime.now())\n if random.random()>0.4:\n quit2battle = [90,40,170,115]\n uts.randomclick(quit2battle)","sub_path":"jt01.py","file_name":"jt01.py","file_ext":"py","file_size_in_byte":6326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"9185005","text":"import logging\nimport numpy as np\nimport sys\nsys.path.append('..')\nimport MRATools as mt\n\n\n\nif __name__=='__main__':\n\n\n\n dim_x = 100\n\n locs = np.matrix(np.linspace(0, 1, dim_x).reshape((dim_x, 1)))\n\n # specify the bandwith\n b = 10\n h = locs[1] - locs[0]\n radius = float(b*h)\n print(\"radius=%f\" % radius)\n\n \n Sig = mt.KanterCovFun(locs, radius=radius, circular=False)\n mt.dispMat(Sig)\n nnz = 
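jt01.py paces every click with sleep(base + abs(random.normalvariate(mu, sigma))) to mimic human timing; the same pattern factored into one helper (a sketch with the constants from the source, not part of the file):

import random
from time import sleep

def human_pause(base=0.583, mu=0.2, sigma=0.1):
    # Base delay plus the absolute value of a normal jitter, as in the source.
    sleep(base + abs(random.normalvariate(mu, sigma)))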
np.count_nonzero(Sig[0,:])\n\n","sub_path":"pyMRA/tests/test-kanter-cov.py","file_name":"test-kanter-cov.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"421077898","text":"from html.parser import HTMLParser\nfrom urllib import parse\n\n\nclass Parser(HTMLParser):\n\n    def __init__(self, base_url, url_to_parse):\n        super().__init__()\n        self.base_url = base_url\n        self.url_to_parse = url_to_parse\n        self.content = set()\n\n    def error(self, message):\n        pass\n\n    def handle_starttag(self, tag, attrs):\n        if tag == 'a':\n            for (attribute, value) in attrs:\n                if attribute == 'href':\n                    url = parse.urljoin(self.base_url, value)\n                    self.content.add(url)\n\n    def get_page_content(self):\n        return self.content\n\np = Parser('https://google.com', 'https://maps.google.com')\np.feed('<a href=\"/maps\">maps</a>')\nprint(p.get_page_content())","sub_path":"htmlHandler.py","file_name":"htmlHandler.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"94838021","text":"# -*- coding:utf-8 -*-\n\"\"\"\nOutput is garbled on Windows but displays correctly on Linux.\nThe unicode character '\xa0' cannot be encoded as GBK, so strip it\nbefore converting: string.replace(u'\xa0', u' ')\nFetches page 7 of the Baidu search results for python.\n\"\"\"\n\nimport os, sys, time, random, math, re\nimport requests\nfrom lxml import etree\nimport psutil\nfrom pprint import pprint as pp\nfrom concurrent.futures import ThreadPoolExecutor\nimport gevent\nfrom gevent import monkey\n\nstart = time.time()\n\ndef bsizes (bites):\n    if bites < 1024 :\n        return \"%.0fB\" % (bites)\n    for unit in ['B','K','M','G',\"T\"]:\n        if bites < 1024 :\n            break\n        bites /= float(1024)\n    return \"%.2f%s\" % (bites,unit)\n\ndef get_page_text (url):\n    req = s.get(url, timeout=10, headers = headers, params=params)\n    jtext = req.json()\n    if req.status_code != 200:\n        print ( req.status_code )\n        return\n    return jtext\n\ndef get_page_text1 (url):\n    jtext = get_page_text(url)\n    pages_list.append(jtext)\n\ndef gev_workers(func, args):\n    monkey.patch_all()\n    jobs = [gevent.spawn(func, arg) for arg in args]\n    gevent.joinall(jobs)\n    return (job.value for job in jobs)\n\ndef Threadspool(fun, args):\n    with ThreadPoolExecutor(max_workers = 50) as executor: \n        executor.map(fun, args)\n\n# single-threaded version\n\ndef get_all_pages_content(urls):\n    return [ get_page_text(url) for url in urls ]\n\nos.path.exists('zhimg') or os.mkdir(\"zhimg\")\n\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',\n    'referer': 'https://www.zhihu.com',\n    'Upgrade-Insecure-Requests': '1',\n    'x-udid': 'AIBCqngplwuPTojnvs7qGj8feYM4tToTAHA=',\n    'authorization': 'oauth c3cef7c66a1843f8b3a9e6a1e3160e20',\n    'Accept': 'text/plain',\n    'Cookie': 'd_c0=\"AIBCqngplwuPTojnvs7qGj8feYM4tToTAHA=|1491902870\"; _zap=8e4d2a0d-6259-4027-a90a-b3ab507d751f; __utma=51854390.214622470.1496285526.1496812339.1497000955.6; __utmz=51854390.1497000955.6.6.utmcsr=zhihu.com|utmccn=(referral)|utmcmd=referral|utmcct=/question/47460465; __utmv=51854390.100-1|2=registration_date=20130926=1^3=entry_date=20130926=1; q_c1=22b66bc6088f44f99e06c23037cc503b|1497492137000|1494818268000; q_c1=22b66bc6088f44f99e06c23037cc503b|1497492137000|1494818268000',\n}\n\nparams = {\n    'include': 'data[*].is_normal,is_collapsed,content;data[*].author.follower_count,badge[?(type=best_answerer)].topics',\n    'sort_by': 'default',\n}\n\ns = requests.Session()\n\nrawurl = \"https://www.zhihu.com/question/58162135\"\nurl = 
\"https://www.zhihu.com/api/v4/questions/\" + os.path.basename(rawurl) +\"/answers?offset=0&limit=20\"\n\njtext = get_page_text (url)\n\nprint ( jtext['data'][0]['question']['title'] )\nprint ( \"回答数:\", jtext['paging']['totals'] )\n\nstart1 = time.time()\npages = round(jtext['paging']['totals']/20) + 1\nfetch_list = []\n\nfor i in range(pages):\n rpage = url.replace(\"offset=0\", \"offset=\" + str(i * 20))\n fetch_list.append(rpage)\n\npp(fetch_list)\n\npages_list = []\n\nThreadspool(get_page_text1, fetch_list)\n#pages_list = gev_workers(get_page_text, fetch_list)\n#pages_list = get_all_pages_content(fetch_list)\n\nlo = 0\n\nfor k in pages_list:\n for w in k['data']:\n lo = lo + 1\n print ( lo, w['author']['name'], re.sub('<[^<]+?>', '', w['author']['headline']) , re.sub('<[^<]+?>', '', w['author']['headline']), sep=\": \") #\n\nprint ( '---------------------------------------' );\n\nend = time.time()\nprint (\"time: %.5fs\" % (end - start))\nprint (\"time: %.5fs\" % (end - start1))\n\nprint ( '---------------------------------------' );\n\np = psutil.Process(os.getpid())\nmem = bsizes(p.memory_info().rss)\nprint (\"memory used: %s\" %(mem))\n\nprint ( \"线程数:{}\" . format( p.num_threads()) )\n","sub_path":"crawler/zhihu_name-session.py","file_name":"zhihu_name-session.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"362213554","text":"#################################################################################\n# The Institute for the Design of Advanced Energy Systems Integrated Platform\n# Framework (IDAES IP) was produced under the DOE Institute for the\n# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021\n# by the software owners: The Regents of the University of California, through\n# Lawrence Berkeley National Laboratory, National Technology & Engineering\n# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University\n# Research Corporation, et al. All rights reserved.\n#\n# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and\n# license information.\n#################################################################################\nfrom typing import Dict\n\n\nclass UnitModelIcon:\n \"\"\"Represents icon display information for a given unit model.\n \"\"\"\n\n _link_positions_map = {} # cache 'built' link positions\n\n #: Name of default unit_model to use\n DEFAULT = \"default\"\n\n def __init__(self, unit_model: str = None, default: str = DEFAULT):\n \"\"\"Construct with a given unit model type name.\n\n Args:\n unit_model: Name of the unit model type. If not given, use value in attribute `DEFAULT`\n default: If the given `unit_model` is not found, use this one; but then if this is falsy raise a KeyError\n\n Raises:\n KeyError if unit_model name is not found and `default` arg is falsy (e.g. 
None or \"\")\n \"\"\"\n if unit_model is None:\n unit_model = self.DEFAULT\n self._model = unit_model\n try:\n self._info = self._mapping[unit_model]\n except KeyError:\n if not default:\n raise\n self._info = self._mapping[self.DEFAULT]\n self._pos = self._build_link_positions()\n\n @property\n def icon(self) -> str:\n \"\"\"Get the name of the icon.\n \"\"\"\n return self._info[0]\n\n @property\n def link_positions(self) -> Dict:\n \"\"\"Get the link positions.\n\n Example result::\n\n {\n \"groups\": {\n \"in\": {\n \"position\": {\n \"name\": \"left\",\n \"args\": {\"x\": 15, \"y\": 0, \"dx\": 1, \"dy\": 1},\n },\n \"attrs\": {\n \"rect\": {\n \"stroke\": \"#000000\",\n \"stroke-width\": 0,\n \"width\": 0,\n \"height\": 0,\n }\n },\n \"markup\": \"\",\n },\n \"out\": {\n \"position\": {\n \"name\": \"left\",\n \"args\": {\"x\": 48, \"y\": 45, \"dx\": 1, \"dy\": 1},\n },\n \"attrs\": {\n \"rect\": {\n \"stroke\": \"#000000\",\n \"stroke-width\": 0,\n \"width\": 0,\n \"height\": 0,\n }\n },\n \"markup\": \"\",\n },\n },\n \"items\": [{\"group\": \"in\", \"id\": \"in\"}, {\"group\": \"out\", \"id\": \"out\"}],\n }\n\n Returns:\n The link position (see example result)\n \"\"\"\n return self._pos\n\n def _build_link_positions(self) -> Dict:\n \"\"\"Fill in boilerplate based on raw info and place built value in class cache.\n Side-effects: set self._pos and add entry to class' _link_positions_map\n \"\"\"\n # look in cache, return if found\n if self._model in self._link_positions_map:\n return self._link_positions_map[self._model]\n\n # build link positions from info\n groups, items = {}, []\n for position in self._info[1:]:\n group, name, (x, y, dx, dy) = position\n if group not in groups:\n groups[group] = {}\n groups[group] = {\n \"position\": {\n \"name\": name,\n \"args\": {\"x\": x, \"y\": y, \"dx\": dx, \"dy\": dy},\n },\n \"attrs\": {\n \"rect\": {\n \"stroke\": \"#000000\",\n \"stroke-width\": 0,\n \"width\": 0,\n \"height\": 0,\n }\n },\n \"markup\": \"\",\n }\n items.append({\"group\": group, \"id\": group})\n\n # set new link positions attr and place in cache\n positions = {\"groups\": groups, \"items\": items}\n self._link_positions_map[self._model] = positions\n return positions\n\n # === Data ===\n\n # Name is unit name, value is the information for its icon\n # Value is a tuple: icon image, and one or more position tuples: (group [in/out], name [side], (x, y, dx, dy))\n # Notes for updating:\n # - Use 'cstr' as your template for new entries\n # - Do not remove in/out entries in existing entries, or arcs won't connect\n _mapping = {\n \"cstr\": (\n \"reactor_c.svg\",\n (\"in\", \"left\", (15, 0, 1, 1)),\n (\"out\", \"left\", (48, 45, 1, 1)),\n ),\n \"flash\": (\n \"flash.svg\",\n (\"bottom\", \"bottom\", (25, 50, 1, 1)),\n (\"in\", \"left\", (8, 25, 1, 1)),\n (\"top\", \"top\", (25, 0, 1, 1)),\n ),\n \"gibbs_reactor\": (\n \"reactor_g.svg\",\n (\"in\", \"left\", (5, 10, 1, 1)),\n (\"out\", \"left\", (45, 45, 1, 1)),\n ),\n \"heat_exchanger\": (\n \"heat_exchanger_1.svg\",\n (\"in\", \"left\", (2, 25, 1, 1)),\n (\"out\", \"left\", (48, 25, 1, 1)),\n ),\n \"heater\": (\n \"heater_2.svg\",\n (\"in\", \"left\", (6, 25, 1, 1)),\n (\"out\", \"left\", (43, 25, 1, 1)),\n ),\n \"heat_exchanger_1D\": (\n \"heat_exchanger_1.svg\",\n (\"in\", \"left\", (15, 0, 1, 1)),\n (\"out\", \"left\", (48, 45, 1, 1)),\n ),\n \"mixer\": (\n \"mixer.svg\",\n (\"in\", \"left\", (2, 25, 1, 1)),\n (\"out\", \"left\", (48, 25, 1, 1)),\n ),\n \"plug_flow_reactor\": (\n \"reactor_pfr.svg\",\n (\"in\", 
\"left\", (15, 0, 1, 1)),\n (\"out\", \"left\", (48, 45, 1, 1)),\n ),\n \"pressure_changer\": (\n \"compressor.svg\",\n (\"in\", \"left\", (2, 25, 1, 1)),\n (\"out\", \"left\", (48, 25, 1, 1)),\n ),\n \"separator\": (\n \"splitter.svg\",\n (\"in\", \"left\", (2, 25, 1, 1)),\n (\"out\", \"right\", (48, 25, 1, 1)),\n ),\n \"stoichiometric_reactor\": (\n \"reactor_s.svg\",\n (\"in\", \"left\", (5, 10, 1, 1)),\n (\"out\", \"left\", (45, 45, 1, 1)),\n ),\n \"equilibrium_reactor\": (\n \"reactor_e.svg\",\n (\"in\", \"left\", (5, 10, 1, 1)),\n (\"out\", \"left\", (45, 45, 1, 1)),\n ),\n \"feed\": (\"feed.svg\", (\"out\", \"left\", (48, 25, 1, 1))),\n \"product\": (\"product.svg\", (\"in\", \"left\", (2, 25, 1, 1))),\n \"feed_flash\": (\n \"feed.svg\",\n (\"in\", \"left\", (25, 0, 1, 1)),\n (\"out\", \"left\", (25, 50, 1, 1)),\n ),\n \"statejunction\": (\n \"NONE\",\n (\"in\", \"left\", (15, 0, 1, 1)),\n (\"out\", \"left\", (48, 45, 1, 1)),\n ),\n \"translator\": (\n \"NONE\",\n (\"in\", \"left\", (15, 0, 1, 1)),\n (\"out\", \"left\", (48, 45, 1, 1)),\n ),\n \"packed_column\": (\n \"packed_column_1.svg\",\n (\"in\", \"left\", (48, 10, 1, 1)),\n (\"out\", \"left\", (48, 40, 1, 1)),\n ),\n \"tray_column\": (\n \"tray_column_1.svg\",\n (\"in\", \"left\", (48, 10, 1, 1)),\n (\"out\", \"left\", (48, 40, 1, 1)),\n ),\n \"default\": (\n \"default.svg\",\n (\"in\", \"left\", (2, 0, 1, 1)),\n (\"out\", \"left\", (48, 50, 1, 1)),\n ),\n }\n","sub_path":"idaes/ui/icons.py","file_name":"icons.py","file_ext":"py","file_size_in_byte":8256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"560817202","text":"import re\nfrom operator import itemgetter\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef fetch_afisha_page():\n return requests.get(\n 'https://www.afisha.ru/cherepovec/schedule_cinema/'\n ).content\n\n\ndef parse_afisha_list(raw_html):\n soup = BeautifulSoup(raw_html, features=\"html.parser\")\n names_tags = soup.findAll('h3', class_=re.compile(r'cardTitle'))\n return list(\n map(\n lambda tag: tag.string.strip().strip('«»'),\n names_tags\n )\n )\n\n\ndef fetch_movie_info(movie_title):\n return requests.get(\n 'https://www.kinopoisk.ru/index.php',\n {\n 'kp_query': movie_title\n }\n ).content\n\n\ndef parse_movie_info(raw_html):\n soup = BeautifulSoup(raw_html, features=\"html.parser\")\n rating_tag = soup.select_one('div.element.most_wanted div.rating')\n none_int = 0\n if (rating_tag is None):\n rating_tag = soup.select_one('span.rating_ball')\n count_tag = soup.select_one('span.ratingCount')\n rating = float(rating_tag.string) if rating_tag else none_int\n votes_cnt = int(count_tag.string) if count_tag else none_int\n return rating, votes_cnt\n\n rating, votes_cnt = rating_tag['title'].split(' ')\n rating = float(rating)\n votes_cnt = int(re.sub(r'[()\\u00a0]', '', votes_cnt))\n return rating, votes_cnt\n\n\ndef output_movies_to_console(movies):\n movies = sorted(movies, key=itemgetter('rating'), reverse=True)[:10]\n for movie in movies:\n print('{title:<30} | {rating} ({votes_cnt})'.format(\n title=movie['title'],\n rating=movie['rating'],\n votes_cnt=movie['votes_cnt']\n ))\n\n\nif __name__ == '__main__':\n raw_html = fetch_afisha_page()\n\n titles = parse_afisha_list(raw_html)\n\n movies = []\n for title in titles:\n movie_html = fetch_movie_info(title)\n rating, votes_cnt = parse_movie_info(movie_html)\n movies.append({\n 'title': title,\n 'rating': rating,\n 'votes_cnt': votes_cnt\n })\n\n 
output_movies_to_console(movies)\n","sub_path":"cinemas.py","file_name":"cinemas.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"193308092","text":"# webslider from: ttps://github.com/webslides/webslides\r\nfrom lxml import etree\r\nimport requests\r\nimport chardet # 测试字节的编码格式\r\nimport re # 正则表达式\r\nimport datetime # 日期\r\nimport os\r\nimport sys\r\nimport webbrowser\r\nfrom bs4 import BeautifulSoup \r\nfrom easydict import EasyDict as edict\r\nimport subprocess\r\nimport time\r\n\r\nSSR = subprocess.Popen(\"data/static/ssr/ShadowsocksR-dotnet4.0.exe\")\r\n\r\n\r\ndef get_resource_path(relative_path): # 取得exe后相关资源绝对路径\r\n if hasattr(sys, '_MEIPASS'):\r\n return os.path.join(sys._MEIPASS, relative_path)\r\n return os.path.join(os.path.abspath(\".\"), relative_path)\r\n\r\n\r\ndef html_merge(file1, file2):\r\n f1 = open(file1, 'a+', encoding='utf-8')\r\n with open(file2, 'r', encoding='utf-8') as f2:\r\n f1.write('\\n')\r\n for i in f2:\r\n f1.write(i)\r\n\r\n\r\nurl = \"https://rsshub.app/initium/latest/zh-hans\" # 端传媒使用rsshub爬取\r\nheaders = {\r\n 'Accept': '*/*',\r\n 'Accept-Language': 'en-US,en;q=0.8',\r\n 'Cache-Control': 'max-age=0',\r\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',\r\n 'Connection': 'keep-alive',\r\n 'Referer': 'http://www.baidu.com/'\r\n}\r\nproxies = {\r\n \"http\": \"http://127.0.0.1:1080\",\r\n \"https\": \"http://127.0.0.1:1080\",\r\n}\r\n\r\nreq = requests.get(url, proxies=proxies)\r\nxml_code = req.content.decode(req.encoding)\r\n# print(chardet.detect(xml_code)) # 测试字节的编码格式\r\ntree = etree.XML(xml_code.encode('utf-8')) # 不知为啥加了encode就可以了\r\n\r\n\r\ntitles = tree.xpath('//item/title/text()')\r\ndates = tree.xpath('//item/pubDate/text()')\r\ndate = tree.xpath('//span[@class = \"date\"]/text()')\r\narts = tree.xpath('//item/description/text()')\r\ncats = tree.xpath('//item/category/text()')\r\n# 需添加评论。。。\r\n\r\n\r\nfor i in range(len(dates)):\r\n tmp = dates[i].split(\" \")\r\n table = {\"Jan\": \"01\", \"Feb\": \"02\", \"Mar\": \"03\", \"Apr\": \"04\", \"May\": \"05\", \"Jun\": \"06\", \"Jul\": \"07\", \"Aug\": \"08\", \"Sept\": \"09\", \"Oct\": \"10\", \"Nov\": \"11\", \"Dec\": \"12\"}\r\n tmp[2] = table[tmp[2]]\r\n dates[i] = tmp[3] + \"/\" + tmp[2] + \"/\" + tmp[1]\r\n\r\n\r\nDict = {}\r\nfor i in range(len(dates)):\r\n data = [0, 1, 2]\r\n data[0] = dates[i]\r\n data[1] = cats[i]\r\n data[2] = arts[i]\r\n Dict[titles[i]] = data\r\n\r\nttitle, tdata = 0, 0\r\ntDict = {}\r\nfor title, data in Dict.items():\r\n today = (datetime.datetime.now() - datetime.timedelta(days = 1)).strftime(\"20%y/%m/%d\") # ***********************今天的新闻**************************\r\n if(data[0] == today):\r\n tDict[title] = data\r\n\r\nfor i in range(len(list(tDict.items()))): # 去掉href等杂文字\r\n soup = BeautifulSoup(list(tDict.items())[i][1][2], features=\"lxml\")\r\n for a in soup.findAll('a'):\r\n del a['href']\r\n del a['src']\r\n # print(a)\r\n\r\n# for i in range(len(tDict)):\r\n# print(sorted(tDict.values())[0][0])\r\n\r\nprint(len(list(tDict.items())))\r\n\r\nweb_file = 'data/報紙.html'\r\nhtml1 = 'data/static/html/html1.html'\r\nhtml2 = 'data/static/html/html2.html'\r\nweb = (web_file.split(\"/\")[-1])\r\n\r\nhtml_merge(web_file, html1)\r\n'''######################################################################################\r\nmyDict = []\r\n# 提取
    中文字\r\nfor i in range(len(list(tDict.items()))):\r\n    pure_text = BeautifulSoup(list(tDict.items())[i][1][2], \"lxml\").find_all('p')\r\n    pure_text = [sentence.string for sentence in pure_text]\r\n    # print(pure_text[4])\r\n    tempDict = edict({'article': pure_text, 'title': list(tDict.items())[i][0]})\r\n    myDict.append(tempDict)\r\nprint()\r\n'''\r\nprint(list(tDict.items())[0][1][0])\r\nwith open(web_file, 'a', encoding='utf-8') as f: # 设置文件对象 \r\n    for i in range(len(list(tDict.items()))): # len(list(tDict.items()))\r\n        f.write('    ') \r\n        f.write('    ')\r\n        f.write(BeautifulSoup(list(tDict.items())[i][1][2], \"lxml\").prettify())\r\n        f.write('    ')\r\n        f.write('    ')\r\n        # print(list(tDict.items())[i][0])\r\n        # print(list(tDict.items())[i][1][2])\r\n\r\n\r\nhtml_merge(web_file, html2)\r\n\r\n\r\n# if(os.path.exists(web)):\r\n#     os.remove(web)\r\n#os.rename(web_file, web)\r\nprint(get_resource_path(web_file))\r\nwebbrowser.open_new_tab(get_resource_path(web_file))\r\ntime.sleep(360)\r\nSSR.kill()\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"314602838","text":"import string\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom bestnid.decorators import staff_required\n\nfrom usuario.models import Usuario\nfrom subasta.models import Subasta\nfrom subasta.forms import SubastaForm,EditSubastaForm\nfrom categoria.models import Categoria\nfrom oferta.models import Oferta\nfrom comentario.models import Comentario\nfrom comentario.forms import ComentarioForm\nfrom notificacion.models import Notificacion\n\ndef buscar(criterio):\n\n\tpalabras = criterio.split(' ')\n\tincluidas = Subasta.objects.none()\n\tfor p in palabras:\n\t\tif p.isalnum():\n\t\t\tincluidas = incluidas | Subasta.objects.filter(descripcion__icontains = p)\n\t\t\tincluidas = incluidas |\tSubasta.objects.filter(titulo__icontains = p)\n\treturn incluidas\n\n\ndef busquedaValida(s):\n\treturn (not (s.isspace() or s == ''))\n\n\ndef busquedayfiltrado(request):\n\n\tcriterio=request.GET.get('criterio','')\n\tfiltro=request.GET.get('filtro','')\n\n\tif busquedaValida(criterio):\n\t\tsubastas = buscar(criterio)\n\telse:\n\t\tsubastas = Subasta.objects.all()\n\n\tif busquedaValida(filtro):\n\t\tsubastas = subastas.filter(categoria__nombre=filtro)\n\n\treturn subastas\n\n\ndef mostrar(request):\n\ts = Subasta.objects.all()[0]\n\treturn render(request,'subasta/subastas.html',{'subasta':s})\n\ndef activas(queryset):\n\treturn queryset.filter(fechaFin__gt = datetime.now,borrado=False)\n\n\ndef recientes(request):\n\n\tsubastas = busquedayfiltrado(request)\n\tif request.user.is_authenticated() and not request.user.is_staff:\n\t\tr = activas(subastas).exclude(subastador__user=request.user).order_by('-fechaInicio')\n\telse:\n\t\tr = activas(subastas).order_by('-fechaInicio')\n\tc =\tCategoria.objects.all()\n\t#contexto=\n\treturn render(request,'subasta/listado.html',{'subastas':r, 'categorias':c,'criterio':request.GET.get('criterio',''),'filtro':request.GET.get('filtro',''),'titulo':\"Subastas recientes\"})\n\n\ndef proximas(request):\n\n\tsubastas = busquedayfiltrado(request)\n\n\tif request.user.is_authenticated():\n\t\tr = activas(subastas).exclude(subastador__user=request.user).order_by('fechaFin')\n\telse:\n\t\tr = activas(subastas).order_by('fechaFin')\n\tc =\tCategoria.objects.all()\n\tcontexto={\n\t\t'subastas':r,\n\t\t'categorias':c,\n\t\t'criterio':request.GET.get('criterio',''),\n\t\t'filtro':request.GET.get('filtro',''),\n\t\t'titulo':'Subastas proximas a vencer'\n\t}\n\treturn render(request,'subasta/listado.html',contexto)\n\n\n@staff_required\ndef todas(request):\n\n\tsubastas = busquedayfiltrado(request)\n\tr = subastas.order_by('fechaInicio')\n\tc 
=\tCategoria.objects.all()\n\tcontexto={\n\t\t'subastas':r,\n\t\t'categorias':c,\n\t\t'criterio':request.GET.get('criterio',''),\n\t\t'filtro':request.GET.get('filtro',''),\n\t\t'titulo':'Todas las subastas'\n\t}\n\treturn render(request,'subasta/listado.html',contexto)\n\n@login_required\ndef mis_subastas(request):\n\tusuario = Usuario.objects.get(user=request.user)\n\tsubastas = usuario.subastas()\n\tcontexto={\n\t\t'subastas':subastas,\n\t}\n\treturn render(request,'subasta/mis_subastas.html',contexto)\n\n\n@login_required\ndef nueva_subasta(request):\n\n\tif request.method == 'POST':\n\t\tform = SubastaForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tcantdias =request.POST['rango']\n\t\t\tduracion = timedelta(days=int(cantdias))\n\t\t\tsubasta = form.save(commit = False)\n\t\t\tsubasta.fechaFin = datetime.now() + duracion\n\t\t\tsubasta.subastador = Usuario.objects.get(user=request.user)\n\t\t\tsubasta.save()\n\t\t\tmensaje = \"La subasta ha sido creada correctamente.\"\n\t\t\tmessages.add_message(request, messages.SUCCESS, mensaje)\n\n\t\t\treturn redirect('mis_subastas')\n\t\t#context={'form':form}\n\t\t#return render(request,'subasta/nueva_subasta.html',context)\n\telse:\n\t\tform = SubastaForm()\n\n\tcontext={\n\t\t'form':form,\n\t}\n\treturn render(request,'subasta/nueva_subasta.html',context)\n\n\n@login_required\ndef editar_subasta(request,id):\n\n\ttry:\n\t\tsubasta = Subasta.objects.get(pk=id)\n\texcept Subasta.DoesNotExist:\n\t\tmensaje = \"Esta no es una subasta valida\"\n\t\treturn redirect('principal')\n\telse:\n\t\tif request.method == 'POST':\n\t\t\tform = EditSubastaForm(request.POST or None,instance=subasta, files=request.FILES)\n\t\t\tif form.is_valid():\n\t\t\t\tcantdias =request.POST['rango']\n\t\t\t\tduracion = timedelta(days=int(cantdias))\n\t\t\t\tsubasta = form.save(commit = False)\n\t\t\t\tsubasta.fechaFin = subasta.fechaInicio + duracion\n\t\t\t\tsubasta.subastador = Usuario.objects.get(user=request.user)\n\t\t\t\tsubasta.save()\n\t\t\t\tmensaje = \"Se guardaron los cambios correctamente\"\n\t\t\t\tmessages.add_message(request, messages.SUCCESS, mensaje)\n\t\t\t\treturn redirect('/subasta/ver/'+id)\n\t\t\telse:\n\t\t\t\tmensaje = \"Ocurrio un error. 
La subasta no ha sido editada\"\n\t\t\t\tmessages.add_message(request, messages.ERROR, mensaje)\n\t\t\t\tcontext={\n\t\t\t\t\t'form':form,\n\t\t\t\t}\n\t\t\t\treturn render(request, 'subasta/editar_subasta.html',context)\n\t\telse:\n\t\t\tform = EditSubastaForm(instance=subasta)\n\treturn render(request, 'subasta/editar_subasta.html',{'form':form})\n\n\ndef ver_subasta(request,id):\n\n\ttry:\n\t\tsubasta = Subasta.objects.get(pk=id,borrado=False)\n\texcept:\n\t\tmensaje = \"La subasta no existe\"\n\t\tmessages.add_message(request, messages.ERROR, mensaje)\n\t\treturn redirect('/subasta/recientes')\n\n\ttry:\n\t\tusuario = Usuario.objects.get(user__id=request.user.id)\n\texcept:\n\t\tpropia = False\n\t\tes_postor = False\n\t\tcomentarios = []\n\telse:\n\t\tpropia = subasta.es_propia(usuario)\n\t\tes_postor = usuario.es_postor_en(subasta)\n\t\tif propia:\n\t\t\tcomentarios = Comentario.objects.filter(subasta=subasta)\n\t\telse:\n\t\t\tcomentarios = Comentario.objects.filter(subasta=subasta,autor=usuario)\n\n\ttiene_ofertas = subasta.tiene_ofertas()\n\tfinalizada = subasta.finalizada()\n\ttiene_ganador = subasta.tiene_ganador()\n\n\tif request.method == \"POST\":\n\t\tform = ComentarioForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tcomentario = form.save(commit=False)\n\t\t\tcomentario.subasta = subasta\n\t\t\tcomentario.autor = usuario\n\t\t\tcomentario.save()\n\t\t\tmensaje = \"Un usuario comento en su subasta: %s\" % subasta.titulo\n\t\t\tnotificacion = Notificacion(usuario=subasta.subastador,mensaje=mensaje)\n\t\t\tnotificacion.url = subasta.url()\n\t\t\tnotificacion.save()\n\n\t\t\treturn HttpResponseRedirect('')\n\t\telse:\n\t\t\tprint(form.errors)\n\n\tform = ComentarioForm()\n\n\tcontext ={\n\t\t'subasta':subasta,\n\t\t'propia': propia,\n\t\t'es_postor':es_postor,\n\t\t'finalizada':finalizada,\n\t\t'tiene_ofertas':tiene_ofertas,\n\t\t'tiene_ganador':tiene_ganador,\n\t\t'comentarios':comentarios,\n\t\t'form':form\n\t}\n\n\treturn render(request,'subasta/subasta.html',context)\n\n\ndef eliminar_subasta(request,id):\n\n\tusuario = Usuario.objects.get(user__id=request.user.id)\n\tsubasta = Subasta.objects.get(pk=id)\n\tif subasta == None:\n\t\tmessages.add_message(request, messages.ERROR, 'No existe la subasta solicitada')\n\t\treturn redirect('/subasta/recientes')\n\tif not subasta.es_propia(usuario):\n\t\tmessages.add_message(request, message.ERROR, 'No es posible eliminar una subasta ajena')\n\t\treturn redirect('/subasta/ver/'+id)\n\n\tif request.method == 'POST':\n\t\tsubasta.borrado = True\n\t\tsubasta.save()\n\t\tmessages.add_message(request, messages.SUCCESS, 'Se ha eliminado su subasta')\n\t\treturn redirect('/subasta/recientes')\n\n\tcontext={\n\t\t'subasta':subasta,\n\t}\n\treturn render(request,'subasta/eliminar.html',context)\n\n\n\nfrom usuario.forms import RangoForm\n\ndef buscar_subastas(request):\n\n\tif request.method == 'POST':\n\t\tform = RangoForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tinicio = form.cleaned_data['fechaInicio']\n\t\t\tfin = form.cleaned_data['fechaFin']\n\t\t\tsubastas = Subasta.objects.all().filter(fechaInicio__gte=inicio).filter(fechaFin__lte=fin)\n\t\t\tcantidad = subastas.count()\n\t\t\tcontext = {\n\t\t\t\t'subastas':subastas,\n\t\t\t\t'fin':fin,\n\t\t\t\t'inicio':inicio,\n\t\t\t\t'cantidad': cantidad\n\t\t\t}\n\t\t\treturn render (request, 'subasta/resultados.html', context)\n\telse:\n\n\t\tform = RangoForm()\n\n\treturn 
render(request,'subasta/buscar_subastas.html',{'form':form})\n","sub_path":"bestnid/bestnid/apps/subasta/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"197356538","text":"import numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport h5py\nimport matplotlib.ticker as mticker\n\n\ndef plot_shock_wave(ntot, file_name, treshold, dx, sampling, dt, Nt):\n file = h5py.File(file_name, 'r')\n l = list(file.keys())\n\n n = np.array(file.get(l[ntot - 1])).T\n Nx = n.shape[0]\n\n n0 = (np.array(file.get(l[0])).T)[1]\n time = np.zeros(ntot)\n for i in range(ntot):\n time[i] = i*dt*Nt\n swpoints = np.zeros([ntot])\n for j in range(ntot):\n swpoints[j] = Nx-1;\n n = np.array(file.get(l[j])).T\n for i in range(Nx-1,-1,-1):\n if n[i] > treshold*n0:\n swpoints[j] = i*dx*sampling\n break\n swpoints[0] = 0\n print(\"plot shock wave\")\n plt.rcParams.update({'font.size': 40})\n plt.rcParams['text.usetex'] = True\n plt.rcParams['axes.linewidth'] = 4\n\n f1 = plt.figure(figsize=[10, 10])\n ax = f1.add_subplot(111)\n ax.tick_params(axis='x', size=10, width=4)\n ax.tick_params(axis='y', size=10, width=4)\n ax.minorticks_on()\n ax.plot(time, swpoints, linewidth=4)\n ax.set_xlabel(r'$t/\\omega_e$', fontsize=40, fontweight='bold')\n ax.set_ylabel(r'$x_{sh} \\omega_e/c$', fontsize=40, fontweight='bold')\n ax.minorticks_on()\n plt.savefig('smilei_shock_wave.png', bbox_inches='tight')\n plt.close()\n\n v = np.zeros(ntot-1)\n for i in range(ntot-1):\n v[i] = (swpoints[i+1] - swpoints[i])/(dt*Nt)\n f1 = plt.figure(figsize=[10, 10])\n ax = f1.add_subplot(111)\n ax.tick_params(axis='x', size=10, width=4)\n ax.tick_params(axis='y', size=10, width=4)\n ax.minorticks_on()\n ax.plot(time[0:ntot-1], v, linewidth=4)\n ax.set_xlabel(r'$t/\\omega_e$', fontsize=40, fontweight='bold')\n ax.set_ylabel(r'$V_{sh}/c$', fontsize=40, fontweight='bold')\n ax.minorticks_on()\n plt.savefig('smilei_shock_velocity.png', bbox_inches='tight')\n plt.close()","sub_path":"pySMILEI/plot_shock_wave.py","file_name":"plot_shock_wave.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"402655048","text":"import os\nimport uuid\n\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom election.models import Election, ElectionDay\nfrom electionnight.managers import PageContentManager\nfrom geography.models import Division, DivisionLevel\n\nfrom .page_type import PageType\n\n\nclass PageContent(models.Model):\n \"\"\"\n A specific page that content can attach to.\n \"\"\"\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n\n allowed_types = (\n models.Q(app_label=\"geography\", model=\"division\")\n | models.Q(app_label=\"government\", model=\"office\")\n | models.Q(app_label=\"government\", model=\"body\")\n | models.Q(app_label=\"electionnight\", model=\"pagetype\")\n )\n content_type = models.ForeignKey(\n ContentType, limit_choices_to=allowed_types, on_delete=models.CASCADE\n )\n object_id = models.CharField(max_length=500)\n content_object = GenericForeignKey(\"content_type\", \"object_id\")\n\n election_day = models.ForeignKey(ElectionDay, on_delete=models.PROTECT)\n\n division = models.ForeignKey(\n Division, null=True, blank=True, on_delete=models.PROTECT\n 
)\n\n special_election = models.BooleanField(default=False)\n\n parent = models.ForeignKey(\n \"self\",\n null=True,\n blank=True,\n related_name=\"children\",\n on_delete=models.PROTECT,\n )\n\n objects = PageContentManager()\n\n featured = models.ManyToManyField(\n Election,\n blank=True,\n limit_choices_to={\"election_day__slug\": \"2018-11-06\"},\n )\n\n class Meta:\n unique_together = (\n \"content_type\",\n \"object_id\",\n \"election_day\",\n \"division\",\n )\n\n def __str__(self):\n return self.page_location()\n\n def page_location(self):\n \"\"\"\n Returns the published URL for page.\n \"\"\"\n cycle = self.election_day.cycle.name\n if self.content_type.model_class() == PageType:\n print(self.content_object)\n return self.content_object.page_location_template()\n elif self.content_type.model_class() == Division:\n if self.content_object.level.name == DivisionLevel.STATE:\n if self.special_election:\n # /{state}/special-election/{month-day}/\n path = os.path.join(\n self.content_object.slug,\n \"special-election\",\n self.election_day.special_election_datestring(),\n )\n else:\n # /{state}/\n path = self.content_object.slug\n else:\n # / National\n path = \"\"\n # Offices and Bodies\n else:\n if self.division.level.name == DivisionLevel.STATE:\n if not self.content_object.body:\n path = os.path.join(self.division.slug, \"governor\")\n else:\n path = os.path.join(\n self.division.slug, self.content_object.slug\n )\n else:\n path = self.content_object.slug\n return (\n os.sep + os.path.normpath(os.path.join(cycle, path)) + os.sep\n ) # normalized URL\n","sub_path":"electionnight/models/page_content.py","file_name":"page_content.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"319221434","text":"import json\nfrom peewee import JOIN\nfrom apps.base import BaseHandler\nfrom utils.decorator.connector import Core_connector\nfrom router import route\nfrom loguru import logger\nfrom models.user import Branch,User,UserLinkRole,UserLinkBranch,\\\n MenuLinkMerchantSetting,UserAuth,UserRole,\\\n UserLinkMerchant,Merchant,SettingLinkMerchant\n\nfrom models.public import Menu\n\nfrom utils.exceptions import PubErrorCustom\n\nfrom apps.web.user.rule import BranchRules,UserRoleRules,\\\n UserRoleForMenuRules,UserRoleLinkRules,MerchantRules,\\\n MenuLinkMerchantSettingRules,UserRules\nfrom apps.web.user.serializers import BranchSerializer,MerchantSerializer,\\\n MerchantLinkUserSerializer,MenuLinkMerchantSettingSerializer\nfrom apps.web.public.serializers import MenuSerializer\n\nfrom apps.web.user.utils import user_query,get_merchants,get_merchant_setting_menus,get_merchant_default_setting_id\n\n@route()\nclass userinfo(BaseHandler):\n\n \"\"\"\n 用户\n \"\"\"\n\n @Core_connector(isTransaction=False,isMerchantVoid=True)\n async def get(self, pk=None):\n\n # merchant_obj=None\n # if self.user.role_type == '1':\n # merchant_obj = await self.merchant_token_handler()\n\n data = {\n \"userid\": self.user.userid,\n \"username\": self.user.name,\n \"rolecode\": \"\",\n \"role_type\": self.user.role_type,\n \"avatar\": 'https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1604320145113&di=c0f37be5cc6331c65ec5773edbf7c1da&imgtype=0&src=http%3A%2F%2Fb-ssl.duitang.com%2Fuploads%2Fitem%2F201703%2F18%2F20170318012043_H4mRj.jpeg',\n \"menu\": [],\n \"merchants\": await get_merchants(self=self)\n }\n\n return {\"data\": data}\n\n@route()\nclass merchant_select(BaseHandler):\n\n 
@Core_connector(isTransaction=False)\n async def get(self, pk=None):\n\n return await get_merchants(self=self)\n\n@route(None, id=True)\nclass merchant_select_ok(BaseHandler):\n\n async def merchant_token_handler(self,merchant_id=None):\n\n if not merchant_id:\n raise PubErrorCustom(\"租户ID为空\")\n\n redis_cli = self.redisC(key=self.token)\n response = await redis_cli.get_dict()\n response['merchant_id'] = merchant_id\n await redis_cli.set_dict(response)\n return response\n\n @Core_connector(isMerchantVoid=True)\n async def put(self, pk=None):\n\n await self.merchant_token_handler(merchant_id=pk)\n\n\n@route(None,id=True)\nclass get_menu(BaseHandler):\n\n def recursion(self,rows,res,level=0):\n\n level += 1\n if level == 1:\n rows['children'] = [ item for item in res if item['parent_id'] == 0]\n self.recursion(rows['children'], res, level)\n else:\n for row in rows:\n row['children'] = [item for item in res if item['parent_id'] == row['id']]\n if not len(row['children']):\n return\n self.recursion(row['children'], res, level)\n\n\n @Core_connector(isTransaction=False)\n async def get(self, pk=None):\n menus = []\n\n for role in await self.db.execute(\n UserRole.select(). \\\n where(\n UserRole.status == '0',\n UserRole.role_id << [item.role_id for item in await self.db.execute(\n UserLinkRole.select().where(UserLinkRole.userid == self.user.userid)\n )]\n )\n ):\n menus += json.loads(role.menus)\n\n # if self.user.role_type == '1':\n # for linksetting in await self.db.execute (\n # MenuLinkMerchantSetting.select().where(\n # MenuLinkMerchantSetting.id <<\n # [\n # item.setting_id \\\n # for item in \\\n # await self.db.execute (\n # SettingLinkMerchant.select().where(\n # SettingLinkMerchant.merchant_id == self.user.merchant_id\n # )\n # )\n # ]\n # )\n # ):\n # menus += json.loads(linksetting.menus)\n\n menus = list(set(menus))\n\n res = json.loads(json.dumps(MenuSerializer(await self.db.execute(\n Menu.select().where(\n Menu.status == '0',\n Menu.id << menus\n )\n ),many=True).data))\n\n menus_res = [ item for item in res if item['type'] in ['0','2']]\n button_res = [ item for item in res if item['type'] in ['1']]\n\n menus={\n \"children\":[]\n }\n self.recursion(menus,menus_res)\n\n buttons={\n \"children\": []\n }\n self.recursion(buttons, button_res)\n\n return {\"data\":{\n \"menus\":menus,\n \"buttons\":buttons\n }}\n\n@route(None,id=True)\nclass user(BaseHandler):\n\n \"\"\"\n 用户管理\n \"\"\"\n\n async def add_before_handler(self,**kwargs):\n if self.user.merchant_id:\n self.data['role_type'] = '1'\n\n async def add_after_handler(self,**kwargs):\n\n mobile = self.data.get(\"mobile\",None)\n email = self.data.get(\"email\",None)\n login_name = self.data.get(\"login_name\", None)\n\n async def createUserAuth(account,type):\n if await self.db.count(\n UserAuth.select().where(UserAuth.account == account, UserAuth.type == type)) > 0:\n if type == '0':\n raise PubErrorCustom(\"登录账号已存在!\")\n elif type == '1':\n raise PubErrorCustom(\"手机号已存在!\")\n elif type == '2':\n raise PubErrorCustom(\"邮箱已存在!\")\n\n await self.db.create(UserAuth, **{\n \"userid\": self.pk,\n \"type\": type,\n \"account\": account,\n \"ticket\": self.data.get(\"password\")\n })\n\n if mobile:\n await createUserAuth(account=mobile,type=\"1\")\n\n if email:\n await createUserAuth(account=email,type=\"2\")\n\n if login_name:\n await createUserAuth(account=login_name,type=\"0\")\n\n if self.user.merchant_id:\n await self.db.create(UserLinkMerchant,**{\n \"userid\":self.pk,\n \"merchant_id\":self.user.merchant_id\n })\n\n async 
def upd_before_handler(self,**kwargs):\n\n pk = kwargs.get(\"pk\")\n\n mobile = self.data.get(\"mobile\", None)\n email = self.data.get(\"email\", None)\n login_name = self.data.get(\"login_name\", None)\n password = self.data.get(\"password\")\n\n if password:\n await self.db.execute(\n UserAuth.update({\n UserAuth.ticket : password\n }).where(\n UserAuth.userid == pk,\n UserAuth.is_password == '0'\n )\n )\n\n async def updUserAuth(account, type,pk):\n\n if await self.db.count(\n UserAuth.select().where(\n UserAuth.account == account,\n UserAuth.type == type,\n UserAuth.userid != pk)) > 0:\n if type == '0':\n raise PubErrorCustom(\"登录账号已存在!\")\n elif type == '1':\n raise PubErrorCustom(\"手机号已存在!\")\n elif type == '2':\n raise PubErrorCustom(\"邮箱已存在!\")\n\n try:\n user_auth_obj = await self.db.get(UserAuth,userid = pk,type = type)\n user_auth_obj.account = account\n await self.db.update(user_auth_obj)\n\n except UserAuth.DoesNotExist:\n\n res = await self.db.execute(\n UserAuth.select().where(\n UserAuth.userid == pk,\n UserAuth.is_password == '0'\n )\n )\n if not len(res):\n raise PubErrorCustom(\"系统异常{}\".format(pk))\n\n await self.db.create(UserAuth, **{\n \"userid\": self.pk,\n \"type\": type,\n \"account\": account,\n \"ticket\": res[0].ticket\n })\n\n if mobile:\n await updUserAuth(account=mobile, type=\"1\",pk=pk)\n\n if email:\n await updUserAuth(account=email, type=\"2\",pk=pk)\n\n if login_name:\n await updUserAuth(account=login_name, type=\"0\",pk=pk)\n\n @Core_connector(**{**UserRules.post(),\n **{\"add_after_handler\":add_after_handler,\"add_before_handler\":add_before_handler}})\n async def post(self,*args,**kwargs):\n return {\"data\":self.pk}\n\n @Core_connector(**{**UserRules.put(),**{\"upd_before_handler\":upd_before_handler}})\n async def put(self,*args,**kwargs):\n pass\n\n @Core_connector(isTransaction=False)\n async def get(self, pk=None):\n\n return await user_query(\n self=self,\n query= User.select(User),\n isMobile = True,\n isEmail = True,\n isLoginName = True,\n isBranch= True,\n isUserRole=True\n )\n\n @Core_connector(**UserRules.delete())\n async def delete(self,*args,**kwargs):\n pass\n\n@route(None,id=True)\nclass branch(BaseHandler):\n\n \"\"\"\n 部门管理\n \"\"\"\n\n async def upd_before_handler(self,**kwargs):\n\n logger.info(\"pk=>{},parent_branch_id=>{}\".format(kwargs.get(\"pk\"),self.data.get(\"parent_branch_id\")))\n if str(kwargs.get(\"pk\")) == str(self.data.get(\"parent_branch_id\")):\n self.data[\"parent_branch_id\"] = 0\n\n\n @Core_connector(**BranchRules.post())\n async def post(self,*args,**kwargs):\n return {\"data\":self.pk}\n\n @Core_connector(**{**BranchRules.put(),**{\"upd_before_handler\":upd_before_handler}})\n async def put(self,*args,**kwargs):\n pass\n\n @Core_connector(**BranchRules.delete())\n async def delete(self,*args,**kwargs):\n pass\n\n @Core_connector()\n async def get(self, pk=None):\n\n parent_branch_id = self.data.get(\"parent_branch_id\", 0)\n branch_name = self.data.get(\"branch_name\",None)\n status = self.data.get(\"status\",None)\n\n c = 0\n logger.info( self.user.merchant_id)\n async def recursion(parent_branch_id,c):\n c += 1\n\n query = Branch.select().where(\n Branch.parent_branch_id == parent_branch_id,\n Branch.merchant_id == self.user.merchant_id\n ).order_by(Branch.sort)\n\n if c == 1:\n if branch_name:\n query = query.where(Branch.branch_name == branch_name)\n if status:\n query = query.where(Branch.status == status)\n\n res = await self.db.execute(\n query\n )\n\n child = BranchSerializer(res, 
many=True).data\n\n if not len(child):\n return\n\n for item in child:\n item['child'] = await recursion(item['branch_id'],c)\n\n return child\n\n return {\"data\": await recursion(parent_branch_id=parent_branch_id,c=c)}\n\n@route(None,id=True)\nclass userrole0(BaseHandler):\n\n \"\"\"\n 系统角色管理\n \"\"\"\n\n async def add_before_handler(self,**kwargs):\n if self.user.merchant_id:\n self.data['role_type'] = '1'\n\n @Core_connector(**{**UserRoleRules.post(),\n **{\"add_before_handler\":add_before_handler}})\n async def post(self,*args,**kwargs):\n return {\"data\":self.pk}\n\n @Core_connector(**UserRoleRules.put())\n async def put(self,*args,**kwargs):\n pass\n\n @Core_connector(**UserRoleRules.delete())\n async def delete(self,*args,**kwargs):\n pass\n\n @Core_connector(**UserRoleRules.get())\n async def get(self, pk=None):\n pass\n\n@route(None,id=True)\nclass menu_for_role(BaseHandler):\n\n \"\"\"\n 角色获取菜单\n \"\"\"\n\n @Core_connector(**UserRoleForMenuRules.get())\n async def get(self, pk=None):\n pass\n\n@route(None,id=True)\nclass user_for_role(BaseHandler):\n\n \"\"\"\n 用户角色关联交易\n \"\"\"\n\n @Core_connector(isTransaction=False)\n async def get(self, pk=None):\n\n self.data['role_id'] = pk\n return await user_query(\n self=self,\n query= User.select(User),\n isMobile = True,\n isEmail = True,\n isBranch=True\n )\n\n @Core_connector()\n async def post(self,*args,**kwargs):\n\n userids = self.data.get(\"userids\")\n role_id = self.data.get(\"role_id\")\n\n if not len(userids):\n raise PubErrorCustom(\"授权用户列表为空!\")\n\n if not role_id:\n raise PubErrorCustom(\"角色代码为空!\")\n\n for item in userids:\n\n if await self.db.count(\n UserLinkRole.select().where(\n UserLinkRole.role_id == role_id,\n UserLinkRole.userid == item\n )\n ) <= 0:\n await self.db.create(UserLinkRole,**{\n \"role_id\" : role_id,\n \"userid\": item\n })\n\n @Core_connector()\n async def delete(self,pk=None):\n await self.db.execute(\n UserLinkRole.delete().where(\n UserLinkRole.role_id == pk,\n UserLinkRole.userid << self.data.get(\"ids\")\n )\n )\n\n@route(None,id=True)\nclass menulinkmerchantsetting(BaseHandler):\n\n \"\"\"\n 租户规则管理\n \"\"\"\n\n async def add_before_handler(self,**kwargs):\n \"\"\"\n 新增/修改前置处理\n \"\"\"\n if self.data.get(\"default\",None) and self.data.get(\"default\")=='0':\n for item in await self.db.execute(\n MenuLinkMerchantSetting.select().for_update().where(MenuLinkMerchantSetting.default == '0')):\n item.default = '1'\n await self.db.update(item)\n\n @Core_connector(**{**MenuLinkMerchantSettingRules.post(),**{\"add_before_handler\":add_before_handler}})\n async def post(self,*args,**kwargs):\n return {\"data\":self.pk}\n\n @Core_connector(**{**MenuLinkMerchantSettingRules.put(),**{\"upd_before_handler\":add_before_handler}})\n async def put(self,*args,**kwargs):\n pass\n\n @Core_connector(**MenuLinkMerchantSettingRules.delete())\n async def delete(self,*args,**kwargs):\n pass\n\n @Core_connector(**MenuLinkMerchantSettingRules.get())\n async def get(self, pk=None):\n pass\n\n@route(None,id=True)\nclass merchant(BaseHandler):\n\n \"\"\"\n 租户管理\n \"\"\"\n\n async def add_before_handler(self,**kwargs):\n\n account = self.data.get(\"account\",None)\n password = self.data.get(\"password\",\"e10adc3949ba59abbe56e057f20f883e\")\n merchant_name = self.data.get(\"merchant_name\",None)\n\n user_obj = await self.db.create(User, **{\n \"role_type\": \"1\",\n \"name\": \"{}_admin\".format(merchant_name)\n })\n\n await self.db.create(UserAuth, **{\n \"userid\": user_obj.userid,\n \"type\": '0',\n \"account\": 
account,\n \"ticket\": password\n })\n self.data['userid'] = user_obj.userid\n self.data['merchants'] = [\n {\n \"userid\":user_obj.userid\n }\n ]\n\n async def add_after_handler(self,**kwargs):\n\n merchant_id = self.pk\n\n await self.db.create(SettingLinkMerchant,**{\n \"setting_id\":await get_merchant_default_setting_id(self=self),\n \"merchant_id\": merchant_id,\n })\n\n role_obj = await self.db.create(UserRole,**{\n \"role_type\":\"1\",\n \"role_name\":\"管理员\",\n \"sort\":1,\n \"status\":\"0\",\n \"merchant_id\":merchant_id,\n \"menus\":json.dumps( await get_merchant_setting_menus(self=self,merchant_id=merchant_id))\n })\n\n await self.db.create(UserLinkRole,**{\n \"userid\":self.data['userid'],\n \"role_id\":role_obj.role_id\n })\n\n @Core_connector(**{**MerchantRules.post(),\\\n **{\"add_before_handler\":add_before_handler,\"add_after_handler\":add_after_handler}})\n async def post(self,*args,**kwargs):\n return {\"data\":self.pk}\n\n @Core_connector(**MerchantRules.put())\n async def put(self,*args,**kwargs):\n pass\n\n @Core_connector(**MerchantRules.delete())\n async def delete(self,*args,**kwargs):\n pass\n\n @Core_connector(isTransaction=False)\n async def get(self, pk=None):\n\n obj = await self.db.execute(\n Merchant.select(\n Merchant,\n User,\n UserAuth\n ).join(\n User, join_type=JOIN.LEFT_OUTER, on=(Merchant.userid == User.userid),\n ).join(\n UserAuth, join_type=JOIN.LEFT_OUTER, on=(UserAuth.userid == User.userid)\n ).where(\n UserAuth.type == '0'\n ).paginate(self.data['page'], self.data['size'])\n )\n\n return {\"count\":len(obj),\"data\":MerchantSerializer(obj,many=True).data}\n\n\n@route(None,id=True)\nclass merchant_for_setting(BaseHandler):\n\n \"\"\"\n 租户权限规则关联租户\n \"\"\"\n\n @Core_connector()\n async def post(self,*args,**kwargs):\n\n merchants = self.data.get(\"merchants\")\n setting_id = self.data.get(\"setting_id\")\n\n if not len(merchants):\n raise PubErrorCustom(\"租户列表为空!\")\n\n if not setting_id:\n raise PubErrorCustom(\"规则ID为空!\")\n\n for item in merchants:\n\n if await self.db.count(\n SettingLinkMerchant.select().where(\n SettingLinkMerchant.setting_id == setting_id,\n SettingLinkMerchant.merchant_id == item['merchant_id']\n )\n ) <= 0:\n await self.db.create(SettingLinkMerchant,**{\n \"setting_id\" : setting_id,\n \"merchant_id\": item['merchant_id']\n })\n\n @Core_connector(isTransaction=False)\n async def get(self, pk=None):\n\n obj = await self.db.execute(\n SettingLinkMerchant.select(SettingLinkMerchant,Merchant).\\\n join(Merchant, join_type=JOIN.INNER, on=(Merchant.merchant_id == SettingLinkMerchant.merchant_id)). 
\\\n where(SettingLinkMerchant.setting_id == pk)\n )\n\n if len(obj):\n return {\"data\":MerchantSerializer([ item.merchant for item in obj ],many=True).data}\n else:\n return {\"data\":[]}\n\n @Core_connector()\n async def delete(self,pk=None):\n await self.db.execute(\n SettingLinkMerchant.delete().where(\n SettingLinkMerchant.setting_id == pk,\n SettingLinkMerchant.merchant_id << self.data.get(\"ids\")\n )\n )\n\n@route(None,id=True)\nclass setting_for_merchant(BaseHandler):\n\n \"\"\"\n 租户关联权限\n \"\"\"\n\n @Core_connector()\n async def put(self, pk=None):\n\n settings = self.data.get(\"settings\",None)\n\n if not len(settings):\n raise PubErrorCustom(\"租户规则为空!\")\n\n for item in settings:\n\n if await self.db.count(\n SettingLinkMerchant.select().where(\n SettingLinkMerchant.setting_id == item['setting_id'],\n SettingLinkMerchant.merchant_id == pk\n )\n ) <= 0:\n await self.db.create(SettingLinkMerchant,**{\n \"merchant_id\": pk,\n \"setting_id\": item['setting_id']\n })\n\n @Core_connector(isTransaction=False)\n async def get(self, pk=None):\n\n obj = await self.db.execute(\n SettingLinkMerchant.select(SettingLinkMerchant,MenuLinkMerchantSetting).\\\n join(MenuLinkMerchantSetting, join_type=JOIN.INNER, on=(MenuLinkMerchantSetting.id == SettingLinkMerchant.setting_id)). \\\n where(SettingLinkMerchant.merchant_id == pk)\n )\n\n if len(obj):\n\n return {\"data\":MenuLinkMerchantSettingSerializer([item.menulinkmerchantsetting for item in obj],many=True).data}\n else:\n return {\"data\":[]}\n\n @Core_connector()\n async def delete(self,pk=None):\n await self.db.execute(\n SettingLinkMerchant.delete().where(\n SettingLinkMerchant.setting_id << self.data.get(\"ids\"),\n SettingLinkMerchant.merchant_id == pk\n )\n )","sub_path":"apps/web/user/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":21048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"217558219","text":"access_mode_template = [\n \"switchport mode access\",\n \"switchport access vlan\",\n \"switchport nonegotiate\",\n \"spanning-tree portfast\",\n \"spanning-tree bpduguard enable\",\n]\n\nport_security_template = [\n \"switchport port-security maximum 2\",\n \"switchport port-security violation restrict\",\n \"switchport port-security\",\n]\n\naccess_config = {\"FastEthernet0/12\": 10, \"FastEthernet0/14\": 11, \"FastEthernet0/16\": 17}\n\ndef generate_access_config(intf_vlan_mapping, access_template, psecurity=None):\n access_config = []\n\n for intf, vlan in intf_vlan_mapping.items():\n access_config.append(f\"interface {intf}\")\n for command in access_template:\n if command.endswith(\"access vlan\"):\n access_config.append(f\"{command} {vlan}\")\n else:\n access_config.append(command)\n if psecurity:\n access_config.extend(psecurity)\n return access_config\n","sub_path":"4/9.1-9.2a/9.1a.py","file_name":"9.1a.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"268855749","text":"from django.db import models\n\n# Create your models here.\nfrom django.db import models\nimport numpy as np \nimport pandas as pd\nfrom datetime import date\n\n\n#Keep this so that a number can be added for points.\nclass MinMaxFloat(models.FloatField):\n def __init__(self, min_value=None, max_value=None, *args, **kwargs):\n self.min_value, self.max_value = min_value, max_value\n super(MinMaxFloat, self).__init__(*args, **kwargs)\n\n def formfield(self, **kwargs):\n defaults = 
{'min_value': self.min_value, 'max_value' : self.max_value}\n defaults.update(kwargs)\n return super(MinMaxFloat, self).formfield(**defaults)\n\n\nclass House(models.Model):\n house_name = models.CharField(max_length = 200)\n total_points = models.CharField(max_length= 200)\n #image = models.FileField(default= None, upload_to='house_pictures')\n \n #def sum_points(self):\n #all_points= map(lambda x: x.point_amount, self.event_set.all())\n #return np.sum(list(all_points))\n # def __str__(self):\n # return f'{self.house_name} House'\n\n\nclass Event(models.Model):\n event_name = models.CharField(max_length = 200)\n comment = models.TextField(blank = True, null=True)\n point_amount = MinMaxFloat( min_value = 0.0, max_value = 1000000.0)\n #House = models.ForeignKey(House, on_delete= models.CASCADE)\n event_date = models.DateField('Event Date', default= date.today)\n\n \n","sub_path":"websiteproject/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"31848657","text":"import math \nimport numpy as np\n\ndef sph2cart(az, inc, r):\n\taz = math.radians(az)\n\tinc = math.radians(inc)\n\n\tx = r*math.sin(inc)*math.cos(az) \n\ty = r*math.sin(inc)*math.sin(az)\n\tz = r*math.cos(inc)\n\n\treturn np.array([x, y, z])\n\ndef orientation_transformation(pitch, yaw, roll, input_vector): \n\t# Clockwise rotations about axes\n\n\tpitch = math.radians(pitch) \n\tyaw = math.radians(yaw)\n\troll = math.radians(roll)\n\n\tpitchTransform = [[math.cos(pitch), 0, -math.sin(pitch)], [0, 1, 0], [math.sin(pitch), 0, math.cos(pitch)]]\n\tyawTransform = [[math.cos(yaw), -math.sin(yaw), 0], [math.sin(yaw), math.cos(yaw), 0], [0, 0 , 1]]\n\trollTransform = [[1, 0, 0], [0, math.cos(roll), -math.sin(roll)], [0, math.sin(roll), math.cos(roll)]]\n\n\tmatrixTransform = np.matmul(pitchTransform, yawTransform)\n\tmatrixTransform = np.matmul(matrixTransform, rollTransform)\n\n\treturn np.matmul(matrixTransform, input_vector) \n\ndef findAngle(vector):\n\tnormal = [0, 0, 1] # Normal of plane\n\tsin_angle = (np.dot(vector, normal))/(np.linalg.norm(vector) * np.linalg.norm(normal)) \n\treturn math.degrees(math.asin(sin_angle))\n\naz = 0\ninc = 90 \nr = 1 \n\narray_pitch = 35.9\narray_yaw = 157\narray_roll = 0\n\nspeaker_pitch = -array_pitch # Array pitch => Opposite of Speaker pitch \nspeaker_yaw = -array_yaw \nspeaker_roll = -array_roll\n\nplane_normal = [0, 0, 1]\n\nvector = sph2cart(az,inc,r) # Take the cartesian position of the speaker in space as a vector \n\nnew_vector = orientation_transformation(speaker_pitch, speaker_yaw, speaker_roll, vector) # Rotation of camera array mapped to corresponding rotation of speaker vector instead \n# Instead of taking the orientation of the array, take the corresponding orientation of the speaker vector instead \n# Note that must do pitch, then yaw, then roll in that order since I defined the matrix that way \n# This is because the plane changes with each rotation in any of the axes \n# However, when i'm doing the pitch, yaw and roll of the array, relatively I should keep the array in the reference plane and do the orientations with reference to the same plane. \n# i.e pitch up, but when i do azimuth I rotate about the original z axis still, not about the tilted z axis! 
\n\n\n# for array_yaw in range(0, 360, 10):\n# \tprint('For array_yaw of' + str(array_yaw))\n# \tspeaker_yaw = -array_yaw \n# \tnew_vector = pitch_transformation(speaker_pitch, speaker_yaw, vector)\n# \tprint(findAngle(new_vector))\n\nprint(findAngle(new_vector))\n\n\n#print(pitch_transformation(pitch, vector))\n\n# def dir_wrt_rotated_axes(az, inc, r): \n# \tx, y, z = sph2cart(az, inc, r)\n\n# Note that trigo functions convert between an angle and the ratio of two sides of a triangle \n# Cos, Sin and Tan take an angle in radians as input and returns the ratio \n# aCos, aSin, aTan take the ratio as input and return an angle in radians \n# Only convert the angles, never the ratios \n# https://en.wikipedia.org/wiki/Spherical_coordinate_system","sub_path":"src/rpy_sph.py","file_name":"rpy_sph.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"457145510","text":"import asyncio\nimport json\nimport logging\nimport os\nimport random\nimport time\nimport urllib.request\nimport xml.etree.ElementTree as ElementTree\n\nimport discord\nimport redis\nfrom cleverbot import Cleverbot\n\ntry:\n redis_address = os.environ['REDIS_ADDRESS']\nexcept KeyError:\n redis_address = 'redis'\nclient = discord.Client()\ncb = Cleverbot()\nr = redis.StrictRedis(host=redis_address, port=6379, db=0)\nlog_time = time.strftime('%Y-%m-%d_%H:%M:%S')\ntry:\n os.mkdir('logs')\nexcept FileExistsError:\n pass\nlogging.basicConfig(format='%(asctime)s %(levelname)s %(name)s: %(message)s ', datefmt='%d/%m/%Y %H:%M:%S',\n filename='logs/{0}.log'.format(log_time), level=logging.DEBUG)\nlast_message = None\n\n\n@client.event\nasync def on_ready():\n print('Initializing bot...')\n await redis_check()\n print('--------------------------------------------------------------------')\n print('Discord API version is: {0}'.format(discord.__version__))\n print('Logfile: logs/{0}.log'.format(log_time))\n if discord.opus.is_loaded():\n logging.info('Opus successfully loaded')\n print('Opus successfully loaded')\n else:\n logging.error('Opus failed to load!')\n print('Opus failed to load. Voice functionality is unavailable')\n logging.info('Successfully logged in as {0} with id {1}'.format(client.user.name, client.user.id))\n print('Successfully logged in as {0} with id {1}'.format(client.user.name, client.user.id))\n try:\n status = redis_get('discord:status')\n if not status:\n status = 'https://git.io/vKcCg'\n await client.change_status(game=discord.Game(name=status))\n logging.info('Status is set to: \\'Playing {0}\\''.format(status))\n print('Status message: \\'Playing {0}\\''.format(status))\n except AttributeError or TypeError:\n logging.error('No status message set')\n print('No status message')\n print('--------------------------------------------------------------------')\n print('Bot is now awaiting for client messages...')\n print('And calling redis. 
Last message id is {0}'.format(last_message))\n\n\n@client.event\nasync def on_member_join(member):\n await client.send_message(member.server, 'Привет, {0}\\nДобро пожаловать на сервер {1}!'\n .format(member.mention, member.server.name))\n\n\n@client.event\nasync def on_message(message):\n content = message.content\n if message.author != client.user:\n if content.startswith('!'):\n try:\n prefix_and_command, arguments = content.split(maxsplit=1)\n arguments = arguments.split()\n except ValueError:\n prefix_and_command = content\n arguments = ''\n prefix = prefix_and_command[0]\n command = prefix_and_command[1:]\n logging.debug('Prefix: {0}'.format(prefix))\n logging.debug('Command: {0}'.format(command))\n arguments_string = ''\n for i in arguments:\n arguments_string = arguments_string + i + ' '\n logging.debug('Arguments: {0}'.format(arguments_string))\n if command == 'radio':\n radio()\n elif command == 'np':\n await client.send_message(message.channel, now_playing())\n elif command == 'issue':\n await client.send_message(message.channel, issue())\n elif command == 'get':\n await client.send_message(message.channel, redis_get(arguments[0]))\n elif command == 'set':\n await client.send_message(message.channel, redis_set(arguments[0], arguments[1]))\n elif client.user in message.mentions:\n # CleverBot-интеграция\n time_to_wait = random.random() * 10\n await client.send_typing(message.channel)\n await asyncio.sleep(time_to_wait)\n answer = cb.ask(message.content.replace(message.author.mention + ' ', '')) \\\n .encode('ISO-8859-1').decode('utf-8')\n await client.send_message(message.channel, '{0} {1}'.format(message.author.mention, answer))\n elif message.channel == discord.utils.find(lambda c: c.name == 'dev', message.server.channels):\n forward(message)\n print('[{1}] {0} {3}/#{4} {2}: {5}'.\n format(time.strftime('%H:%M:%S'),\n last_message, message.author.name, message.server, message.channel, message.content))\n\n\nasync def redis_check():\n global last_message\n if redis_get('message_query:last') is not None:\n last_message = int(redis_get('message_query:last'))\n else:\n last_message = 0\n return True\n while True:\n if redis_get('message_query:last') is not None:\n last_message = int(redis_get('message_query:last'))\n else:\n last_message = 0\n # Частота опроса Redis (точнее, время ожидания до следующего опроса)\n # 1 = 1 раз в секунду, 2 = каждые две секунды\n await asyncio.sleep(1)\n\n\ndef redis_get(key):\n try:\n content_b = r.get(key)\n content_str = content_b.decode('utf-8')\n logging.debug('Got {0} from Redis. 
Value is: {1}'.format(key, content_str))\n return content_str\n except AttributeError:\n logging.error('Could not get {0} from Redis'.format(key))\n return False\n\n\ndef redis_set(key, value):\n try:\n r.set(key, value)\n logging.debug('Set {0} to {1}'.format(key, value))\n return True\n except AttributeError:\n logging.error('Could not set {0} to {1} in Redis'.format(key, value))\n return False\n\n\ndef json_compose(message):\n json_msg = \\\n {'id': last_message,\n 'author': message.author.name,\n 'content': message.content,\n 'origin': 'discord'}\n return json.dumps(json_msg)\n\n\ndef json_parse(json_text):\n message = json.loads(json_text.replace(\"'\", \"\\\"\"))\n return '\\nType: {0}\\nContent: {1}\\nParsed: \\n\\nID: {3}\\nAuthor: {2}\\nContent: {5}\\nOrigin: {4}'.\\\n format(type(message),\n message,\n message.get('author'),\n message.get('id'),\n message.get('origin'),\n message.get('content'))\n\n\ndef forward(message):\n global last_message\n composed_msg = json_compose(message)\n redis_set('message_query:id{0}'.format(last_message), composed_msg)\n last_message += 1\n redis_set('message_query:last', last_message)\n logging.debug('Message id{0} forwarded'.format(last_message))\n\n\ndef issue():\n # TODO GitHub Issues Integration\n pass\n\n\ndef radio():\n # TODO Radio functionality\n pass\n\n\ndef now_playing():\n response = urllib.request.urlopen('http://radioparadise.com/xml/now.xml')\n tree = ElementTree.parse(response)\n root = tree.getroot()\n return '#nowplaying: {0} - {1} с альбома {2} \\n' \\\n 'http://www.radioparadise.com/rp_2.php?#name=Music&file=songinfo&song_id={3}'.\\\n format(root[0][3].text, root[0][4].text, root[0][6].text, root[0][5].text)\n\n\ntry:\n token = redis_get('discord:token')\n client.loop.create_task(redis_check())\n client.run(token)\nexcept FileNotFoundError:\n client.logout()\n print('Logged out')\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"273494694","text":"import autoarray as aa\nfrom toy_gaussian.src.pipeline import visualizer as vis\nimport os\nimport pytest\nfrom os import path\nimport shutil\nfrom autofit import conf\n\ndirectory = path.dirname(path.realpath(__file__))\n\n\n@pytest.fixture(name=\"plot_path\")\ndef make_visualizer_plotter_setup():\n return \"{}/../test_files/plotting/visualizer/\".format(\n os.path.dirname(os.path.realpath(__file__))\n )\n\n\n@pytest.fixture(autouse=True)\ndef set_config_path():\n conf.instance = conf.Config(\n path.join(directory, \"../test_files/plot\"), path.join(directory, \"output\")\n )\n\n\nclass TestPhaseImagingVisualizer:\n def test__visualizes_imaging_using_configs(\n self, masked_imaging_7x7, include_all, plot_path, plot_patch\n ):\n\n visualizer = vis.PhaseImagingVisualizer(\n masked_dataset=masked_imaging_7x7, image_path=plot_path\n )\n\n visualizer.visualize_imaging()\n\n assert plot_path + \"subplots/subplot_imaging.png\" in plot_patch.paths\n assert plot_path + \"imaging/image.png\" in plot_patch.paths\n assert plot_path + \"imaging/noise_map.png\" not in plot_patch.paths\n assert plot_path + \"imaging/psf.png\" in plot_patch.paths\n assert plot_path + \"imaging/signal_to_noise_map.png\" not in plot_patch.paths\n assert (\n plot_path + \"imaging/absolute_signal_to_noise_map.png\"\n not in plot_patch.paths\n )\n assert plot_path + \"imaging/potential_chi_squared_map.png\" in plot_patch.paths\n\n def 
test__visualizes_fit_and_inversion_using_configs(\n self,\n masked_imaging_7x7,\n fit_imaging_7x7,\n gaussians,\n include_all,\n plot_path,\n plot_patch,\n ):\n\n if os.path.exists(plot_path):\n shutil.rmtree(plot_path)\n\n visualizer = vis.PhaseImagingVisualizer(\n masked_dataset=masked_imaging_7x7, image_path=plot_path\n )\n\n visualizer.visualize_fit(\n fit=fit_imaging_7x7, gaussians=gaussians, during_analysis=False\n )\n\n assert plot_path + \"subplots/subplot_fit_imaging.png\" in plot_patch.paths\n assert plot_path + \"fit_imaging/image.png\" in plot_patch.paths\n assert plot_path + \"fit_imaging/noise_map.png\" not in plot_patch.paths\n assert plot_path + \"fit_imaging/signal_to_noise_map.png\" not in plot_patch.paths\n assert plot_path + \"fit_imaging/model_image.png\" in plot_patch.paths\n assert plot_path + \"fit_imaging/residual_map.png\" not in plot_patch.paths\n assert plot_path + \"fit_imaging/normalized_residual_map.png\" in plot_patch.paths\n assert plot_path + \"fit_imaging/chi_squared_map.png\" in plot_patch.paths\n\n image = aa.util.array.numpy_array_2d_from_fits(\n file_path=plot_path + \"fit_imaging/fits/image.fits\", hdu=0\n )\n\n assert image.shape == (5, 5)\n\n\nclass TestPhaseInterferometerVisualizer:\n def test__visualizes_interferometer_using_configs(\n self,\n masked_interferometer_7,\n general_config,\n include_all,\n plot_path,\n plot_patch,\n ):\n\n visualizer = vis.PhaseInterferometerVisualizer(\n masked_dataset=masked_interferometer_7, image_path=plot_path\n )\n\n visualizer.visualize_interferometer()\n\n assert plot_path + \"subplots/subplot_interferometer.png\" in plot_patch.paths\n assert plot_path + \"interferometer/visibilities.png\" in plot_patch.paths\n assert plot_path + \"interferometer/u_wavelengths.png\" not in plot_patch.paths\n assert plot_path + \"interferometer/v_wavelengths.png\" not in plot_patch.paths\n assert plot_path + \"interferometer/primary_beam.png\" in plot_patch.paths\n\n def test__visualizes_fit_using_configs(\n self,\n masked_interferometer_7,\n fit_interferometer_7,\n gaussians,\n include_all,\n plot_path,\n plot_patch,\n ):\n\n visualizer = vis.PhaseInterferometerVisualizer(\n masked_dataset=masked_interferometer_7, image_path=plot_path\n )\n\n visualizer.visualize_fit(\n fit=fit_interferometer_7, gaussians=gaussians, during_analysis=True\n )\n\n assert plot_path + \"subplots/subplot_fit_interferometer.png\" in plot_patch.paths\n assert plot_path + \"fit_interferometer/visibilities.png\" in plot_patch.paths\n assert plot_path + \"fit_interferometer/noise_map.png\" not in plot_patch.paths\n assert (\n plot_path + \"fit_interferometer/signal_to_noise_map.png\"\n not in plot_patch.paths\n )\n assert (\n plot_path + \"fit_interferometer/model_visibilities.png\" in plot_patch.paths\n )\n assert (\n plot_path + \"fit_interferometer/residual_map_vs_uv_distances_real.png\"\n not in plot_patch.paths\n )\n assert (\n plot_path\n + \"fit_interferometer/normalized_residual_map_vs_uv_distances_real.png\"\n in plot_patch.paths\n )\n assert (\n plot_path + \"fit_interferometer/chi_squared_map_vs_uv_distances_real.png\"\n in plot_patch.paths\n )\n","sub_path":"toy_gaussian/test/unit/test_pipeline/test_visualizer.py","file_name":"test_visualizer.py","file_ext":"py","file_size_in_byte":5140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"307233179","text":"def reverse(s):\n\n # Base Case\n if len(s) <= 1:\n return s\n # recursion\n else:\n return reverse(s[1:]) + 
s[0]\n\n\nprint(reverse(\"Shark\"))\n\n# Don't worry about the code below its for reference\n\na = 'krahS'\n\nb = a[0:4] + a[-1]\n\nprint(b)\n","sub_path":"Recursion/reverse_string_recursively.py","file_name":"reverse_string_recursively.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"528376831","text":"# -*-coding=utf-8-*-\nimport random\n# from sshtunnel import SSHTunnelForwarder\nimport redis\nimport pymysql, sqlite3\nimport pandas as pd\nfrom toolkit import Toolkit\nimport json, os\nfrom setting import get_mysql_conn, get_engine\nimport time\nimport threading\nfrom twisted.enterprise import adbapi\nfrom twisted.internet import reactor, defer\nimport config\n\n\n# engine = get_engine('daily')\n# conn = get_mysql_conn(db,local=False)\n\n# 统计那个邮箱的用户最多\ndef groupcheck():\n conn = get_mysql_conn('',local='ali')\n cur = conn.cursor()\n cmd = 'select `email` from aws_users group by`email`'\n cur.execute(cmd)\n ret = cur.fetchall()\n domain = {}\n for i in ret:\n mail = i[0].split('@')[1]\n domain.setdefault(mail, 0)\n domain[mail] += 1\n\n result = sorted(domain.items(), key=lambda x: x[1], reverse=True)\n print(result[-100:])\n\n\nclass MysqlUsage():\n def __init__(self):\n self.conn = get_mysql_conn('db_zdt', local=True)\n\n def getVersion(self):\n cur = self.db.cursor()\n cur.execute('select version()')\n data = cur.fetchone()\n print(data)\n\n def query(self):\n cursor = self.db.cursor()\n cmd = 'select * from `{}` where datetime = \\'{}\\''\n cursor.execute(cmd.format('300333', '2017-11-15'))\n data = cursor.fetchall()\n for i in data[0]:\n print(i, )\n print\n print(data[0])\n '''\n for i in data:\n print(i)\n '''\n\n def delete_item(self):\n cursor = self.db.cursor()\n cmd = 'select table_name from information_schema.`TABLES` where table_schema=\\'{}\\';'\n cursor.execute(cmd.format('history'))\n data = cursor.fetchall()\n for i in data:\n code = i[0]\n cmd_del = 'delete from `{}` where datetime = \\'2017-11-17\\';'\n try:\n cursor.execute(cmd_del.format(code))\n # print(cursor.fetchall())\n self.db.commit()\n except Exception as e:\n print(e)\n self.db.rollback()\n\n def modify_table(self):\n engine_line = get_engine('db_selection')\n df = pd.read_sql_table('xiayinxian', engine_line, index_col='index')\n df['ocupy_ration'] = df['ocupy_ration'].map(lambda x: '%.3f' % x)\n # print(df)\n df.to_sql('xiayingxian', engine_line)\n\n def sql_table(self):\n\n df = pd.read_sql_table('2017-11-17', engine, index_col='index')\n\n def DB_Usage(self):\n\n db1 = sqlite3.connect(\"df_sql3.db\")\n data = [[1, 2, 3, 4], [3, 4, 5, 6], [54, 234, 23, 222]]\n df1 = pd.DataFrame(data)\n print(df1)\n df1.to_sql(\"data\", db1)\n\n def DB_Usage_sqlite(self):\n db = sqlite3.connect(\"db_sql_test.db\")\n # cursor=db.cursor()\n # cursor.execute(\"SELECT VERSION()\")\n # data=cursor.fetchone()\n # print(data)\n db.close()\n cmd = 'SELECT * from person;'\n df = pd.read_sql(cmd, db)\n print(df)\n\n def Aliyun(self):\n passwd = Toolkit.getUserData('data.cfg')['alipasswd']\n print(passwd)\n conn = pymysql.connect(host='', # 远程主机的ip地址,\n user='', # MySQL用户名\n db='', # database名\n passwd=passwd, # 数据库密码\n port=3306, # 数据库监听端口,默认3306\n charset=\"utf8\") # 指定utf8编码的连接\n cursor = conn.cursor()\n cursor.execute('SELECT VERSION()')\n data = cursor.fetchone()\n print(data)\n # 已经连通了,可以开搞。\n id = 31\n cmd1 = \"insert into `aws_user_action_history_data`(`history_id`,`associate_content`,`associate_attached`,`addon_data`) values 
('%d','huati','',''),('%d','','',''),('%d','Rocky-Title','ROCK-Content','')\" % (\n            id, id + 1, id + 2)\n        cursor.execute(cmd1)\n        # conn.commit()\n        topic_id = 7\n        title = \"Hello\" + str(topic_id)\n        cmd2 = \"insert into `aws_topic`(`topic_id`,`topic_title`,`add_time`,`discuss_count`,`topic_description`,`topic_pic`,`topic_lock`,`focus_count`,`user_related`,`url_token`,`merged_id`,`seo_title`,`parent_id`,`is_parent`,`discuss_count_last_week`,`discuss_count_last_month`,`discuss_count_update`) values('%d','huati5','1493370061','1','',null,'0','1','0',null,'0',null,'0','0','1','1','1493370061')\" % topic_id\n        cursor.execute(cmd2)\n        # cursor.commit()\n        arti_id = 5\n        cmd3 = \"insert into `aws_article`(`id`,`uid`,`title`,`message`,`comments`,`views`,`add_time`,`has_attach`,`lock`,`votes`,`title_fulltext`,`category_id`,`is_recommend`,`chapter_id`,`sort`) values('%d','1','ddThe Rocky23 IC ************TITLE','dddThe Rocky IC Content !!!!!!!!','0','1','1493370061','0','0','0','ic title','1','0',null,'0')\" % arti_id\n        cursor.execute(cmd3)\n        # cursor.commit()\n        cmd4 = \"insert into `aws_topic_relation`(`id`,`topic_id`,`item_id`,`add_time`,`uid`,`type`) values('3','4','3','1493370061','1','article')\"\n        # cursor.execute(cmd4)\n        conn.commit()\n        conn.close()\n\n    def create_table(self, table_name):\n        cursor = self.db.cursor()\n        create_cmd = '''\n        CREATE TABLE ROCKY(\n        NAME TEXT,CITY_NAME TEXT,LOCATION TEXT,PRICE TEXT\n        );\n        '''\n        print(create_cmd)\n        cursor.execute(create_cmd)\n        self.db.commit()\n        self.db.close()\n\n    def mysql_add_data(self, table):\n        cursor = self.db.cursor()\n        create_db = '''\n        create table if not exists houseinfo(\n        name TEXT,city_name TEXT, location TEXT,price TEXT\n        );\n        '''\n        cursor.execute(create_db)\n        self.db.commit()\n\n        cursor.execute('select version()')\n        data = cursor.fetchone()\n        print(data)\n        print(type(data))\n        my_dict = {\"2017-07\": [{\"origin\": \"LJ\", \"price\": 44267, \"crawl_date\": \"2017-09-01\"}]}\n        price = json.dumps(my_dict)\n        print(price)\n        item2 = {'name': '万科', 'city_name': '深圳', 'location': '龙岗', 'price': price}\n        item = {'name': 'wk2', 'city_name': '1sz1', 'location': 'lg1', 'price': '12111'}\n        print(item)\n\n        sql = '''\n        insert into houseinfo ( name,city_name,location,price)\n        values ('wk','sz','lg',12) \n        '''\n\n        # this statement runs correctly\n        sql2 = '''insert into houseinfo ( name,city_name,location,price) values ('%s','%s','%s','%s')''' % (\n            item2['name'], item2['city_name'], item2['location'], item2['price'])\n        print(sql2)\n        # the 'sql' insert above has problems\n        cursor.execute(sql2)\n\n        query_cmd = '''\n        select name from first;\n        '''\n        # cursor.execute(sql)\n        # data1=cursor.fetchone()\n        # print(data1)\n\n        self.db.commit()\n        self.db.close()\n\n    def query_base(self):\n        vol = 500\n        table = 'tick0901'\n        sql_cmd1 = '''\n        select * from %s where volume>%d;\n        ''' % (table, vol)\n        cursor = self.db.cursor()\n        cursor.execute(sql_cmd1)\n        # dataone=cursor.fetchone()\n        dataall = cursor.fetchall()\n        # print(dataone)\n        for i in dataall:\n            print(i[0], i[1], i[2], i[3], i[4], i[5], i[6])\n\n    def update(self):\n        sql_cmd = '''\n        update tick0901 set type='NA' where type='中性盘'\n        '''\n        cursor = self.db.cursor()\n        cursor.execute(sql_cmd)\n        self.db.commit()\n        self.db.close()\n\n    def transfer_data(self):\n        fp = open('houseinfo_origin_all.json', 'r')\n        cursor = self.db.cursor()\n        linenumber = 0\n        while 1:\n            try:\n                line = fp.readline()\n                item = json.loads(line.strip())\n                sql_cmd = '''\n                insert into houseinfo(name,city_name,building_type,building_data,location,price) values('%s','%s','%s','%s','%s','%s')\n                ''' % 
(item['name'], item['city_name'], item['building_type'], item['building_date'], item['location'],\n                       json.dumps(item['price']))\n                cursor.execute(sql_cmd)\n                linenumber = linenumber + 1\n            except Exception as e:\n                print(e)\n                print(\"EOF\")\n                break\n        self.db.commit()\n        self.db.close()\n        print(linenumber)\n        # print(line)\n\n    def replace(self):\n        cursor = self.db.cursor()\n        cmd = '''\n        insert into \n        '''\n\n    def show_all_table(self):\n        cur = self.conn.cursor()\n        cmd = 'show tables;'\n        cur.execute(cmd)\n        content = cur.fetchall()\n        for item in content:\n            print(item[0])\n\n\ndef create_db_case():\n    low_db = get_mysql_conn('db_selection')\n    low_cursor = low_db.cursor()\n    code = '12345'\n    cur_low = 12.22\n    date = '2017-01-11'\n    name = u'总公司'\n    create_cmd = 'create table if not exists break_low ' \\\n                 '(`index` int primary key auto_increment,code text ,name text , cur_low float ,datetime datetime);'\n    low_cursor.execute(create_cmd)\n    insert_cmd = 'insert into break_low (code,name,cur_low,datetime) values (%s,%s,%s,%s);'\n    low_info = (code, name, cur_low, date)\n    low_cursor.execute(insert_cmd, low_info)\n    low_db.commit()\n\n# delete rows matching a WHERE condition\ndef remove_row():\n    r = redis.StrictRedis('localhost', 6379, db=0)\n    db = get_mysql_conn('history')\n    cur = db.cursor()\n    for k in r.keys():\n        print(k)\n        cmd = 'delete from `{}` where datetime > \\'2017-11-16\\';'.format(k)\n        try:\n            cur.execute(cmd)\n            db.commit()\n        except:\n            db.rollback()\n    db.close()\n\n# run a SQL script file (currently not working)\ndef run_sql_script():\n\n    conn = get_mysql_conn('db_rocky',local='local')\n    cur=conn.cursor()\n    with open('tb_pressure.sql','r') as f:\n        for line in f:\n            cur.execute(line)\n    conn.commit()\n    conn.close()\n\n    # import sqlite3\n    # db = sqlite3.connect(cur_db)\n    # cur = db.cursor()\n    # with open('world.sql', 'rb') as f:\n    #     cur.executescript(f.read())\n\ndef put_to_redis():\n    r = redis.Redis(host='localhost', port=6379, db=10)\n    conn = get_mysql_conn('db_parker', local=True)\n    cursor = conn.cursor()\n    cmd = 'select `identity_number` from frauds'\n    cursor.execute(cmd)\n    ret = cursor.fetchall()\n    for i in ret:\n        r.lpush('identity', i[0])\n\n\ndef query_case():\n    connect = pymysql.connect(host='', port=0, user='', password='', db='losecredit',\n                              charset='utf8')\n    cursor = connect.cursor()\n    cmd = 'select DISTINCT fname from dishonest limit 2000'\n    cursor.execute(cmd)\n    ret = cursor.fetchall()\n    name = []\n    for i in ret:\n        name.append(i[0])\n    for i in name:\n        cmd2 = 'select flag,count(*) as fn from dishonest where fname={!r} group by `flag` having fn=1'.format(i)\n        cursor.execute(cmd2)\n        ret3 = cursor.fetchall()\n        if ret3:\n            print(ret3)\n            print(i)\n\n\ndef threading_case():\n    start = time.time()\n\n    dbpool = pymysql.connect(host=config.mysql_ip, port=3367, user='crawler', password='Crawler@1234',\n                             database='losecredit', charset='utf8')\n    # adbapi's dbpool cannot be used here\n    # dbpool = adbapi.ConnectionPool('pymysql', host=config.mysql_ip, port=3367, user='crawler', password='Crawler@1234',\n    #                                database='losecredit', charset='utf8')\n    thread_list = []\n    for i in range(100):\n        t = threading.Thread(target=access_normal, args=(dbpool,))\n        thread_list.append(t)\n    for t in thread_list:\n        t.start()\n    for t in thread_list:\n        t.join()\n\n    print('time used {}ms'.format((time.time() - start) * 1000))\n\n\ndef access_normal(dbpool):\n\n    name = '杨小东'\n    idnum = '3207051972083015{}{}'.format(random.randint(0, 9), random.randint(0, 9))\n    cmd = \"SELECT DISTINCT t.cidno,t.fname, t.region, t.case_time, t.case_no, t.court, t.basis_no, t.detail, t.fullfil, t.publish_time FROM dw_person_dishonest t where t.fname='{0}' and 
t.cidno ='{1}'\".format(\n name, idnum)\n\n cursor = dbpool.cursor()\n cursor.execute(cmd)\n ret = cursor.fetchall()\n if ret:\n print(ret[0],'\\n')\n\n\n\ndef handle_error(field):\n\n print('failed ',field)\n\n# 异步写入mysqlkzhwo\ndef dbpool_main():\n print('start to run')\n dbpool = adbapi.ConnectionPool('pymysql', host=config.mysql_ip, port=3367,\n user=config.username,\n password=config.password,\n database='spider', charset='utf8')\n\n for i in range(100):\n data=(f'XGD{i}',f'S{i}')\n # print(data)\n query = dbpool.runInteraction(query_cmd, data)\n query.addErrback(handle_error)\n\n reactor.run() # 一定加这一句, 不然无法运行\n\n\ndef query_cmd(cursor, data):\n print(data)\n cmd = \"INSERT INTO tb_pressure (name,score) values(%s,%s)\"\n\n cursor.execute(cmd, data)\n\n # dbpool = adbapi.ConnectionPool('pymysql', host='', port=0, user='', password='@',\n # database='losecredit', charset='utf8')\n\n\n\n\n# 异步操作数据库\ndef async_sql_demo():\n def runQuery(dbpool):\n name = '杨小东'\n idnum = '320705197208301539'\n # idnum = '3207051972083015{}{}'.format(random.randint(0, 9), random.randint(0, 9))\n cmd = \"\".format(\n name, idnum)\n return dbpool.runQuery(cmd)\n\n # 异步操作\n def query_cmd1(l):\n print(l)\n # for item in l:\n # print(item)\n\n dbpool = adbapi.ConnectionPool('pymysql', host=config.mysql_ip, port=3367,\n user=config.username,\n password=config.password,\n database='spider', charset='utf8')\n\n runQuery(dbpool).addCallback(query_cmd1)\n # reactor.callLater(1, reactor.stop)\n reactor.run()\n # reactor.stop()\n print('End')\n\n# 测试,失效了\ndef connection_check():\n conn_test = pymysql.connect(host='rds0710650me01y6d3ogo.mysql.rds.aliyuncs.com',\n port=3306,\n user='yunker',\n passwd='yunke2016',\n db='',\n charset='utf8'\n )\n cursor = conn_test.cursor()\n\n\ndef main():\n # DB_Usage()\n # DB_Usage_sqlite()\n # Aliyun()\n # obj = MysqlUsage()\n # obj.query()\n # obj.delete_item()\n # obj.modify_table()\n # obj.sql_table()\n # obj.create_table('houseinfo')\n # obj.mysql_add_data('temp')\n # obj.query()\n # obj.update()\n # obj.transfer_data()\n # obj.show_all_table()\n # remote_mysql2()\n # remote_mysql()\n # create_db_case()\n # remove_row()\n\n # run_sql_script()\n\n # groupcheck()\n\n # put_to_redis()\n # query_case()\n # threading_case()\n\n # dbpool_main()\n\n # pool_main()\n # connection_check()\n async_sql_demo()\n\n\nif __name__ == '__main__':\n # data_path = os.path.join(os.getcwd(), 'data')\n # os.chdir(data_path)\n main()\n","sub_path":"mysql_usage.py","file_name":"mysql_usage.py","file_ext":"py","file_size_in_byte":15286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"580952192","text":"from django.http import HttpResponseRedirect\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.views import generic\n\nfrom .cart import Cart\nfrom .forms import CartAddProductForm\nfrom shop.models import Product\n\n\nclass CartAddView(generic.CreateView):\n http_method_names = ['post', 'head', 'options', ]\n\n def post(self, request, *args, **kwargs):\n cart = Cart(request)\n product = get_object_or_404(Product, id=self.kwargs.get(\"product_id\"))\n form = CartAddProductForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n cart.add(product=product, quantity=cd['quantity'], update_quantity=cd['update'])\n return HttpResponseRedirect(request.POST.get('next', '/'))\n\n\nclass CartRemoveView(generic.DeleteView):\n def get(self, request, *args, **kwargs):\n cart = Cart(request)\n product = 
get_object_or_404(Product, id=self.kwargs.get(\"product_id\"))\n cart.remove(product)\n return redirect('cart:cart_detail')\n\n\nclass CartDetailView(generic.View):\n def get(self, request, *args, **kwargs):\n cart = Cart(request)\n for item in cart:\n item['update_quantity_form'] = CartAddProductForm(initial={'quantity': item['quantity'], 'update': True})\n return render(request, 'cart/detail.html', {'cart': cart})\n","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"355785627","text":"from flask import Blueprint, render_template, abort, flash, redirect, url_for\nfrom flask.ext.login import login_required, current_user\nimport speakeasy\nfrom speakeasy.database import Config, db_session, Page\nfrom speakeasy.forms import ConfigForm \nfrom speakeasy.views.utils import menu, blog_menu_location, site_config\n\n\nimport datetime\n\nconfig_blueprint = Blueprint('config', __name__,\n template_folder='templates', url_prefix='/config')\n\n@config_blueprint.route('/', methods=['GET'])\n@login_required\ndef view_config():\n form = config_form()\n current_config = site_config()\n form.site_display_name.data = current_config.site_display_name\n form.site_title.data = current_config.site_title \n form.site_strap_line.data = current_config.site_strap_line\n form.index_page_id.data = current_config.index_page_id\n form.mail_server.data = current_config.mail_server\n form.mail_port.data = current_config.mail_port\n form.mail_username.data = current_config.mail_username\n form.mail_password.data = current_config.mail_password\n form.mail_use_tls.data = current_config.mail_use_tls\n form.mail_enable.data = current_config.mail_enable \n return render_template(\"edit_config.html\", user=current_user, \\\n menu=menu(), title=\"Edit Site Config\", site_config=current_config, form=form)\n\n@config_blueprint.route('/', methods=['POST'])\n@login_required\ndef edit_config():\n form = config_form()\n current_config = site_config()\n if form.validate_on_submit():\n current_config.site_display_name = form.site_display_name.data\n current_config.site_title = form.site_title.data\n current_config.site_strap_line = form.site_strap_line.data\n current_config.index_page_id = form.index_page_id.data\n current_config.mail_server = form.mail_server.data\n current_config.mail_port = form.mail_port.data\n current_config.mail_username = form.mail_username.data\n current_config.mail_password = form.mail_password.data\n current_config.mail_use_tls = form.mail_use_tls.data\n current_config.mail_enable = form.mail_enable.data\n db_session.add(current_config)\n db_session.commit()\n flash(\"Settings successfully updated\")\n else:\n flash(\"Failed to update settings.\")\n return render_template(\"edit_config.html\", user=current_user, \\\n menu=menu(), title=\"Edit Site Config\", site_config=current_config, form=form)\n\ndef config_form():\n form = ConfigForm()\n form.index_page_id.choices = [(p.id, p.title) for p in Page.query.order_by(Page.id).all()]\n return form\n\n\"\"\"\n@blog.route('/')\n\ndef view_blog():\n posts = Blog.query.order_by(Blog.timestamp.desc()).all()\n return render_template('blog_overview.html', user=current_user, \\\n menu=menu(blog_menu_location()), posts=posts, title=\"Blog\")\n\n@blog.route('/post/', methods=['GET'])\n@login_required\ndef show_edit_post(id):\n post = Blog.query.get(id)\n form = BlogPost()\n if post is None:\n abort(404)\n form.title.data = 
post.title\n    form.content.data = post.content\n    return render_template(\"edit_blog.html\", form=form, user=current_user,\\\n            menu=menu(blog_menu_location()), post=post, post_id=post.id, \\\n            title=\"Editing %s\" %(post.title))\n\n@blog.route('/post', methods=['GET'])\n@login_required\ndef show_create_post():\n    form = BlogPost() \n    return render_template(\"edit_blog.html\", form=form, \\\n            user=current_user, menu=menu(blog_menu_location()))\n\n@blog.route('/post/', methods=['POST'])\n@login_required\ndef edit_post(id):\n    post = Blog.query.get(id)\n    if post is None:\n        abort(404)\n    form = BlogPost()\n    if form.validate_on_submit():\n        post.title = form.title.data\n        post.content = form.content.data\n        db_session.add(post)\n        db_session.commit()\n        flash(\"Post successfully updated\")\n    else:\n        flash(\"Failed to update post, failed to validate user input.\")\n    return show_edit_post(id)\n\n@blog.route('/post', methods=['POST'])\n@login_required\ndef create_blog_post():\n    form = BlogPost()\n    if form.validate_on_submit():\n        new_post = Blog( \\\n            title = form.title.data, \\\n            author_user_id=current_user.id, \\\n            content=form.content.data, \\\n            timestamp=datetime.datetime.now())\n        db_session.add(new_post)\n        db_session.commit()\n        flash(\"New blog post successfully posted.\")\n        return view_blog() \n    else:\n        flash(\"Failed to post, user input validation failed.\")\n    return render_template(\"edit_blog.html\", form=form, \\\n        user=current_user, menu=menu(blog_menu_location()))\n\"\"\"\n","sub_path":"wsgi/speakeasy/views/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"542140265","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*- \nimport re\n\nTIDE = 2\nclass GomokuBoard(object):\n    \"\"\"\n    Gomoku board.\n    \"\"\"\n\n    def __init__(self, rowCount, colCount, nInRow):\n        \"\"\"\n        Gomoku board.\n        rowCount: number of rows\n        colCount: number of columns\n        nInRow: stones in a row needed to win; 5 for gomoku, 4 for connect four\n        \"\"\"\n        self.rowCount = rowCount\n        self.colCount = colCount\n        self.states = {}\n        self.curPlayer = 0\n        self.availables = set(range(0, self.rowCount * self.colCount))\n        self.moveHistory = []\n        self.playerName = ['B', 'W', 'T']\n        self.isGameOver = False\n        self.winColor = None\n        self.nInRow = nInRow\n\n    def get_cur_step_no(self):\n        return len(self.moveHistory)\n\n    def move_to_location(self, r, c):\n        \"\"\"\n        convert move (r, c) to location x\n        \"\"\"\n        return r * self.colCount + c\n\n    def location_to_move(self, x):\n        \"\"\"\n        convert location x to move (r, c)\n        \"\"\"\n        r = x // self.colCount\n        c = x % self.colCount\n        return (r, c)\n\n    def play(self, r, c, color):\n        \"\"\"\n        Place a stone at (r, c).\n        \"\"\"\n        x = self.move_to_location(r, c)\n        if self.curPlayer != color:\n            raise Exception(\"invalid move. 
current player is \" + self.playerName[self.curPlayer])\n if self.states.has_key(x):\n raise Exception(\"invalid move (%d, %c) already has chess\" % (r, c))\n self.availables.remove(x)\n self.states[x] = color\n self.moveHistory.append( (x, color) )\n self.curPlayer = 1 - self.curPlayer\n\n def load_from_sgf(self, sgfStr):\n \"\"\"\n load chess from sgf string like B[a9];W[99]\n \"\"\"\n pattern = re.compile(\"[BW]\\[[0-9a-f]{2}\\]\")\n for step in sgfStr.split(\";\"):\n step = step.strip()\n if pattern.match(step) is None:\n continue\n row = int(step[2], 16)\n col = int(step[3], 16)\n color = step[0]\n for i in range(0, len(self.playerName)):\n if color == self.playerName[i]:\n color = i\n self.play(row, col, color)\n\n def to_sgf_str(self):\n \"\"\"\n convert chess to sgf string\n \"\"\"\n s = \"\"\n for step in self.moveHistory:\n x = step[0]\n color = step[1]\n (r, c) = self.location_to_move(x)\n s += \"%s[%s%s];\" % ( self.playerName[color]\n , hex(r)[2:3]\n , hex(c)[2:3])\n return s\n\n def __str__(self):\n \"\"\"\n transfer to chess board for pringting\n \"\"\"\n res = \" \"\n for c in range(0, self.colCount):\n res += hex(c)[2:]\n res += \" \"\n res += '\\n'\n if len(self.moveHistory) > 0:\n lastMove = self.moveHistory[-1]\n lastMove = self.location_to_move(lastMove[0])\n else:\n lastMove = (-1, -1)\n for r in range(0, self.rowCount):\n res += hex(r)[2:]\n if (r, 0) == lastMove:\n res += \"(\"\n else:\n res += \" \"\n for c in range(0, self.colCount):\n i = self.move_to_location(r, c)\n if self.states.has_key(i):\n color = self.playerName[self.states[i]]\n else:\n color = '.'\n res += color\n if (r, c) == lastMove:\n res += \")\"\n elif (r, c + 1) == lastMove:\n res += \"(\"\n else:\n res += \" \"\n res += '\\n'\n return res\n\n def check_game_over(self):\n \"\"\"\n check if game is over\n \"\"\"\n dr = [-1, -1, 0, 1]\n dc = [0, 1, 1, 1]\n if len(self.moveHistory) == 0:\n self.winColor = None\n self.isGameOver = False\n return False\n (x, color) = self.moveHistory[-1]\n (r, c) = self.location_to_move(x)\n for i in range(0, len(dr)):\n cnt = 1\n nr = r\n nc = c\n for j in range(0, 4):\n nr += dr[i]\n nc += dc[i]\n if nr < 0 or nr >= self.rowCount or nc < 0 or nc >= self.colCount:\n break\n nx = self.move_to_location(nr, nc)\n ncolor = self.states.get(nx, -1)\n if color != ncolor:\n break\n cnt += 1\n nr = r\n nc = c\n for j in range(0, 4):\n nr -= dr[i]\n nc -= dc[i]\n if nr < 0 or nr >= self.rowCount or nc < 0 or nc >= self.colCount:\n break\n nx = self.move_to_location(nr, nc)\n ncolor = self.states.get(nx, -1)\n if color != ncolor:\n break\n cnt += 1\n if cnt >= self.nInRow:\n self.isGameOver = True\n self.winColor = color\n return self.isGameOver\n if len(self.availables) == 0:\n self.winColor = TIDE\n self.isGameOver = True\n else:\n self.winColor = None\n self.isGameOver = False\n return self.isGameOver\n\n def is_valid_move(self, r, c):\n \"\"\"\n check if r,c is a valid move\n \"\"\"\n if r < 0 or c < 0 or r >= self.rowCount or c >= self.colCount:\n return False\n x = self.move_to_location(r, c)\n return x in self.availables\n\n def undo(self):\n if len(self.moveHistory) == 0:\n return False\n (x, color) = move = self.moveHistory[-1]\n (r, c) = self.location_to_move(x)\n self.availables.add(x)\n del self.states[x]\n self.curPlayer = color\n self.moveHistory.remove(move)\n return 
True\n\n\n\n\n\n\n","sub_path":"history_code/gomoku_8_8_5_alpha_zero/gomoku_chess.py","file_name":"gomoku_chess.py","file_ext":"py","file_size_in_byte":5918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"200726316","text":"# -*- coding: UTF-8 -*-\n\nimport sys\nimport json\n\nfrom pypokerengine.players import BasePokerPlayer\nfrom pypokerengine.utils.card_utils import gen_cards, estimate_hole_card_win_rate\n\nNB_SIMULATION = 200\n\n\n\nclass MyPlayer(BasePokerPlayer):\n\n\n def declare_action(self, valid_actions, hole_card, round_state):\n\n fold_action_info, call_action_info, raise_action_info = valid_actions[0], valid_actions[1], valid_actions[2]\n\n pot_size = round_state[\"pot\"][\"main\"][\"amount\"]\n\n self.nb_active = len([player for player in round_state['seats'] if player['state'] != 'folded'])\n\n community_card = round_state['community_card']\n\n free_flop = self.seat == round_state[\"big_blind_pos\"] and call_action_info[\"amount\"] == 30\n\n win_rate = estimate_hole_card_win_rate(\n nb_simulation=NB_SIMULATION,\n nb_player=self.nb_active,\n hole_card=gen_cards(hole_card),\n community_card=gen_cards(community_card)\n )\n\n street = round_state[\"street\"]\n percent = 1.0 / self.nb_active\n\n if street == \"preflop\":\n if self.nb_active >=7 and win_rate >= 0.29:\n action = \"raise\"\n amount = raise_action_info[\"amount\"][\"max\"]\n if amount == -1 or amount == -4:\n action = \"call\"\n amount = call_action_info[\"amount\"]\n elif self.nb_active >=3 and win_rate >= 0.37:\n action = \"raise\"\n amount = raise_action_info[\"amount\"][\"max\"]\n if amount == -1 or amount == -4:\n action = \"call\"\n amount = call_action_info[\"amount\"]\n elif self.nb_active >=1 and win_rate >= 0.75:\n action = \"raise\"\n amount = raise_action_info[\"amount\"][\"max\"]\n if amount == -1 or amount == -4:\n action = \"call\"\n amount = call_action_info[\"amount\"]\n # elif win_rate >= 0.19:\n # action = \"raise\"\n # amount = min(pot_size, 4 * raise_action_info[\"amount\"][\"min\"])\n # if amount == -1 or amount == -4:\n # action = \"call\"\n # amount = call_action_info[\"amount\"]\n elif win_rate >= 0.16 and call_action_info[\"amount\"] == 30:\n action = \"call\"\n amount = call_action_info[\"amount\"]\n else:\n action = \"fold\"\n amount = fold_action_info[\"amount\"]\n if free_flop:\n action = \"call\"\n amount = call_action_info[\"amount\"]\n\n # if street == \"flop\":\n if win_rate >= percent * 1.2:\n action = \"raise\"\n amount = raise_action_info[\"amount\"][\"max\"]\n if amount == -1 or amount == -4:\n action = \"call\"\n amount = call_action_info[\"amount\"]\n elif win_rate >= percent:\n action = \"raise\"\n amount = min(pot_size, 4 * raise_action_info[\"amount\"][\"min\"])\n if amount == -1 or amount == -4:\n action = \"call\"\n amount = call_action_info[\"amount\"]\n else:\n action = \"fold\"\n amount = fold_action_info[\"amount\"]\n\n # if street == \"turn\":\n # if win_rate >= percent:\n # action = \"raise\"\n # amount = raise_action_info[\"amount\"][\"max\"]\n # if amount == -1 or amount == -4:\n # action = \"call\"\n # amount = call_action_info[\"amount\"]\n # elif win_rate >= percent*0.9:\n # action = \"raise\"\n # amount = min(pot_size, 4 * raise_action_info[\"amount\"][\"min\"])\n # if amount == -1 or amount == -4:\n # action = \"call\"\n # amount = call_action_info[\"amount\"]\n # elif win_rate >= percent*0.8:\n # action = \"call\"\n # amount = call_action_info[\"amount\"]\n # else:\n # action = \"fold\"\n # amount = 
fold_action_info[\"amount\"]\n #\n # if street == \"river\":\n # if win_rate >= percent:\n # action = \"raise\"\n # amount = raise_action_info[\"amount\"][\"max\"]\n # if amount == -1 or amount == -4:\n # action = \"call\"\n # amount = call_action_info[\"amount\"]\n # elif win_rate >= percent*0.8:\n # action = \"raise\"\n # amount = min(pot_size, 4 * raise_action_info[\"amount\"][\"min\"])\n # if amount == -1 or amount == -4:\n # action = \"call\"\n # amount = call_action_info[\"amount\"]\n # elif win_rate >= percent*0.8:\n # action = \"call\"\n # amount = call_action_info[\"amount\"]\n # else:\n # action = \"fold\"\n # amount = fold_action_info[\"amount\"]\n\n if action == \"raise\":\n amount = max(amount, raise_action_info[\"amount\"][\"min\"])\n amount = min(amount, raise_action_info[\"amount\"][\"max\"])\n\n if action == 'raise' and amount == -1:\n action = 'call'\n amount = call_action_info['amount']\n\n #print 'hole_card:'\n #print hole_card\n\n #print 'win_rate:'\n #print win_rate\n\n #print 'percent:'\n #print percent\n\n #print 'nb_player'\n #print self.nb_active\n\n return action, amount\n\n def receive_game_start_message(self, game_info):\n for (i, seat) in enumerate(game_info[\"seats\"]):\n if seat[\"uuid\"] == self.uuid:\n self.seat = i\n\n def receive_round_start_message(self, round_count, hole_card, seats):\n pass\n\n\n def receive_street_start_message(self, street, round_state):\n pass\n\n def receive_game_update_message(self, action, round_state):\n pass\n\n def receive_round_result_message(self, winners, hand_info, round_state):\n pass\n\n\nif __name__ == '__main__':\n\n player = MyPlayer()\n\n while True:\n line = sys.stdin.readline().rstrip()\n if not line:\n break\n event_type, data = line.split('\\t', 1)\n data = json.loads(data)\n\n if event_type == 'declare_action':\n action, amount = player.declare_action(data['valid_actions'], data['hole_card'], data['round_state'])\n sys.stdout.write('{}\\t{}\\n'.format(action, amount))\n sys.stdout.flush()\n elif event_type == 'game_start':\n player.set_uuid(data.get('uuid'))\n player.receive_game_start_message(data)\n elif event_type == 'round_start':\n player.receive_round_start_message(data['round_count'], data['hole_card'], data['seats'])\n elif event_type == 'street_start':\n player.receive_street_start_message(data['street'], data['round_state'])\n elif event_type == 'game_update':\n player.receive_game_update_message(data['new_action'], data['round_state'])\n elif event_type == 'round_result':\n player.receive_round_result_message(data['winners'], data['hand_info'], data['round_state'])\n else:\n raise RuntimeError('Bad event type \"{}\"'.format(event_type))\n","sub_path":"bots/bot_denis_4.py","file_name":"bot_denis_4.py","file_ext":"py","file_size_in_byte":7348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"487357484","text":"import cherrypy\nfrom datawake.util.exceptions import datawakeexception\nfrom datawake.util.db import datawake_mysql\nimport tangelo\n\ndef is_in_session(callback):\n def has_session(**kwargs):\n if 'user' in cherrypy.session:\n return callback(**kwargs)\n tangelo.http_status(401)\n tangelo.log(\"401 Unauthorized No user in session\")\n return \"No User in the current session\"\n return has_session\n\n\n\ndef has_team(callback):\n \"\"\"\n Decorator for tangelo web services.\n Requires a team_id and checks that the current user has permissions for that team\n :param callback:\n :return:\n \"\"\"\n def verifyTeamId(**kwargs):\n if 'team_id' 
not in kwargs:\n            tangelo.http_status(500)\n            tangelo.log(\"team_id required.\")\n            return \"team id required for this call.\"\n\n        user = get_user()\n\n        # verify the user can access the team\n        if not datawake_mysql.hasTeamAccess(user.get_email(),kwargs['team_id']):\n            tangelo.content_type()\n            tangelo.http_status(401)\n            tangelo.log(\"401 Unauthorized. User has no access to requested team.\")\n            return \"401 Unauthorized\"\n\n        return callback(**kwargs)\n    return verifyTeamId\n\n\ndef has_domain(callback):\n    \"\"\"\n    Decorator for tangelo web services\n    Requires a team_id and domain_id, checks that the team can access the domain\n    :param callback:\n    :return:\n    \"\"\"\n    def verifyDomainId(**kwargs):\n\n        if 'team_id' not in kwargs or 'domain_id' not in kwargs:\n            tangelo.http_status(500)\n            tangelo.log(\"team_id and domain_id required.\")\n            return \"team id and domain id required for this call.\"\n\n        team_id = int(kwargs['team_id'])\n        domain_id = int(kwargs['domain_id'])\n        if not datawake_mysql.hasDomains(team_id,domain_id):\n            tangelo.http_status(401)\n            tangelo.log(\"401 Unauthorized. Team has no access to requested domain\")\n            return \"401 Unauthorized\"\n        return callback(**kwargs)\n    return verifyDomainId\n\n\n\ndef has_trail(callback):\n    \"\"\"\n    Decorator for tangelo web services\n    Requires a team_id and trail_id, checks that the team can access the trail\n    :param callback:\n    :return:\n    \"\"\"\n    def verifyTrailId(**kwargs):\n\n        if 'team_id' not in kwargs or 'trail_id' not in kwargs or 'domain_id' not in kwargs:\n            tangelo.http_status(500)\n            tangelo.log(\"team_id, domain id, and trail_id required.\")\n            tangelo.log(kwargs)\n            return \"team id, domain id, and trail id required for this call.\"\n\n        team_id = int(kwargs['team_id'])\n        domain_id = int(kwargs['domain_id'])\n        trail_id = int(kwargs['trail_id'])\n\n        if not datawake_mysql.hasTrail(team_id,domain_id,trail_id):\n            tangelo.http_status(401)\n            tangelo.log(\"401 Unauthorized. Team has no access to requested trail\")\n            return \"401 Unauthorized\"\n        return callback(**kwargs)\n    return verifyTrailId\n\n\ndef get_user():\n    user = cherrypy.session.get('user')\n    return user\n\n\ndef get_org():\n    user = get_user()\n    if user is not None:\n        return user.get_org()\n\n    return None\n\n\ndef get_token():\n    return cherrypy.session.get('token')\n\n\ndef is_token_in_session():\n    return 'token' in cherrypy.session\n\n\ndef expire_user():\n    if 'user' in cherrypy.session:\n        del cherrypy.session['user']\n    if 'token' in cherrypy.session:\n        del cherrypy.session['token']\n    cherrypy.lib.sessions.expire()\n    return True\n\n\n\ndef set_user(user):\n    \"\"\"\n    Set the user object.\n    If this user is logging in for the first time, automatically create a private team and a blank domain\n    :param user:\n    :return:\n    \"\"\"\n    teams = datawake_mysql.getTeams(email=user.get_email())\n    if len(teams) == 0:\n        # create a team for the new user.\n        (team_id,team_name) = datawake_mysql.createTeam(user.get_email(),'Auto generated private team.',emails=[user.get_email()])\n\n        teams = [ (team_id,team_name) ]\n        datawake_mysql.add_new_domain(team_id,\"Empty\", \"An empty domain. 
Created by default for each team.\")\n user.set_teams(teams)\n cherrypy.session['user'] = user\n return True\n\n\ndef set_token(token):\n cherrypy.session['token'] = token\n return True\n","sub_path":"server/datawake/util/session/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"520441117","text":"from splinter import Browser\n\n\nclass ChopeBrowser:\n def __init__(self, headless=False):\n self.chrome = Browser('chrome', headless=headless)\n\n def login(self, usr, pwd, domain='STUDENT'):\n url = 'https://ntupcb.ntu.edu.sg'\n url += '/fbscbs/Account/SignIn?ReturnUrl=%2ffbscbs'\n\n self.chrome.visit(url)\n dropdown = self.chrome.find_by_tag('option')\n\n for option in dropdown:\n if option.text == domain:\n option.click()\n\n self.chrome.fill('Username', usr)\n self.chrome.fill('Password', pwd + '\\n')\n\n def first_setup(self):\n button = self.chrome.find_by_id('tdFacilityBook')\n button.click()\n self.chrome.click_link_by_href('#8')\n self.chrome.click_link_by_href('#-1')\n self.chrome.click_link_by_href('/fbscbs/Booking/Create?resourceId=69')\n self.chrome.click_link_by_id('book')\n self.chrome.click_link_by_id('changeResource')\n self.chrome.click_link_by_href('#-1')\n self.chrome.click_link_by_id('book')\n\n def is_registered(event):\n if event.has_class('noShowWhite'):\n return False\n if event.has_class('currentEvent'):\n return False\n return True\n\n def check_facility(self, counter, evFacilities):\n columnWeek = self.chrome.find_by_css('.wc-event-column')\n evWeek = []\n for columnDay in columnWeek:\n evToday = []\n evList = columnDay.find_by_css('.ui-corner-all')\n for event in evList:\n # biar gatebel gw bongkar ya dan\n if event.has_class('noShowWhite'):\n continue\n if event.has_class('currentEvent'):\n continue\n if event.text == '':\n continue\n eventText = event.text\n if not eventText.find('—') == -1:\n evToday.append(eventText.split('—'))\n evWeek.append(evToday)\n evFacilities.append(evWeek)\n counter += 1\n self.click_next(counter, evFacilities)\n\n def click_next(self, counter, evFacilities):\n # Kerja rekursif dengan check_facility.\n # Milih option facility berdasarkan counter.\n dropdown = self.chrome.find_by_id('ResourceId')\n options = dropdown.find_by_tag('option')\n if counter < len(options):\n nextOption = options[counter]\n nextOption.click()\n self.check_facility(counter, evFacilities)\n else:\n return evFacilities\n\n def scrape_seats(self, usr, pwd):\n self.login(usr, pwd)\n self.first_setup()\n counter = 0\n evFacilities = []\n self.check_facility(counter, evFacilities)\n return evFacilities\n\n def quit(self):\n self.chrome.quit()\n\n\ndef try_login(usr, pwd):\n \"\"\" TODO:\n given user password lw coba dia bisa login ga ke server\n kalo bisa return true\n kalo gabisa return false\n \"\"\"\n\n \"\"\"\n btw codingan lw rapih kok wkkwkw gw cukup impressed\n kek bahkan codingan lw gaada trailing spaces\n kudos for you Dan!\n but pake spasi ya jgn pake tabs kwkwkkw but overall codingannya bagus\n and variable lw jga ga berantakan casenya itu nice\n mengurangi kerja gua KWKWKWK\n gw bahkan gapake linter gabisa kerja rapih\n dan lw melakukan itu tanpa linter\n itu keren sih\n dan gaada satupun W-unused wkkww nice...\n \"\"\"\n\n # kalo lw gangerti maksud class diatas apaan\n # ini caara untuk bikin browsernya\n # coba mainin deh\n instances = ChopeBrowser()\n url = 'http://google.com'\n instances.chrome.visit(url)\n return 
True\n","sub_path":"chopeBrowser.py","file_name":"chopeBrowser.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"290646628","text":"from selenium.webdriver.common.keys import Keys\nimport json\nimport os\nfrom time import sleep\n\nKUSTOMER_CONFIG = 'app/extra/kustomer.json'\n\nclass Kustomer:\n def __init__(self, browser):\n self.browser = browser\n self.load_config()\n\n def load_config(self):\n self.config = json.load(open(KUSTOMER_CONFIG, encoding='utf-8')) \n\n def open(self):\n self.browser.open_page(self.config['url'])\n\n def login(self):\n # Click on google login\n login_button = self.browser.find_element_before(\n self.config['login']['google_button']['name'],\n self.config['login']['google_button']['option']\n )\n self.browser.send_above_click(login_button)\n\n def select_reports(self):\n self.browser.find_element_before(\n self.config['slidebar']['reporting']['name'],\n self.config['slidebar']['reporting']['option']\n ).click()\n\n def create_report(self):\n # Request from user\n custom_report = CustomReport()\n # Create new report\n self.browser.find_element_before(\n self.config['reporting']['new']['new_button']['name'],\n self.config['reporting']['new']['new_button']['option']\n ).click()\n self.browser.find_element_before(\n self.config['reporting']['new']['modal_input']['name'],\n self.config['reporting']['new']['modal_input']['option']\n ).send_keys(custom_report.name)\n self.browser.find_element_before(\n self.config['reporting']['new']['add_report_button']['name'],\n self.config['reporting']['new']['add_report_button']['option']\n ).click()\n \n def build_report(self):\n # Request from user\n custom_report = CustomReport()\n # Create Chart\n for chart in custom_report.template['data']:\n '''\n First Part\n '''\n # Add chart\n self.browser.find_element_before(\n self.config['reporting']['add_chart']['add_chart_button']['name'],\n self.config['reporting']['add_chart']['add_chart_button']['option']\n ).click()\n # Select blank template\n self.browser.find_element_before(\n self.config['reporting']['add_chart']['add_chart_template']['name'],\n self.config['reporting']['add_chart']['add_chart_template']['option']\n ).click()\n # Add chart style\n self.browser.find_element_before(\n self.config['reporting']['add_chart']['add_chart_style']['name'].replace('[style]', chart['style']), \n self.config['reporting']['add_chart']['add_chart_style']['option']\n ).click()\n # Next Button\n self.browser.find_element_before(\n self.config['reporting']['add_chart']['next_button']['name'],\n self.config['reporting']['add_chart']['next_button']['option']\n ).click()\n '''\n Second Part\n '''\n # Add report type\n report_type = self.browser.find_element_before(\n self.config['reporting']['add_chart']['second_step']['report_type']['name'],\n self.config['reporting']['add_chart']['second_step']['report_type']['option']\n )\n self.browser.send_above_keys(chart['report_type'], report_type)\n # Add report date attribute\n date_attribute = self.browser.find_element_before(\n self.config['reporting']['add_chart']['second_step']['date_attribute']['name'],\n self.config['reporting']['add_chart']['second_step']['date_attribute']['option']\n )\n self.browser.send_above_keys(chart['attributes'], date_attribute)\n\n # Add segmentation\n attribute_calculation = chart['attribute_calculation']\n self._build_segmentation(attribute_calculation)\n\n # Match all and any\n 
self._build_match_operator(chart['all_filter'], is_and=True)\n self._build_match_operator(chart['any_filter'])\n\n # Wait for chart refresh or error...\n sleep(3)\n\n # Next\n self.browser.find_element_before(\n self.config['reporting']['add_chart']['next_button']['name'],\n self.config['reporting']['add_chart']['next_button']['option']\n ).click()\n\n '''\n Third part\n '''\n # Title\n title_input = self.browser.find_element_before(\n self.config['reporting']['add_chart']['third_step']['title_input']['name'],\n self.config['reporting']['add_chart']['third_step']['title_input']['option']\n ).send_keys(chart['title'])\n\n # Next\n self.browser.find_element_before(\n self.config['reporting']['add_chart']['save_button']['name'],\n self.config['reporting']['add_chart']['save_button']['option']\n ).click()\n\n def _build_segmentation(self, attribute_calculation):\n for index, attribute in enumerate(attribute_calculation):\n action_input = self.browser.find_element_before(\n self.config['reporting']['add_chart']['second_step']['segmentation']['action']['name'],\n self.config['reporting']['add_chart']['second_step']['segmentation']['action']['option']\n )\n self.browser.send_above_keys(attribute['action'], action_input)\n property_input = self.browser.find_element_before(\n self.config['reporting']['add_chart']['second_step']['segmentation']['property']['name'],\n self.config['reporting']['add_chart']['second_step']['segmentation']['property']['option']\n )\n self.browser.send_above_keys(attribute['action_property'], property_input)\n if 'extra_option' in attribute.keys():\n aux_input = self.browser.find_element_before(\n self.config['reporting']['add_chart']['second_step']['segmentation']['aux']['name'],\n self.config['reporting']['add_chart']['second_step']['segmentation']['aux']['option']\n )\n self.browser.send_above_keys(attribute['extra_option'], aux_input)\n if index < len(attribute_calculation) - 1:\n add_action_button = self.browser.find_element_before(\n self.config['reporting']['add_chart']['second_step']['segmentation']['add_button']['name'],\n self.config['reporting']['add_chart']['second_step']['segmentation']['add_button']['option']\n )\n self.browser.send_above_click(add_action_button)\n \n def _build_match_operator(self, elements, is_and=False):\n button_index = 0 if is_and else 1\n for index, attribute in enumerate(elements):\n add_action_button = self.browser.find_elements(\n self.config['reporting']['add_chart']['second_step']['operators_button']['name'],\n self.config['reporting']['add_chart']['second_step']['operators_button']['option']\n )[button_index]\n self.browser.send_above_click(add_action_button)\n\n action_input = self.browser.find_element_before(\n self.config['reporting']['add_chart']['second_step']['operators_input']['name'],\n self.config['reporting']['add_chart']['second_step']['operators_input']['option']\n )\n combinacion_macabra = attribute['field'] + Keys.ENTER + attribute['operator']\n if 'value' in attribute.keys():\n combinacion_macabra += Keys.ENTER + attribute['value']\n self.browser.send_above_keys(combinacion_macabra, action_input)\n\n def delete_report(self, name):\n result = self.browser.search_and_click(name, self.config['reporting']['slidebar']['custom_reports'])\n if result:\n self.browser.click_before(self.config['reporting']['misc']['misc_button'])\n self.browser.click_before(self.config['reporting']['misc']['delete'])\n self.browser.wait_until(self.config['reporting']['delete_chart']['modal'])\n confirm = input(\"Escriba Y para confirmar la 
eliminación.\")\n if confirm.lower() == 'y':\n self.browser.click(self.config['reporting']['delete_chart']['button'])\n else:\n raise Exception(\"Element not found\")\n\n\nREQUEST = 'app/extra/request.json'\nTEMPLATE = 'app/templates/'\nclass CustomReport:\n def __init__(self):\n config = json.load(open(REQUEST, encoding='utf-8')) \n self.name = config['name']\n self.queues = config['queues']\n self.language = config['language'].lower()\n self.is_rt = config['is_rt']\n self.load_template()\n\n def load_template(self):\n url = os.path.join(TEMPLATE, self.language)\n url += 'rt' if self.is_rt else ''\n url += '.json'\n self.template = json.load(open(url, encoding='utf-8'))\n for i in range(len(self.template['data'])):\n for queue in self.queues:\n queue_option = {\n \"field\": \"Queue\", \n \"operator\": \"Is Equal To\" if self.language == 'en' else 'Es igual a', \n \"value\": queue\n }\n\n self.template['data'][i]['any_filter'].append(queue_option)","sub_path":"app/model/kustomer.py","file_name":"kustomer.py","file_ext":"py","file_size_in_byte":9457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"73706966","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 17 19:10:21 2017\n@title: model manager\n@author: shubham\n\"\"\"\nfrom custom_cost_functions import *\nfrom keras.models import Model,load_model\nfrom keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D,concatenate\nfrom keras.optimizers import Adam,SGD\nfrom keras.callbacks import ModelCheckpoint\n\n#%%Model generator\ndef get_model (input_shape,\n unet_args={\"lrate\":1e-4,'momentum':0.9,\"loss\":'binary_crossentropy',\"device\":\"/gpu:0\"}):\n \"\"\"Returns a U-net model.\"\"\"\n print(\"=\"*8,\"UNET\",\"=\"*8)\n img_rows=input_shape[0]\n img_cols=input_shape[1]\n channels=input_shape[2]\n #%%Model architecture\n with K.tf.device(unet_args[\"device\"]):\n inputs= Input((img_rows,img_cols,channels))\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n \n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n \n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n \n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n \n conv4b = Conv2D(256, (3, 3), activation='relu', padding='same')(pool4)\n conv4b = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4b)\n pool4b = MaxPooling2D(pool_size=(2, 2))(conv4b)\n \n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4b)\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)\n \n up6b = UpSampling2D(size=(2, 2))(conv5)\n conv6b = Conv2D(256, (3, 3), activation='relu', padding='same')(up6b)\n conv6b= Conv2D(256, (3, 3), activation='relu', padding='same')(conv6b)\n \n up6 = UpSampling2D(size=(2, 2))(conv6b)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)\n \n up7 = UpSampling2D(size=(2, 2))(conv6)\n conv7 = Conv2D(128, (3, 
3), activation='relu', padding='same')(up7)\n        conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)\n        \n        up8 = UpSampling2D(size=(2, 2))(conv7)\n        conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)\n        conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)\n        \n        up9 = UpSampling2D(size=(2, 2))(conv8)\n        conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)\n        conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)\n        \n        conv10 = Conv2D(4, (1, 1), activation='softmax')(conv9)\n        \n        model = Model(inputs=[inputs], outputs=[conv10])\n        \n        model.compile(optimizer=SGD(lr=unet_args['lrate'],momentum=unet_args['momentum']), loss=unet_args['loss'], metrics=['acc'])\n    #%%Model summary:\n    model.summary()\n    print(\"input_shape\",input_shape)\n    print(\"optimizer:\",\"SGD\")\n    print(\"unet_args:\",unet_args)\n    dummy=input(\"Press enter to proceed.\")\n    return model\n\ndef load_keras_model(path):\n    return load_model(path)\n\nprint('-'*10,'baseline_unpooling_model.py loaded','-'*10)\n\n\nif __name__=='__main__':\n    m=get_model((64,64,2))\n    ","sub_path":"baseline_model.py","file_name":"baseline_model.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"389230020","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\n\r\n\r\nclass DemoFindElementByID():\r\n    def locate_by_id_demo(self):\r\n        print ('abrindo o navegador')\r\n        driver = webdriver.Chrome()\r\n        print ('Definindo tamanho da janela')\r\n        driver.set_window_size(624, 688)\r\n        driver.get(\"https://www.instagram.com/accounts/emailsignup/\")\r\n        window_before = driver.window_handles[0]\r\n        print (window_before)\r\n        # open a tab to go to the temp mail\r\n        print ('Indo até o mail TM')\r\n        driver.execute_script(\"window.open('https://mail.tm/pt/', '_blank')\")\r\n        # switch to the temp mail\r\n        print ('copiando o email')\r\n        window_after = driver.window_handles[1]\r\n        driver.switch_to.window(window_after)\r\n        print (window_after)\r\n        \r\n        driver.switch_to.window(driver.window_handles[1])\r\n        from time import sleep\r\n        sleep(5) \r\n        driver.find_element_by_id('address').click()\r\n        driver.switch_to.window(driver.window_handles[0])\r\n        driver.switch_to.window(window_before)\r\n        elem = driver.find_element_by_name(\"emailOrPhone\")\r\n        elem.clear()\r\n        elem.send_keys(Keys.CONTROL, \"v\")\r\n        from time import sleep\r\n        sleep(3)\r\n        from time import sleep\r\n        sleep(2)\r\n        name = driver.find_element_by_css_selector(\"input[name='fullName']\")\r\n        print('Escolhendo um nome...')\r\n        name.send_keys(\"Lourena Silva Cambalhota\")\r\n        print('Escolhendo um usuário...')\r\n        username = driver.find_element_by_css_selector(\"input[name='username']\")\r\n        username.click()\r\n        driver.delete_all_cookies()\r\n        username = driver.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div[1]/div/form/div[5]/div/div/div/button/span').click()\r\n        from time import sleep\r\n        sleep(2)\r\n        print('Definindo uma senha...')\r\n        password = driver.find_element_by_css_selector(\"input[name='password']\")\r\n        password.send_keys(\"123456789\")\r\n        login_button = driver.find_element_by_xpath(\"//button[@type='submit']\")\r\n        login_button.click()\r\n        print ('Definindo a data de aniversário da conta...')\r\n        from time import sleep\r\n        sleep(3)\r\n        ano = 
driver.find_element_by_css_selector(\"[title*='Ano:']\")\r\n ano.click()\r\n anoo = driver.find_element_by_xpath(\"//option[text()='1991']\")\r\n anoo.click()\r\n from time import sleep\r\n sleep(2)\r\n mes = driver.find_element_by_css_selector(\"[title*='Mês:']\")\r\n mes.click()\r\n mess = driver.find_element_by_xpath(\"//option[text()='setembro']\")\r\n mess.click()\r\n print('Data de aniversário inserida, avançando...')\r\n data = driver.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div[1]/div/div[6]/button')\r\n data.click()\r\n print ('Esperando o codigo de confirmação...')\r\n window_after = driver.window_handles[1]\r\n driver.switch_to.window(window_after) \r\n driver.switch_to.window(driver.window_handles[1])\r\n from time import sleep\r\n sleep(30)\r\n codigo = driver.find_element_by_xpath('//*[@id=\"__layout\"]/div/div[2]/main/div/div[2]/ul/li/a/div')\r\n codigo.click()\r\n driver.find_element_by_xpath('td[style = \"padding:10px;color:#565a5c;font-size:32px;font-weight:500;text-align:center;padding-bottom:25px;\"]')\r\n print(text)\r\n driver.switch_to.window(driver.window_handles[0])\r\n driver.switch_to.window(window_before)\r\n driver.find_element_by_css_selector(\"input[name='email_confirmation_code']\")\r\n elem.clear()\r\n elem.send_keys(Keys.CONTROL, \"v\")\r\n from time import sleep\r\n sleep(2)\r\n driver.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div[1]/div[2]/form/div/div[2]/button')\r\n print('tentando avançar..')\r\n \r\n \r\n def is_element_present(self, how, what):\r\n try: self.driver.find_element(by=how, value=what)\r\n except NoSuchElementException as e: return False\r\n return True\r\n \r\n def is_alert_present(self):\r\n try: self.driver.switch_to_alert()\r\n except NoAlertPresentException as e: return False\r\n return True\r\n \r\n def close_alert_and_get_its_text(self):\r\n try:\r\n alert = self.driver.switch_to_alert()\r\n alert_text = alert.text\r\n if self.accept_next_alert:\r\n alert.accept()\r\n else:\r\n alert.dismiss()\r\n return alert_text\r\n finally: self.accept_next_alert = True\r\n \r\n def tearDown(self):\r\n self.driver.quit()\r\n self.assertEqual([], self.verificationErrors)\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\r\n findbyid = DemoFindElementByID()\r\n findbyid.locate_by_id_demo()\r\n","sub_path":"ig mail.py","file_name":"ig mail.py","file_ext":"py","file_size_in_byte":5040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"55421363","text":"import argparse\nimport pprint\nimport torch\nfrom torch import optim\nimport torch.nn as nn\n\nfrom modules.data_loader import DataLoader\nimport modules.data_loader as data_loader\nfrom modules.transformer import Transformer\nfrom modules.trainer import Trainer\nfrom modules.trainer import IgniteEngine\n\n\ndef define_argparser(is_continue=False):\n p = argparse.ArgumentParser()\n\n if is_continue:\n p.add_argument(\n '--load_fn',\n required=True,\n help='Model file name to continue.'\n )\n\n p.add_argument(\n '--model_fn',\n default='./models/model.pth',\n help='Model file name to save. Additional information would be annotated to the file name.'\n )\n p.add_argument(\n '--train',\n default='./data/corpus.shuf.train.tok.bpe',\n help='Training set file name except the extention. (ex: train.en --> train)'\n )\n p.add_argument(\n '--valid',\n default='./data/corpus.shuf.valid.tok.bpe',\n help='Validation set file name except the extention. 
(ex: valid.en --> valid)'\n )\n p.add_argument(\n '--lang',\n default='enko',\n help='Set of extensions representing the language pair. (ex: en + ko --> enko)'\n )\n p.add_argument(\n '--gpu_id',\n type=int,\n default=0,\n help='GPU ID to train. Currently, GPU parallel is not supported. -1 for CPU. Default=%(default)s'\n )\n\n p.add_argument(\n '--batch_size',\n type=int,\n default=128,\n help='Mini batch size for gradient descent. Default=%(default)s'\n )\n p.add_argument(\n '--n_epochs',\n type=int,\n default=30,\n help='Number of epochs to train. Default=%(default)s'\n )\n p.add_argument(\n '--verbose',\n type=int,\n default=2,\n help='VERBOSE_SILENT, VERBOSE_EPOCH_WISE, VERBOSE_BATCH_WISE = 0, 1, 2. Default=%(default)s'\n )\n p.add_argument(\n '--init_epoch',\n required=is_continue,\n type=int,\n default=1,\n help='Set initial epoch number, which can be useful when continuing training. Default=%(default)s'\n )\n\n p.add_argument(\n '--max_length',\n type=int,\n default=100,\n help='Maximum length of the training sequence. Default=%(default)s'\n )\n p.add_argument(\n '--dropout',\n type=float,\n default=.2,\n help='Dropout rate. Default=%(default)s'\n )\n p.add_argument(\n '--hidden_size',\n type=int,\n default=768,\n help='Hidden size of the Transformer. Default=%(default)s'\n )\n p.add_argument(\n '--n_layers',\n type=int,\n default=4,\n help='Number of encoder/decoder blocks in the Transformer. Default=%(default)s'\n )\n p.add_argument(\n '--max_grad_norm',\n type=float,\n default=1e+8,\n help='Threshold for gradient clipping. Default=%(default)s'\n )\n p.add_argument(\n '--iteration_per_update',\n type=int,\n default=32,\n help='Number of feed-forward iterations for one parameter update. Default=%(default)s'\n )\n p.add_argument(\n '--lr',\n type=float,\n default=1e-3,\n help='Initial learning rate. Default=%(default)s',\n )\n p.add_argument(\n '--n_splits',\n type=int,\n default=8,\n help='Number of heads in multi-head attention in Transformer. 
Default=%(default)s',\n )\n\n config = p.parse_args()\n\n return config\n\n\ndef get_model(input_size, output_size, config):\n model = Transformer(\n input_size, # Source vocabulary size\n config.hidden_size, # Transformer doesn't need word_vec_size.\n output_size, # Target vocabulary size\n n_splits=config.n_splits, # Number of heads in multi-head attention.\n n_enc_blocks=config.n_layers,# Number of encoder blocks\n n_dec_blocks=config.n_layers,# Number of decoder blocks\n dropout_p=config.dropout, # Dropout rate on each block\n )\n return model\n\n\ndef get_crit(output_size, pad_index):\n # Do not put any loss weight on the PAD token\n loss_weight = torch.ones(output_size)\n loss_weight[pad_index] = 0.\n crit = nn.NLLLoss(\n weight=loss_weight,\n reduction='sum'\n )\n return crit\n\n\ndef get_optimizer(model, config):\n optimizer = optim.Adam(\n model.parameters(),\n lr=config.lr,\n betas=(.9, .98) # In Pre-LN Paper\n ) \n return optimizer\n\n\ndef main(config, model_weight=None, opt_weight=None):\n def print_config(config):\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(vars(config))\n print_config(config)\n\n loader = DataLoader(\n config.train,\n config.valid,\n (config.lang[:2], config.lang[-2:]),\n batch_size=config.batch_size,\n device=-1,\n max_length=config.max_length\n )\n\n input_size, output_size = len(loader.src.vocab), len(loader.tgt.vocab)\n model = get_model(input_size, output_size, config)\n crit = get_crit(output_size, data_loader.PAD)\n\n if model_weight:\n model.load_state_dict(model_weight)\n\n if config.gpu_id >= 0:\n model.cuda(config.gpu_id)\n crit.cuda(config.gpu_id)\n\n optimizer = get_optimizer(model, config)\n\n if opt_weight:\n optimizer.load_state_dict(opt_weight)\n\n lr_scheduler = None\n\n if config.verbose >= 2:\n print(model)\n print(crit)\n print(optimizer)\n\n trainer = Trainer(IgniteEngine, config)\n trainer.train(\n model,\n crit,\n optimizer,\n train_loader=loader.train_iter,\n valid_loader=loader.valid_iter,\n src_vocab=loader.src.vocab,\n tgt_vocab=loader.tgt.vocab,\n n_epochs=config.n_epochs,\n lr_scheduler=lr_scheduler\n )\n\n\nif __name__ == '__main__':\n config = define_argparser()\n main(config)\n","sub_path":"src/12_transformer/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"258466290","text":"# amg_test.py Basic test of AMG8833 sensor\n\n# Released under the MIT licence.\n# Copyright (c) Peter Hinch 2019\n\nimport machine\nimport utime\nfrom amg88xx import AMG88XX\n\n\ni2c = machine.I2C(1)\nsensor = AMG88XX(i2c)\nwhile True:\n utime.sleep(0.2)\n sensor.refresh()\n for row in range(8):\n print()\n for col in range(8):\n print('{:4d}'.format(sensor[row, col]), end='')\n","sub_path":"Lopy4/amg_test.py","file_name":"amg_test.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"331317730","text":"\n'''\nDescription: Opens a census workbook, reads the data and puts it into an initialized data structure (nested dictionaries),\nsaves the data as .py; retrieving the data is described below\nPage: 296\nStatus: Done\n\n'''\n\nimport openpyxl, pprint\n\nprint(\"Opening workbook...\")\n\nwb=openpyxl.load_workbook('censuspopdata.xlsx') #open excel file and assign it to wb\n\nsheet=wb.get_active_sheet() #take active sheet and assign it to sheet\n\ncountyData={}\n\nprint('Reading Data...')\n\nfor row in range(2, sheet.max_row+1):\n state=sheet['B'+str(row)].value\n 
county=sheet['C'+str(row)].value\n pop=sheet['D'+str(row)].value\n\n countyData.setdefault(state,{})\n countyData[state].setdefault(county,{'tracts': 0,'pop' : 0 })\n\n countyData[state][county]['tracts']+=1 #each row represents one tract\n countyData[state][county]['pop']+=int(pop) #update the population for each county\n\nprint('Writing results...')\nresultFile=open('census2010.py','w')\nresultFile.write('allData = ' + pprint.pformat(countyData)) #create a python file with a variable allData set to countyData\nresultFile.close()\nprint('Done')\n\n\"\"\"\nBy writing the results to a .py file it can be imported and used like any other python file\nimport os\nos.chdir(current directory)\nimport census2010\ncensus2010.allData[state][county]\n>> {'pop': x, 'tracts': y}\n\n\"\"\"\n","sub_path":"ATBS/Chapter 12/readCensusExcel.py","file_name":"readCensusExcel.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"285245509","text":"# coding:utf-8\nimport os\nfrom queue import Queue\n\nfrom common.crawler.kuwo_music_crawler import KuWoMusicCrawler\nfrom PyQt5.QtCore import QThread, pyqtSignal\n\n\nclass DownloadSongThread(QThread):\n\n downloadOneSongCompleteSig = pyqtSignal()\n\n def __init__(self, downloadFolder: str, parent=None):\n super().__init__(parent=parent)\n self.downloadFolder = downloadFolder\n self.crawler = KuWoMusicCrawler()\n self.download_queque = Queue() # download queue holding (songInfo, quality) entries\n\n def run(self):\n \"\"\" Download the queued songs \"\"\"\n os.makedirs(self.downloadFolder, exist_ok=True)\n\n while not self.download_queque.empty():\n songInfo, quality = self.download_queque.get()\n\n # send the music download request\n self.crawler.downloadSong(songInfo, self.downloadFolder, quality)\n\n # emit the signal that one song has finished downloading\n self.downloadOneSongCompleteSig.emit()\n\n def appendDownloadTask(self, songInfo: dict, quality='Standard quality'):\n \"\"\" Add a download task \"\"\"\n self.download_queque.put((songInfo, quality))\n","sub_path":"app/common/thread/download_song_thread.py","file_name":"download_song_thread.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"26354203","text":"from MDP.modules import Material\nfrom MDP.module_objects import materials, devices, stages, stage_names\nfrom RL.agents import init_Agents\n\n\n# Initialize orders as Material objects\norderlist = {}\n# Define the action of the order-handling device: distribute all unassigned products (remain) to the orders according to the product distribution in each order, and publish demand for the actual products\ndef orderdevice():\n return\n\n\n# Initialize each round's devices, taken from devices as stage_id: devices_list (str:[Device])\ndevicesInStages = {}\n\n\n# Initialize agents\nall_agents = {}\nfor stage_id in stage_names:\n stages[stage_id].set_devices(devicesInStages[stage_id])\n stage_info = stages[stage_id].info\n agents = init_Agents(stage_info)\n all_agents[stage_id] = agents # store this stage's agents\n\n# MDP\nwhile True:\n # The order stage comes first\n orderdevice()\n\n # Decision stages, from top to bottom\n for stage_id in stage_names:\n stages[stage_id].step(all_agents[stage_id])\n\n # Update Materials data\n [materials[id].step() for id in materials.keys()]\n","sub_path":"DEMO/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"7627889","text":"import numpy as np\nfrom doubleauction.environments import MarketEnvironment\nfrom doubleauction.matchers import RandomMatcher\nfrom doubleauction.agents.linear_generic_agent import LinearGenericBuyer, LinearGenericSeller\nimport matplotlib.pyplot as plt\nimport warnings\n# pandas setting 
warnings can be ignored, as they are often intended\nwarnings.simplefilter(\"ignore\")\n\n\n# Define the initial number of agents, the number of rounds and games\nn_sellers = 40\nn_buyers = 40\nn_game = 1\nn_round = 5\n\n# Create initial agents with names and reservation prices\n# All agents are the same for now\nsetting = {\n 'self_last_offer': True,\n 'same_side_last_offers': True,\n 'same_side_res_prices': True,\n 'same_side_not_done': True,\n 'other_side_last_offers': True,\n 'other_side_res_prices': True,\n 'other_side_not_done': True,\n 'completed_deals': True,\n 'current_time': True,\n 'max_time': True,\n 'n_sellers': True,\n 'n_buyers': True,\n 'previous_success': True\n}\nres_prices = np.random.normal(100, 5, n_sellers)\nnames = ['Seller ' + str(i) for i in range(1, n_sellers + 1)]\nsellers = np.array([LinearGenericSeller(agent_id=names[i], reservation_price=res_prices[i], setting=setting) for i in range(n_sellers)])\nres_prices = np.random.normal(200, 5, n_buyers)\nnames = ['Buyer ' + str(i) for i in range(1, n_buyers + 1)]\nbuyers = np.array([LinearGenericBuyer(agent_id=names[i], reservation_price=res_prices[i], setting=setting) for i in range(n_buyers)])\n\n\n# For plotting\nfig, ax = plt.subplots(figsize=(8, 8), tight_layout=True)\nax.set_xlim(95, 205)\n\n# Loop over games\nfor g in range(n_game):\n print(\"GAME\", g, '=================================================================================================================')\n\n # Define parameters of each round\n max_time = 30\n matcher = RandomMatcher(reward_on_reference=True)\n\n # Create market environment\n market_env = MarketEnvironment(sellers=sellers, buyers=buyers, max_time=max_time, matcher=matcher)\n\n # HERE AGENTS LEARN AND ADJUST THEIR COEFS (for now they are constant)\n for agent in sellers:\n size_coefs = agent.determine_size_of_coefs(n_buyers=n_buyers, n_sellers=n_sellers)\n agent.coefs = np.array([0.05, 0.95] + [0]*(size_coefs - 2))\n for agent in buyers:\n size_coefs = agent.determine_size_of_coefs(n_buyers=n_buyers, n_sellers=n_sellers)\n agent.coefs = np.array([0.05, 0.95] + [0]*(size_coefs - 2))\n\n # Reset agents' rewards and observations\n for agent in sellers:\n agent.reward = 0.0\n agent.observations = {}\n for agent in buyers:\n agent.reward = 0.0\n agent.observations = {}\n\n # Loop over rounds\n for r in range(n_round):\n print(\"ROUND\", r, '-----------------------------------------------')\n\n # Reset market environment\n market_env.reset()\n\n # Initial offers are generated\n current_offers = {}\n for agent in sellers:\n current_offers[agent.agent_id] = np.random.normal(200, 5)\n for agent in buyers:\n current_offers[agent.agent_id] = np.random.normal(100, 5)\n\n # Loop over time steps\n i = 0\n while market_env.if_round_done is False:\n print(i, '-------')\n i += 1\n # Environment calculates what happens\n market_env.step(current_offers)\n\n # All agents receive observations from what environment generated\n for agent in sellers:\n agent.receive_observations_from_environment(market_env)\n for agent in buyers:\n agent.receive_observations_from_environment(market_env)\n\n # Clearing current offers\n current_offers.clear()\n\n # Agents who are not done yet decide on a new offer, which is then inserted into the dictionary of current_offers\n for agent in sellers[market_env.not_done_sellers]:\n new_offer = agent.decide(n_sellers=n_sellers, n_buyers=n_buyers, max_time=max_time)\n current_offers[agent.agent_id] = new_offer\n for agent in buyers[market_env.not_done_buyers]:\n new_offer = 
agent.decide(n_sellers=n_sellers, n_buyers=n_buyers, max_time=max_time)\n current_offers[agent.agent_id] = new_offer\n\n # for plotting\n _, _, bars0 = ax.hist(list(current_offers.values()), 50, color='blue')\n plt.draw()\n plt.pause(0.1)\n _ = [b.remove() for b in bars0]\n","sub_path":"code/test_linear_generic_agent.py","file_name":"test_linear_generic_agent.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"578380679","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass CustomRule(Model):\n \"\"\"Defines contents of a web application rule.\n\n All required parameters must be populated in order to send to Azure.\n\n :param name: Describes the name of the rule.\n :type name: str\n :param priority: Required. Describes priority of the rule. Rules with a\n lower value will be evaluated before rules with a higher value.\n :type priority: int\n :param enabled_state: Describes if the custom rule is in enabled or\n disabled state. Defaults to Enabled if not specified. Possible values\n include: 'Disabled', 'Enabled'\n :type enabled_state: str or\n ~azure.mgmt.frontdoor.models.CustomRuleEnabledState\n :param rule_type: Required. Describes type of rule. Possible values\n include: 'MatchRule', 'RateLimitRule'\n :type rule_type: str or ~azure.mgmt.frontdoor.models.RuleType\n :param rate_limit_duration_in_minutes: Time window for resetting the rate\n limit count. Default is 1 minute.\n :type rate_limit_duration_in_minutes: int\n :param rate_limit_threshold: Number of allowed requests per client within\n the time window.\n :type rate_limit_threshold: int\n :param match_conditions: Required. List of match conditions.\n :type match_conditions: list[~azure.mgmt.frontdoor.models.MatchCondition]\n :param action: Required. Describes what action to be applied when rule\n matches. 
Possible values include: 'Allow', 'Block', 'Log', 'Redirect'\n :type action: str or ~azure.mgmt.frontdoor.models.ActionType\n \"\"\"\n\n _validation = {\n 'name': {'max_length': 128},\n 'priority': {'required': True},\n 'rule_type': {'required': True},\n 'rate_limit_duration_in_minutes': {'maximum': 5, 'minimum': 0},\n 'rate_limit_threshold': {'minimum': 0},\n 'match_conditions': {'required': True},\n 'action': {'required': True},\n }\n\n _attribute_map = {\n 'name': {'key': 'name', 'type': 'str'},\n 'priority': {'key': 'priority', 'type': 'int'},\n 'enabled_state': {'key': 'enabledState', 'type': 'str'},\n 'rule_type': {'key': 'ruleType', 'type': 'str'},\n 'rate_limit_duration_in_minutes': {'key': 'rateLimitDurationInMinutes', 'type': 'int'},\n 'rate_limit_threshold': {'key': 'rateLimitThreshold', 'type': 'int'},\n 'match_conditions': {'key': 'matchConditions', 'type': '[MatchCondition]'},\n 'action': {'key': 'action', 'type': 'str'},\n }\n\n def __init__(self, **kwargs):\n super(CustomRule, self).__init__(**kwargs)\n self.name = kwargs.get('name', None)\n self.priority = kwargs.get('priority', None)\n self.enabled_state = kwargs.get('enabled_state', None)\n self.rule_type = kwargs.get('rule_type', None)\n self.rate_limit_duration_in_minutes = kwargs.get('rate_limit_duration_in_minutes', None)\n self.rate_limit_threshold = kwargs.get('rate_limit_threshold', None)\n self.match_conditions = kwargs.get('match_conditions', None)\n self.action = kwargs.get('action', None)\n","sub_path":"src/front-door/azext_front_door/vendored_sdks/models/custom_rule.py","file_name":"custom_rule.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"363208097","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 2 12:04:55 2017\n\n@author: kamran\n\"\"\"\ncount = 0\ndef isIn(char, aStr):\n '''\n char: a single character\n aStr: an alphabetized string\n \n returns: True if char is in aStr; False otherwise\n '''\n \n \n if len(aStr) == 0:\n return False\n \n if len(aStr) == 1:\n if aStr == char:\n return True\n else: \n return False\n \n middle_char = aStr[int(len(aStr)/2)]\n \n \n if char == middle_char:\n return True\n \n if char < middle_char:\n return isIn(char, aStr[:int(len(aStr)/2)])\n \n if char > middle_char:\n return isIn(char, aStr[int(len(aStr)/2)+1:])\n \n ","sub_path":"mit-python/week2/isIn.py","file_name":"isIn.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"94623860","text":"#!/usr/bin/env python\n\"\"\"\nBasic swarm simulation based on Collective Memory and Spatial Sorting in Animal Groups by Couzin et al. (2002)\nauthor: Mitchell Scott\ncontact: mitchell.scott@wsu.edu\n\"\"\"\nimport rospy \nimport numpy as np \nfrom numpy import cos, sin\nfrom scipy.spatial.distance import cdist\nfrom geometry_msgs.msg import PoseArray, Pose, TransformStamped, Twist\nimport tf\nimport time\nimport tf2_msgs.msg\nfrom simple_swarm.msg import VelocityArray\n\nclass Swarm:\n def __init__(self):\n '''\n INIT AGENT DISTRIBUTION. 
UNITS IN m\n '''\n self.time_step = 0.1\n self.r = rospy.Rate(1/self.time_step)\n self.max_angular_velocity = np.radians(80)\n self.time_inital = time.time()\n '''\n Set up initial distribution bounds (3D box)\n '''\n x_min = -1\n x_max = 1\n y_min = -1\n y_max = 1\n z_min = -1\n z_max = 1\n self.agents_number = 100\n self.speed = 6 #Agent speed (m/s)\n '''\n Initial agent distribution\n '''\n self.x_pos = np.random.uniform(low = x_min, high = x_max, size = self.agents_number)\n self.y_pos = np.random.uniform(low = y_min, high = y_max, size = self.agents_number)\n self.z_pos = np.random.uniform(low = z_min, high = z_max, size = self.agents_number)\n roll = np.random.uniform(low = -np.pi, high = np.pi, size = self.agents_number)\n pitch = np.random.uniform(low = -np.pi, high = np.pi, size = self.agents_number)\n yaw = np.random.uniform(low = -np.pi, high = np.pi, size = self.agents_number)\n '''\n Create a random distribution, and create a rotation matrix for each agent's 3D orientation\n '''\n orientation = []\n for i in range(0, self.agents_number):\n new_roll = roll[i]\n new_pitch = pitch[i]\n yaw_setpoint = yaw[i]\n r1 = [cos(yaw_setpoint)*cos(new_pitch),\n cos(yaw_setpoint)*sin(new_pitch)*sin(new_roll) - sin(yaw_setpoint)*cos(new_roll),\n cos(yaw_setpoint)*sin(new_pitch)*cos(new_roll) + sin(yaw_setpoint)*sin(new_roll)]\n r2 = [sin(yaw_setpoint)*cos(new_pitch), \n sin(yaw_setpoint)*sin(new_pitch)*sin(new_roll) + cos(yaw_setpoint)*cos(new_roll), \n sin(yaw_setpoint)*sin(new_pitch)*cos(new_roll) - cos(yaw_setpoint)*sin(new_roll)]\n r3 = [-sin(new_pitch), cos(new_pitch)*sin(new_roll), cos(new_pitch)*cos(new_roll)]\n R_xyz = np.array([r1, r2, r3]) \n orient = np.dot(R_xyz, [1,1,1])\n orientation.append(list(np.divide(orient, [np.linalg.norm(orient)]*len(orient))))\n \n self.orientation = np.array(orientation)\n '''Vector of locations'''\n self.pos = np.array([(self.x_pos[i], self.y_pos[i], self.z_pos[i]) for i in range(0, len(self.x_pos))])\n\n self.dist = cdist(self.pos, self.pos) #distance between all agents\n \n '''\n REPULSION AND ATTRACTION PARAMETERS\n '''\n self.rr = 1\n self.ro = 4\n self.ra = 40\n \n self.delta_ro = self.ro - self.rr\n self.delta_ra = self.ra - self.ro\n '''\n ROS Publishers\n '''\n self.pose = PoseArray()\n self.pose_publisher = rospy.Publisher('/swarm_pose', PoseArray, queue_size = 10)\n self.pub_tf = rospy.Publisher(\"/tf\", tf2_msgs.msg.TFMessage, queue_size=1)\n self.twist_msg = VelocityArray()\n self.twist_publisher = rospy.Publisher('/swarm_velocities', VelocityArray, queue_size = 10)\n \n def main(self):\n '''\n Main function to run through the swarm simulation\n '''\n while not rospy.is_shutdown():\n self.full_state_vector = []\n main_agent = 0\n orientation = []\n position = []\n \n for agent in self.pos: #loop through agents\n \n main_agent += 1 \n \n agent_heading = self.orientation[main_agent - 1]\n '''Distance from main agent to all other agents'''\n neighor_distance = self.dist[main_agent - 1] \n '''List which defines which behavior we want the agent to exhibit with this particular neighbor'''\n neighbor_behavior = [] \n neighbor_agent = 0\n for neighbor_agent_distance in neighor_distance:\n neighbor_agent += 1\n \n if neighbor_agent == main_agent:\n neighbor_behavior.append(None)\n else:\n if neighbor_agent_distance < self.rr:\n neighbor_behavior.append('Repulsion')\n else:\n if neighbor_agent_distance < self.delta_ro:\n neighbor_behavior.append('Orientation')\n elif neighbor_agent_distance < self.delta_ra:\n neighbor_behavior.append('Attraction')\n else:\n 
neighbor_behavior.append(None)\n '''\n Now that the list of neighbor behaviors is constructed, create the desired behavior\n '''\n if 'Repulsion' in neighbor_behavior:\n '''\n If any agents are seen as repulsive, the agent will only move in a repulsion manner\n '''\n repulsion_index = [val for val, index in enumerate(neighbor_behavior) if index == 'Repulsion']\n repulsion_list = []\n for index in repulsion_index:\n vect_dist = np.subtract(self.pos[index], self.pos[main_agent - 1]) \n r_ij = np.divide(vect_dist, [np.linalg.norm(vect_dist)]*len(vect_dist))\n repulsion_list.append(r_ij)\n di = -sum(repulsion_list) #di is the ideal t+1 heading\n else:\n '''\n If no agents are repulsive, agent exhibits attractive and/or orientation behavior\n '''\n if 'Orientation' in neighbor_behavior:\n orientation_index = [val for val, index in enumerate(neighbor_behavior) if index == 'Orientation']\n orientation_list = []\n for index in orientation_index:\n neighbor_heading = self.orientation[index] \n r_ij = np.divide(neighbor_heading, [np.linalg.norm(neighbor_heading)]*len(neighbor_heading))\n orientation_list.append(r_ij)\n do = sum(orientation_list) \n if 'Attraction' in neighbor_behavior:\n attraction_index = [val for val, index in enumerate(neighbor_behavior) if index == 'Attraction']\n attraction_list = []\n for index in attraction_index:\n vect_dist = np.subtract(self.pos[index], self.pos[main_agent - 1]) \n r_ij = np.divide(vect_dist, [np.linalg.norm(vect_dist)]*len(vect_dist))\n attraction_list.append(r_ij)\n da = sum(attraction_list) \n \n if 'Orientation' in neighbor_behavior and 'Attraction' in neighbor_behavior:\n di = np.divide(np.add(do, da), [2]*len(do))\n \n elif 'Orientation' in neighbor_behavior:\n di = do\n elif 'Attraction' in neighbor_behavior:\n di = da\n else:\n di = agent_heading #no influential neighbors: keep the current heading\n \n '''di represents the new desired heading vector. 
Calculate the angle to it and turn towards it.'''\n \n #angle between the current and desired headings (arccos of the normalized inner product)\n angle_between = np.arccos(np.inner(agent_heading, di)/(np.linalg.norm(agent_heading)*np.linalg.norm(di)))\n if abs(angle_between) > self.max_angular_velocity*self.time_step: #if the angle between the two vectors is too large, only turn the maximum distance \n max_turn = self.max_angular_velocity*self.time_step\n normal_vector = np.cross(agent_heading, di) \n normal_vector = np.divide(normal_vector, [np.linalg.norm(normal_vector)]*len(normal_vector))\n vi = np.cos(max_turn)*agent_heading + sin(max_turn)*(np.cross(normal_vector, agent_heading))\n else:\n normal_vector = np.cross(agent_heading, di) \n normal_vector = np.divide(normal_vector, [np.linalg.norm(normal_vector)]*len(normal_vector))\n vi = np.cos(angle_between)*agent_heading + sin(angle_between)*(np.cross(normal_vector, agent_heading))\n norm_vi = np.divide(vi, [np.linalg.norm(vi)]*len(vi))\n orientation.append(norm_vi)\n \n '''\n With the new orientation known, calculate the new pose\n '''\n pose = np.add(self.pos[main_agent - 1], np.multiply(norm_vi, [self.time_step*self.speed]*len(norm_vi)))\n position.append(pose)\n \n '''\n Update position and orientation\n '''\n self.orientation = np.array(orientation)\n self.pos = np.array(position) \n \n '''\n Organize ROS transforms\n '''\n point_data = []\n for i in range(0, self.agents_number):\n '''\n Positions\n '''\n pos = Pose()\n pos.position.x = self.pos[i][0]\n pos.position.y = self.pos[i][1]\n pos.position.z = self.pos[i][2]\n \n agent_frame = self.orientation[i]\n '''\n Get angles from orientation data\n '''\n roll = np.arctan(agent_frame[0]/agent_frame[2])\n pitch = 0\n yaw = np.arctan2(agent_frame[1],agent_frame[0]) \n quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)\n pos.orientation.x = quaternion[0]\n pos.orientation.y = quaternion[1]\n pos.orientation.z = quaternion[2]\n pos.orientation.w = quaternion[3]\n point_data.append(pos) \n \n '''\n Publish transformations\n '''\n t = TransformStamped()\n t.header.frame_id = 'map'\n t.header.stamp = rospy.Time.now()\n t.transform.translation.x = 0.0\n t.transform.translation.y = 0.0\n t.transform.translation.z = 0.0\n\n t.transform.rotation.x = 0.0\n t.transform.rotation.y = 0.0\n t.transform.rotation.z = 0.0\n t.transform.rotation.w = 1.0\n t.child_frame_id = \"inital_frame\"\n tfm = tf2_msgs.msg.TFMessage([t]) \n self.pub_tf.publish(tfm)\n '''\n Publish poses\n '''\n self.pose.header.frame_id = 'map' \n self.pose.header.stamp = rospy.Time.now()\n self.pose.poses = point_data\n self.pose_publisher.publish(self.pose)\n \n self.dist = cdist(self.pos, self.pos) \n \n '''\n Publish twists\n '''\n twist_list = []\n for i in range(0, self.agents_number):\n twist = Twist()\n orient = self.orientation[i]\n twist.linear.x = self.speed*orient[0]\n twist.linear.y = self.speed*orient[1]\n twist_list.append(twist)\n self.twist_msg.header.stamp = rospy.Time.now()\n self.twist_msg.header.frame_id = '/map'\n self.twist_msg.twists = twist_list\n self.twist_publisher.publish(self.twist_msg)\n \n self.r.sleep()\n \n\nif __name__ == '__main__':\n '''\n ROS node of a swarming system\n '''\n rospy.init_node('Swarm')\n S = Swarm()\n S.main()","sub_path":"src/couzin_swarm.py","file_name":"couzin_swarm.py","file_ext":"py","file_size_in_byte":12394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"321920219","text":"\n# coding: utf-8\n\n# In[11]:\n\ndef FinReportAmWellReportOCG_Nutrition(AmwellCDRdata, LooKupTables):\n import pandas as pd\n 
AmwellCDRdata=AmwellCDRdata[AmwellCDRdata['Sub Group']==\"Y\"]\n LooKupTables2=LooKupTables.dropna(axis=0)\n LooKupTables3 = LooKupTables[pd.isnull(LooKupTables['Health Plan'])]\n AmwellCDRdata=AmwellCDRdata[AmwellCDRdata['Provider Specialty']==\"Nutritionist\"]\n\n \n \n\n\n# In[12]:\n\n Join_Practice_HP= pd.merge(LooKupTables2, AmwellCDRdata, on = ['Practice Name', 'Health Plan'], how='inner')\n Join_Practice= pd.merge(LooKupTables3, AmwellCDRdata, on = ['Practice Name'], how='inner')\n GroupedJoin_Practice_HP=(Join_Practice_HP.groupby(['Client Name', 'RTE'])['Consumer Direct Payment (Base)','Health Plan Cost (Base)', 'RTE', 'OCG RD Visit Cost' ].agg(['sum', 'count', 'mean'])).reset_index()\n GroupedJoin_Practice=(Join_Practice.groupby(['Client Name', 'RTE'])['Consumer Direct Payment (Base)','Health Plan Cost (Base)', 'RTE', 'OCG RD Visit Cost' ].agg(['sum', 'count', 'mean'])).reset_index()\n GroupedJoin_Merge=GroupedJoin_Practice.append(GroupedJoin_Practice_HP)\n\n\n # In[5]:\n\n pd.set_option('display.float_format', lambda x: '%.3f' % x)\n PureCollection = pd.DataFrame()\n Temp1=(GroupedJoin_Merge['Client Name']).to_frame()\n PureCollection['Client Name']=Temp1['Client Name']\n PureCollection.reset_index()\n #PureCollection.drop('index', axis=0, inplace=True)\n\n Temp1=(GroupedJoin_Merge['Consumer Direct Payment (Base)']['count']).to_frame()\n PureCollection['VisitCount']=Temp1['count']\n PureCollection.reset_index()\n #PureCollection.drop('index', axis=0, inplace=True)\n\n Temp1=(GroupedJoin_Merge['Consumer Direct Payment (Base)']['sum']).to_frame()\n PureCollection['SUMConsumer Direct Payment (Base)']=Temp1['sum']\n PureCollection.reset_index()\n #PureCollection.drop('index', axis=0, inplace=True)\n\n Temp1=(GroupedJoin_Merge['Health Plan Cost (Base)']['sum']).to_frame()\n PureCollection['SUMHealth Plan Cost (Base)']=Temp1['sum']\n PureCollection.reset_index()\n #PureCollection.drop('index', axis=0, inplace=True)\n\n Temp1=(GroupedJoin_Merge['OCG RD Visit Cost']['sum']).to_frame()\n PureCollection['ClinicalServicesFee']=Temp1['sum']\n PureCollection.reset_index()\n #PureCollection.drop('index', axis=0, inplace=True)\n\n Temp1=(GroupedJoin_Merge['RTE']['mean']).to_frame()\n PureCollection['RTE(Y-1_N-0)']=Temp1['mean']\n PureCollection.reset_index()\n #PureCollection.drop('index', axis=0, inplace=True)\n\n PureCollection['TotMinusDirect']=PureCollection['ClinicalServicesFee']-PureCollection['SUMConsumer Direct Payment (Base)']\n PureCollection['TotMinusHP']=PureCollection['TotMinusDirect']-(PureCollection['SUMHealth Plan Cost (Base)']*PureCollection['RTE(Y-1_N-0)'])\n PureCollection['TotalAmount2Invoice']=PureCollection['TotMinusHP']\n\n # In[ ]:\n\n return PureCollection\n\ndef FinReportAmWellReportOCG_NutritionUnmatched(AmwellCDRdata, LooKupTables):\n import pandas as pd\n AmwellCDRdata=AmwellCDRdata[AmwellCDRdata['Sub Group']==\"Y\"]\n LooKupTables2=LooKupTables.dropna(axis=0)\n LooKupTables3 = LooKupTables[pd.isnull(LooKupTables['Health Plan'])]\n AmwellCDRdata=AmwellCDRdata[AmwellCDRdata['Provider Specialty']==\"Nutritionist\"]\n\n Join_Practice_HP= pd.merge(LooKupTables2, AmwellCDRdata, on = ['Practice Name', 'Health Plan'], how='inner')\n Join_Practice= pd.merge(LooKupTables3, AmwellCDRdata, on = ['Practice Name'], how='inner')\n GroupedJoin_Merge=Join_Practice_HP.append(Join_Practice)\n\n OCG_UrgentCareUnmatched= pd.merge(AmwellCDRdata, GroupedJoin_Merge, on = ['Internal Conversation ID'], how='left')\n OCG_UrgentCareUnmatched= 
OCG_UrgentCareUnmatched[pd.isnull(OCG_UrgentCareUnmatched['Client Name'])]\n\n\n # In[ ]:\n\n return OCG_UrgentCareUnmatched","sub_path":"FinReports/FinReportAmWellReportOCG_Nutrition.py","file_name":"FinReportAmWellReportOCG_Nutrition.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"41224406","text":"import requests\nimport facebook\nimport json\nimport urllib\nimport sqlite3\nimport fb_token_info\nimport weatherAPI_token_info\n#from datetime import datetime as dt\nimport dateutil.parser\nfrom darksky import forecast\nimport webbrowser\nimport datetime\nimport yelp_token_info\nimport plotly\nimport plotly_token_info\nimport plotly.plotly as py\nimport plotly.graph_objs as go\n\n\n### PLOTLY SETUP\nplotly_username = plotly_token_info.username\nplotly_key = plotly_token_info.api_key\nplotly.tools.set_credentials_file(username = \"daniellaraz\", api_key = plotly_key)\n\n\n### FACEBOOK SETUP\ntoken = fb_token_info.access_token\ngraph = facebook.GraphAPI(token)\n\n### WEATHER SETUP\nweather_token = weatherAPI_token_info.token\n\n### YELP SETUP\nclient_id = yelp_token_info.my_id\nyelp_API_key = yelp_token_info.API_Key\nbearer_token = 'Bearer ' + yelp_API_key\n\n\n### CACHING SETUP FOR FACEBOOK\nCACHE_FNAME = \"my_facebook_testing.json\"\ntry:\n cache_file = open(CACHE_FNAME, 'r')\n cache_contents = cache_file.read()\n CACHED_POSTS = json.loads(cache_contents)\n cache_file.close()\nexcept:\n CACHED_POSTS = {}\n\n### CACHING SETUP FOR YELP\n\nCACHE_FNAME2 = \"yelp_TESTING.json\"\ntry:\n cache_file2 = open(CACHE_FNAME2, 'r')\n cache_contents2 = cache_file2.read()\n CACHED_POSTS2 = json.loads(cache_contents2)\n cache_file2.close()\nexcept:\n CACHED_POSTS2 = {}\n\n### FACEBOOK API: GETS ALL OF MY POSTS FROM FACEBOOK\ndef get_posts(CACHED_POSTS):\n if CACHED_POSTS:\n print('using cached data')\n list_of_pages = CACHED_POSTS\n else:\n print(\"Retrieving data from internet\")\n all_fields = ['message', 'created_time', 'description', 'caption', 'link', 'place', 'status_type']\n all_fields = ','.join(all_fields)\n posts = graph.get_connections('me','posts', fields = all_fields)\n\n posts_to_write = []\n list_of_pages = []\n\n while True:\n try:\n for post in posts['data']:\n posts_to_write.append(post)\n requests_data = requests.get(posts['paging']['next'])\n posts = requests_data.json()\n list_of_pages.append(posts)\n except KeyError:\n #ran out of posts\n break\n with open('my_facebook_testing.json','a') as f:\n json_encoded_posts = json.dumps(posts_to_write)\n f.write(json_encoded_posts)\n return list_of_pages\n\n### DARKSKYAPI: REQUESTS AND USES DARKSKY API TO GET THE TEMPERATURE AT THE PLACE AND TIME\n### WHERE AND WHEN I MADE A POST\ndef get_weather(latitude, longitude, time):\n\n latitude = str(latitude)\n longitude = str(longitude)\n baseURL = 'https://api.darksky.net/forecast/'\n newURL = baseURL + weather_token + '/' + latitude + ',' + longitude + ',' + time\n response = requests.get(newURL)\n data = response.json()\n return(data)\n\n### USING YELP API TO GET RESTAURANTS IN ANN ARBOR\ndef get_yelp(latitude, longitude):\n headers = {'authorization': bearer_token}\n if CACHED_POSTS2:\n print('using cached data')\n yelp_data = CACHED_POSTS2\n else:\n print(\"Retrieving data from internet\")\n latitude = str(latitude)\n longitude = str(longitude)\n baseURL2 = 'https://api.yelp.com/v3/businesses/search'\n newURL2 = baseURL2 + '?latitude=' + latitude + '&longitude=' + longitude\n response2 = 
requests.get(newURL2, headers = headers)\n yelp_data = response2.json()\n\n with open('yelp_TESTING.json','a') as f:\n json_yelp_encoded_posts2 = json.dumps(yelp_data)\n f.write(json_yelp_encoded_posts2)\n return yelp_data\n\nmy_facebook = get_posts(CACHED_POSTS)\ntop_hundred = my_facebook[0:100]\n\n### CREATING DATABASE CONNECTION\nconn = sqlite3.connect('Final_Project.sqlite')\ncur = conn.cursor()\n\n### CREATING TABLE NAMED Facebook WITH THESE COLUMNS\ncur.execute('DROP TABLE IF EXISTS Facebook')\ncur.execute('CREATE TABLE Facebook (id TEXT, status_type TEXT, message TEXT, created_time TIMESTAMP, day TEXT, time_bracket TEXT, link TEXT, longitude REAL, latitude REAL)')\n\n### CREATING TABLE NAMED Weather WITH THESE COLUMNS\ncur.execute('DROP TABLE IF EXISTS Weather')\ncur.execute('CREATE TABLE Weather (id TEXT, status_type TEXT, longitude REAL, latitude REAL, temperature REAL)')\n\n### CREATING TABLE NAMED Yelp WITH THESE COLUMNS\n### TAKING ALL THE POSTS THAT HAVE A LATITUDE AND LONGITUDE ASSOCIATED WITH THEM\n### AND FINDING THE TOP RESTAURANTS NEARBY\n### filtered by price range, distance from latlong where post was made, rating\ncur.execute('DROP TABLE IF EXISTS Restaurant_Nearby')\ncur.execute('CREATE TABLE Restaurant_Nearby (id TEXT, name TEXT, location TEXT, price_range TEXT, rating REAL)')\n\n### ITERATING THROUGH DATA AND INSERTING INTO FACEBOOK TABLE, THEN WEATHER TABLE\nfor index, my_posts in enumerate(top_hundred):\n\n ### GETTING THE LATITUDES AND LONGITUDES OF POSTS (SETTING TO 'NONE' IF THE\n ### LOCATION IS NOT AVAILABLE) BY ITERATING THROUGH NESTED DICTIONARIES\n dict_of_places = my_posts.get(\"place\", {})\n new_dict = dict_of_places.get(\"location\", {})\n latitude = new_dict.get(\"latitude\", None)\n longitude = new_dict.get(\"longitude\", None)\n time = my_posts[\"created_time\"]\n\n ### SPLITTING UP TIME TO YEAR, MONTH, DAY ELEMENTS, GETTING THE DAY OF WEEK\n split_date = time.split(\"T\")\n just_date = split_date[0]\n just_date_split = just_date.split(\"-\")\n year = just_date_split[0]\n month = just_date_split[1]\n day = just_date_split[2]\n facebook_day = datetime.date(int(year), int(month), int(day)).weekday()\n\n ### CONVERTING DIGIT TO THE CORRESPONDING NAME OF THE DAY OF THE WEEK\n if facebook_day == 0:\n facebook_day = \"Monday\"\n elif facebook_day == 1:\n facebook_day = \"Tuesday\"\n elif facebook_day == 2:\n facebook_day = \"Wednesday\"\n elif facebook_day == 3:\n facebook_day = \"Thursday\"\n elif facebook_day == 4:\n facebook_day = \"Friday\"\n elif facebook_day == 5:\n facebook_day = \"Saturday\"\n else:\n facebook_day = \"Sunday\"\n\n ### BREAKING DOWN TIME INTO ADDITIONAL DATA POINTS\n ### USING: 12:00am - 5:59am, 6:00am - 11:59pm, 12pm - 5:59 pm, and 6:00pm - 11:59pm\n just_time = split_date[1].split('+')\n just_time2 = just_time[0]\n split_time = just_time2.split(':')\n\n hour = int(split_time[0])\n minutes = int(split_time[1])\n seconds = int(split_time[2])\n\n if (hour <= 5):\n time_bracket = \"12:00am - 5:59am\"\n elif (hour <= 11):\n time_bracket = \"6:00am - 11:59am\"\n elif (hour <= 17):\n time_bracket = \"12:00pm - 5:59 pm\"\n else:\n time_bracket = \"6:00pm - 11:59pm\"\n\n ### CREATING AND INSERTING TUPLE W/ THE INFO I WANT IN THE FACEBOOK TABLE\n tuple1 = my_posts[\"id\"], my_posts[\"status_type\"], my_posts.get(\"message\", \"\"), time, facebook_day, time_bracket, my_posts.get(\"link\", \"\"), latitude, longitude\n cur.execute('INSERT INTO Facebook (id, status_type, message, created_time, day, time_bracket, link, latitude, longitude) 
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)', tuple1)\n\n ### CHECKING IF THERE IS A \"PLACE\" LISTED FOR THE POST\n if(type(latitude) == float):\n\n ### CALLING GET WEATHER FUNCTION AND PASSING THROUGH IT THE INFORMATION\n ### RETRIEVED FROM MY POSTS\n temp_at_time = get_weather(latitude, longitude, time)\n tuple2 = my_posts[\"id\"], my_posts[\"status_type\"], longitude, latitude, temp_at_time[\"currently\"][\"temperature\"]\n cur.execute('INSERT INTO Weather (id, status_type, longitude, latitude, temperature) VALUES (?, ?, ?, ?, ?)', tuple2)\n\n if index < 30:\n restaurants = get_yelp(latitude, longitude) #this is a dictionary\n y = restaurants[\"businesses\"]\n\n for elements in y: # for the elements in the list (dictionaries)\n names = elements.get(\"name\", None)\n ratings = elements.get(\"rating\", None)\n price = elements.get(\"price\", None)\n general_location = elements.get(\"location\", None)\n city_within_location = general_location.get(\"city\", None)\n\n tuple4 = my_posts[\"id\"], names, city_within_location, price, ratings\n cur.execute('INSERT INTO Restaurant_Nearby (id, name, location, price_range, rating) VALUES (?, ?, ?, ?, ?)', tuple4)\n\n\nconn.commit()\n\n### PLOTLY WORK\n\n\n### SELECTINGING ALL DAYS OF POSTS\ndays_info = cur.execute('SELECT day FROM Facebook')\ndays_info = days_info.fetchall() #fetchall() returns a list of tuples\n\nlist_of_frequencies = [0] * 7\n\ndays_of_week = ['Sundays', 'Mondays', 'Tuesdays', 'Wednesdays', 'Thursdays', 'Fridays', 'Saturdays']\ndict_of_indices = {}\n\n### ITERATING THROUGH TO FIND THE FREQUENCIES, GETTING A LIST OF FREQUENCIES, AND\n### A DICTIONARY THAT HAS INDICES ASSOCIATED WITH DAYS OF THE WEEK\n### (I.E. SUNDAY = 0, MONDAY = 1, AND SO FORTH)\nfor i, day in enumerate(days_of_week):\n dict_of_indices[day[0:-1]] = i\nfor tuples_of_days in days_info:\n x = tuples_of_days[0]\n index = dict_of_indices[x]\n list_of_frequencies[index] += 1\n\n### MAKING A PLOTLY BAR GRAPH WITH DAYS I AM MOST ACTIVE ON FACEBOOK\ndata = [go.Bar(\n x = days_of_week,\n y = list_of_frequencies\n )]\n\nlayout = go.Layout(title = 'Activity Level for Each Day of the Week', xaxis = dict(title = 'Day of the Week', titlefont = dict(family = 'Courier New, monospace', size = 18,\ncolor = '7f7f7f')), yaxis = dict(title = 'Number of Posts on Facebook', titlefont = dict(family = 'Courier New, monospace', size = 18, color = '7f7f7f')))\nfig = go.Figure(data = data, layout = layout)\npy.iplot(fig, filename = 'Most-Active-Days-On-Facebook')\n\n### report_dictionary fulfills the requirement from Part 1 - Basic Work where we\n### have to \"create a 'report' - (screen display, file output, or other\n### easy-to-read format) that shows how active you are on each day on the site.\n### ZIPPING TWO LISTS INTO A DICTIONARY OF KEY AND VALUE\nreport_dictionary = dict(zip(days_of_week, list_of_frequencies))\nprint(\"This is a 'report' of how active I am on each day on the site (number of posts per day): \")\nprint(report_dictionary)\n\n### SELECTINGING ALL DAYS OF POSTS\ntime_bracket_info = cur.execute('SELECT time_bracket FROM Facebook')\ntime_bracket_info = time_bracket_info.fetchall() #fetchall() returns a list of tuples\n\n### ITERATING THROUGH TO FIND THE FREQUENCIES, GETTING A LIST OF FREQUENCIES, AND\n### A DICTIONARY THAT HAS INDICES ASSOCIATED WITH TIMES OF DAY\n### (I.E. 
12:00am - 5:59am = 0, 6:00am - 11:59am = 1, AND SO FORTH)\ntime_bracket_list = ['12:00am - 5:59am', '6:00am - 11:59am', '12:00pm - 5:59 pm', '6:00pm - 11:59pm']\nlist_of_time_frequencies = [0] * 4\ndict_of_time_indices = {}\nfor i, time in enumerate(time_bracket_list):\n dict_of_time_indices[time] = i\nfor tuples_of_times in time_bracket_info:\n x = tuples_of_times[0]\n index_for_time = dict_of_time_indices[x]\n list_of_time_frequencies[index_for_time] += 1\n\n### MAKING A PLOTLY BAR GRAPH WITH TIMES I AM MOST ACTIVE ON FACEBOOK\ndata2 = [go.Bar(\n x = time_bracket_list,\n y = list_of_time_frequencies\n )]\n\nlayout2 = go.Layout(title = 'Activity Level for Each Time Bracket', xaxis = dict(title = 'Time of Day', titlefont = dict(family = 'Courier New, monospace', size = 18,\ncolor = '7f7f7f')), yaxis = dict(title = 'Number of Posts on Facebook', titlefont = dict(family = 'Courier New, monospace', size = 18, color = '7f7f7f')))\nfig2 = go.Figure(data = data2, layout = layout2)\npy.iplot(fig2, filename = 'Most-Active-Time-Brackets-On-Facebook')\n\n### ZIPPING TWO LISTS INTO A DICTIONARY OF KEY AND VALUE\nreport_time_dictionary = dict(zip(time_bracket_list, list_of_time_frequencies))\nprint(\"This is a 'report' of how active I am during each time frame on the site (number of posts per time frame): \")\nprint(report_time_dictionary)\n\n\n\n### SELECTINGING ALL TEMPERATURES OF POSTS\ntemperature_info = cur.execute('SELECT temperature FROM Weather')\ntemperature_info = temperature_info.fetchall() #fetchall() returns a list of tuples\n\n### ITERATING THROUGH TO FIND FREQUENCIES OF HOW MANY POSTS IN EACH CATEGORY OF\n### TEMPERATURE\ntemperature_list = ['Frigid', 'Cold', 'Mild', 'Warm', 'Hot']\nlist_of_temp_frequencies = [0] * 5\ndict_of_temp_indices = {}\n\nfor i, temp in enumerate(temperature_list):\n dict_of_temp_indices[temp] = i\nfor tuples_of_temps in temperature_info:\n x = tuples_of_temps[0]\n if x <= 32:\n temp_category = 'Frigid'\n elif(x > 32 and x <= 45):\n temp_category = 'Cold'\n elif(x > 45 and x <= 55):\n temp_category = 'Mild'\n elif(x > 55 and x <= 65):\n temp_category = 'Warm'\n else:\n temp_category = 'Hot'\n index_for_temp = dict_of_temp_indices[temp_category]\n list_of_temp_frequencies[index_for_temp] += 1\n\n### MAKING A PLOTLY BAR GRAPH WITH TIMES I AM MOST ACTIVE ON FACEBOOK\ndata3 = [go.Bar(\n x = temperature_list,\n y = list_of_temp_frequencies\n )]\n\nlayout3 = go.Layout(title = 'Activity Level During Temperature Ranges', xaxis = dict(title = 'Weather', titlefont = dict(family = 'Courier New, monospace', size = 18,\ncolor = '7f7f7f')), yaxis = dict(title = 'Number of Posts on Facebook', titlefont = dict(family = 'Courier New, monospace', size = 18, color = '7f7f7f')))\nfig3 = go.Figure(data = data3, layout = layout3)\npy.iplot(fig3, filename = 'Most-Active-Weather-Range-On-Facebook')\n\n### ZIPPING TWO LISTS INTO A DICTIONARY OF KEY AND VALUE\nreport_temp_dictionary = dict(zip(temperature_list, list_of_temp_frequencies))\nprint(\"This is a 'report' of how active I am during each temperature category (number of posts per temperature category): \")\nprint(report_temp_dictionary)\n\n\n### SELECTINGING ALL TEMPERATURES OF POSTS\npricing_rating_info = cur.execute('SELECT price_range, rating FROM Restaurant_Nearby')\npricing_rating_info = pricing_rating_info.fetchall() #fetchall() returns a list of tuples\n\nprice_bracket_list = ['$', '$$', '$$$', '$$$$']\nlist_of_averages = []\nnum_prices = len(price_bracket_list)\nlist_of_price_frequencies = [0] * 
num_prices\ndict_of_price_indices = {}\nlist_of_rating_sum = [0] * num_prices\nlist_of_average_rating_per_price = []\nfor i, price in enumerate(price_bracket_list):\n dict_of_price_indices[price] = i\nfor tuples_of_pricing_and_rating in pricing_rating_info:\n price, rating = tuples_of_pricing_and_rating\n ### GETTING RID OF EUROS AND WHERE PRICING IS NONE\n if price != None and price != '€' and price != '€€' and price != '€€€' and price != '€€€€':\n index_for_price = dict_of_price_indices[price]\n list_of_price_frequencies[index_for_price] += 1\n list_of_rating_sum[index_for_price] += rating\n### FINDING AVERAGE BY DIVIDING SUM BY THE NUMBER OF NUMBERS\nfor i in range(num_prices):\n if(list_of_price_frequencies[i]!= 0):\n list_of_averages.append(list_of_rating_sum[i]/list_of_price_frequencies[i])\n else:\n list_of_averages.append(0)\n\n### MAKING A PLOTLY BAR GRAPH w/ COMPARISON OF PRICING AND RATINGS\n### DO PRICIER RESTAURANTS HAVE HIGHER RATINGS?\ndata4 = [go.Bar(\n x = price_bracket_list,\n y = list_of_averages\n )]\npy.iplot(data4, filename = 'Rating-Pricing-Comparison')\n\n### ZIPPING TWO LISTS INTO A DICTIONARY OF KEY AND VALUE\nreport_price_rating_dictionary = dict(zip(price_bracket_list, list_of_averages))\nprint(\"This is a 'report' of how the average ratings of a restaurant compare to its average priciness. Are more expensive restaurants rated higher on average?: \")\nprint(report_price_rating_dictionary)\n","sub_path":"Yelp_Facebook_FinalProject.py","file_name":"Yelp_Facebook_FinalProject.py","file_ext":"py","file_size_in_byte":15535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"357094448","text":"import numpy as np\r\nimport pandas as pd\r\nimport sys\r\n\r\nS, A, gamma, alpha, lambda_ = None, None, None, None, None\r\n\r\ndef initialize():\r\n Q = np.zeros((S,A))\r\n N = np.zeros((S,A))\r\n return Q, N\r\n\r\n\r\ndef update_model(sars_, Q, N):\r\n l = None\r\n for idx, row in sars_.iterrows():\r\n s, a, r, s_ = row['s']-1, row['a']-1, row['r']-1, row['sp']-1\r\n if l != None and l[3] == s:\r\n N[l[0], l[1]] += 1\r\n delta = r + gamma * Q[s,a] - Q[l[0], l[1]]\r\n Q[s][a] += alpha * delta * N[s,a]\r\n N[s][a] *= gamma * lambda_\r\n else:\r\n N[:,:] = 0\r\n l = (s, a, r, s_)\r\n return Q, N\r\n\r\n\r\ndef output_policy(Q, outfile):\r\n pi = np.argmax(Q, axis=-1)\r\n with open(outfile, \"w\") as f:\r\n for p in pi:\r\n f.write(str(p+1)+\"\\n\")\r\n\r\n\r\ndef compute(infile, outfile, k_max):\r\n Q, N = initialize()\r\n sars_ = pd.read_csv(infile)\r\n for _ in range(k_max):\r\n Q_new, N_new = update_model(sars_, Q, N)\r\n if np.sum(Q_new - Q) <= 1e-1:\r\n break\r\n Q, N = Q_new, N_new\r\n output_policy(Q, outfile)\r\n\r\n\r\ndef main():\r\n global S, A, gamma, alpha, lambda_\r\n if len(sys.argv) != 5:\r\n print(\"Usage should be: sarsa.py \")\r\n \r\n if sys.argv[1] == 'small':\r\n inputfilename = 'data/small.csv'\r\n outputfilename = 'small.policy'\r\n S = 100\r\n A = 4\r\n gamma = 0.95\r\n elif sys.argv[1] == 'medium':\r\n inputfilename = 'data/medium.csv'\r\n outputfilename = 'medium.policy'\r\n S = 50000\r\n A = 7\r\n gamma = 1\r\n elif sys.argv[1] == 'large':\r\n inputfilename = 'data/large.csv'\r\n outputfilename = 'large.policy'\r\n S = 312020\r\n A = 9\r\n gamma = 0.95\r\n else:\r\n print(\"No specified file type\")\r\n\r\n alpha = float(sys.argv[2])\r\n lambda_ = float(sys.argv[3])\r\n k_max = int(sys.argv[4])\r\n compute(inputfilename, outputfilename, k_max)\r\n\r\nif __name__ == \"__main__\":\r\n 
main()\r\n","sub_path":"project2/sarsa.py","file_name":"sarsa.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"331123639","text":"import scipy.io as scio\r\nimport numpy as np\r\n\r\ndata = scio.loadmat('ex3data1.mat')\r\ndata1 = data.get('X')\r\nlabel = data.get('y')\r\n\r\ndef sigmoid(x):\r\n return 1/(1+np.exp(-x))\r\n\r\nparameters = scio.loadmat('ex3weights.mat')\r\n\r\ntheta1 = parameters.get('Theta1')\r\ntheta2 = parameters.get('Theta2')\r\n\r\ndata_Neur = np.insert(data1, 0, 1, axis=1)\r\nhidden_layer = sigmoid(np.dot(data_Neur, theta1.T))\r\nz_2 = np.insert(hidden_layer, 0, 1, axis=1)\r\noutput_layer = sigmoid(np.dot(z_2, theta2.T))\r\nmax_prob = np.argmax(output_layer, axis=1)\r\nout = max_prob + 1\r\n\r\naccuracy = np.mean(out == label.T)\r\nprint('accuracy = {0}%'.format(accuracy * 100))","sub_path":"ex3/Neural Networks.py","file_name":"Neural Networks.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"259337856","text":"#!/usr/bin/env python\nimport logging\n\nfrom dipy.data import get_sphere\nfrom nose.tools import assert_equal\nimport numpy as np\nfrom scipy.spatial.distance import (cosine, euclidean, mahalanobis)\nfrom scipy.special import logsumexp, softmax\nimport torch\nfrom torch.nn.utils.rnn import PackedSequence\n\nfrom dwi_ml.models.direction_getter_models import (\n CosineRegressionDirectionGetter, FisherVonMisesDirectionGetter,\n GaussianMixtureDirectionGetter, L2RegressionDirectionGetter,\n SingleGaussianDirectionGetter, SphereClassificationDirectionGetter)\nfrom dwi_ml.models.utils.fisher_von_mises import (\n fisher_von_mises_log_prob_vector)\n\n\"\"\"\nIncluded utils are:\n test_cosine_regression_loss()\n - identical vectors\n - vectors with same angles\n - vectors at 90 degrees\n - vectors at 180 degrees\n - comparison with scipy.spatial.distance.cosine\n test_l2regression_loss()\n - identical vectors\n - comparison with scipy.spatial.distance.euclidean\n test_gaussian_loss()\n - x = mu\n - comparison with (manual + scipy)\n test_mixture_loss()\n - comparison with (manual + scipy)\n\"\"\"\n# toDo\n# test fisher von mises\n\ntol = 1e-5\nd = 3\n\nlogging.getLogger().setLevel(level='DEBUG')\n\n\ndef _independent_gaussian_log_prob_vector(x, mus, sigmas):\n \"\"\"\n Equivalent to the torch method in model.utils.gaussians. 
Easier to test.\n\n Parameters\n ----------\n x = the variable\n mu = mean of the gaussian (x,y,z directions)\n sigmas = standard deviation of the gaussian (x,y,z directions)\n \"\"\"\n # The inverse of a diagonal matrix is just inverting values on the\n # diagonal\n cov_inv = np.eye(d) * (1 / sigmas ** 2)\n\n # sum(log) = log(prod)\n logpdf = -d / 2 * np.log(2 * np.pi) - np.log(np.prod(sigmas)) \\\n - 0.5 * mahalanobis(x[:3], mus, cov_inv) ** 2\n return logpdf\n\n\ndef _get_random_vector(size=3):\n scaling = np.random.randint(1, 9)\n return np.array(np.random.randn(size), dtype=np.float32) * scaling\n\n\ndef _prepare_tensor(a):\n if isinstance(a, tuple):\n a = tuple([torch.as_tensor(i[None, :], dtype=torch.float32)\n for i in a])\n elif isinstance(a, np.ndarray):\n a = torch.as_tensor(a[None, :], dtype=torch.float32)\n return a\n\n\ndef _prepare_packedsequence(a):\n if not isinstance(a, PackedSequence):\n a = PackedSequence(data=(torch.as_tensor(a[None, :],\n dtype=torch.float32)),\n batch_sizes=torch.as_tensor([1]))\n return a\n\n\ndef _compute_loss_tensor(outputs, targets, model):\n outputs = _prepare_tensor(outputs)\n targets = _prepare_tensor(targets)\n\n mean_loss, _ = model.compute_loss(outputs, targets)\n # logging.debug(\"Means loss: {}.\".format(mean_loss))\n\n return np.asarray(mean_loss)\n\n\ndef test_cosine_regression_loss():\n logging.debug('Testing cosine regression loss')\n\n np.random.seed(1234)\n model = CosineRegressionDirectionGetter(3)\n\n logging.debug(\" - Identical vectors x: expecting -1\")\n a = np.array([1, 0, 0])\n b = np.array([1, 0, 0])\n expected = np.array(-1)\n value = _compute_loss_tensor(a, b, model)\n assert_equal(value, expected)\n\n logging.debug(\" - Identical vectors y: expecting -1\")\n a = np.array([0, 1, 0])\n b = np.array([0, 1, 0])\n expected = np.array(-1)\n value = _compute_loss_tensor(a, b, model)\n assert_equal(value, expected)\n\n logging.debug(\" - Identical vectors z: expecting -1\")\n a = np.array([0, 0, 1])\n b = np.array([0, 0, 1])\n expected = np.array(-1)\n value = _compute_loss_tensor(a, b, model)\n assert_equal(value, expected)\n\n logging.debug(\" - Vectors with same angle: expecting -1\")\n scales = np.random.random(20) * 20\n for s in scales:\n a = np.array([1, 0, 0])\n b = a * s\n expected = np.array(-1)\n value = _compute_loss_tensor(a, b, model)\n assert_equal(value, expected)\n\n logging.debug(\" - Vectors with at 90 degrees 1: expecting 0\")\n a = np.array([1, 0, 0])\n b = np.array([0, 1, 0])\n expected = np.array(0)\n value = _compute_loss_tensor(a, b, model)\n assert_equal(value, expected)\n\n logging.debug(\" - Vectors with at 90 degrees 2: expecting 0\")\n a = np.array([1, 0, 0])\n b = np.array([0, 0, 1])\n expected = np.array(0)\n value = _compute_loss_tensor(a, b, model)\n assert_equal(value, expected)\n\n logging.debug(\" - Vectors with at 90 degrees random: expecting 0\")\n for _ in range(20):\n a = _get_random_vector(3)\n b = _get_random_vector(3)\n c = np.cross(a, b)\n expected = np.array(0)\n\n value = _compute_loss_tensor(a, c, model)\n assert np.allclose(value, expected, atol=tol), \\\n \"Failed; got: {}; expected: {}\".format(value, expected)\n\n value = _compute_loss_tensor(b, c, model)\n assert np.allclose(value, expected, atol=tol), \\\n \"Failed; got: {}; expected: {}\".format(value, expected)\n\n logging.debug(\" - Vectors with at 180 degrees random: expecting 1\")\n for _ in range(20):\n a = _get_random_vector(3)\n b = np.array(-a * (np.random.random() + 1e-3) *\n np.random.randint(1, 10), 
dtype=np.float32)\n expected = np.array(1)\n\n value = _compute_loss_tensor(a, b, model)\n assert np.allclose(value, expected, atol=tol), \\\n \"Failed; got: {}; expected: {}\".format(value, expected)\n\n logging.debug(\" - Random vectors: comparing with cosine.\")\n for _ in range(200):\n a = _get_random_vector(3)\n b = _get_random_vector(3)\n # model outputs -cos(a,b), but cosine computes 1-cos(a,b)\n expected = cosine(a, b) - 1\n\n value = _compute_loss_tensor(a, b, model)\n assert np.allclose(value, expected, atol=tol), \\\n \"Failed; got: {}; expected: {}\".format(value, expected)\n\n\ndef test_l2regression_loss():\n logging.debug('\\nTesting l2 regression loss')\n\n np.random.seed(1234)\n model = L2RegressionDirectionGetter(1)\n\n logging.debug(\" - Identical vectors: expecting 0\")\n a = _get_random_vector(3)\n b = a\n expected = np.array(0)\n value = _compute_loss_tensor(a, b, model)\n assert np.allclose(value, expected, atol=tol),\\\n \"Failed; got: {}; expected: {}\".format(value, expected)\n\n # Test for random vector, compared to scipy's euclidean\n for _ in range(200):\n a = _get_random_vector(3)\n b = _get_random_vector(3)\n expected = euclidean(a, b)\n value = _compute_loss_tensor(a, b, model)\n assert np.allclose(value, expected, atol=tol),\\\n \"Failed; got: {}; expected: {}\".format(value, expected)\n\n\ndef test_sphere_classification_loss():\n logging.debug('\\nTesting sphere classification loss')\n\n model = SphereClassificationDirectionGetter(1)\n sphere = get_sphere('symmetric724')\n\n logging.debug(\" - Neg log likelihood, expecting -ln(softmax).\")\n\n logging.debug(\" - Exactly the right class.\")\n # exactly the right class (#1)\n # Note. To be realistic:\n # With as many classes (724), the value of the output must be very\n # high to have a low loss. 
The outputs (logits) don't have to be\n # probabilities, as a softmax will be applied by torch.\n logit = np.zeros((1, 724)).astype('float32')\n logit[0, 1] = 100\n b = sphere.vertices[1]\n expected = -np.log(softmax(logit))[0, 1]\n value = _compute_loss_tensor(logit, b, model)\n assert np.allclose(value, expected, atol=tol), \\\n \"Failed; got: {}; expected: {}\".format(value, expected)\n\n logging.debug(\" - With eps difference in the target: \"\n \"Should get the same class.\")\n logit = np.zeros((1, 724)).astype('float32')\n logit[0, 1] = 1\n eps = 1e-3\n b = sphere.vertices[1] + eps\n expected = -np.log(softmax(logit))[0, 1]\n value = _compute_loss_tensor(logit, b, model)\n assert np.allclose(value, expected, atol=tol), \\\n \"Failed; got: {}; expected: {}\".format(value, expected)\n\n logging.debug(\" - Exactly the right class test 2.\")\n logit = np.random.rand(1, 724).astype('float32')\n logit[0, 1] = 1\n b = sphere.vertices[1]\n expected = -np.log(softmax(logit))[0, 1]\n value = _compute_loss_tensor(logit, b, model)\n assert np.allclose(value, expected, atol=tol), \\\n \"Failed; got: {}; expected: {}\".format(value, expected)\n\n logging.debug(\" - Random\")\n logit = np.random.rand(1, 724).astype('float32')\n b = sphere.vertices[1]\n expected = -np.log(softmax(logit))[0, 1]\n value = _compute_loss_tensor(logit, b, model)\n assert np.allclose(value, expected, atol=tol), \\\n \"Failed; got: {}; expected: {}\".format(value, expected)\n\n\ndef test_gaussian_loss():\n logging.debug('\\nTesting gaussian loss')\n\n np.random.seed(1234)\n model = SingleGaussianDirectionGetter(1)\n\n logging.debug(\" - Expecting mahalanobis value\")\n\n logging.debug(\" - x = mu\")\n for _ in range(20):\n a_means = _get_random_vector(3)\n a_sigmas = np.exp(_get_random_vector(3))\n b = a_means\n\n # expected: x-mu = 0 ==> mahalanobis = 0\n expected = -(-3 / 2 * np.log(2 * np.pi) - np.log(np.prod(a_sigmas)))\n\n value = _compute_loss_tensor((a_means, a_sigmas), b, model)\n assert np.allclose(value, expected, atol=tol), \\\n \"Failed; got: {}; expected: {}\".format(value, expected)\n\n logging.debug(\" - random\")\n for _ in range(200):\n a_means = _get_random_vector(3)\n a_sigmas = np.exp(_get_random_vector(3))\n b = _get_random_vector(3)\n\n # Manual logpdf computation\n logpdf = _independent_gaussian_log_prob_vector(b, a_means, a_sigmas)\n expected = -logpdf\n\n value = _compute_loss_tensor((a_means, a_sigmas), b, model)\n assert np.allclose(value, expected, atol=tol), \\\n \"Failed; got: {}; expected: {}\".format(value, expected)\n\n\ndef test_mixture_loss():\n logging.debug('\\nTesting mixture loss')\n\n np.random.seed(1234)\n model = GaussianMixtureDirectionGetter(1)\n\n logging.debug(\" - Expecting neg logsumexp(log_mixture + logpdf)\")\n\n logging.debug(\" - Random\")\n for _ in range(200):\n # 3 Gaussians * (1 mixture param + 3 means + 3 variances)\n # (no correlations)\n a_mixture_logits = _get_random_vector(3)\n a_means = _get_random_vector(3 * 3).reshape((3, 3))\n a_sigmas = np.exp(_get_random_vector(3 * 3)).reshape((3, 3))\n b = _get_random_vector(3)\n\n # Manual logpdf computation\n mixture_params = softmax(a_mixture_logits)\n logpdfs = np.array(\n [_independent_gaussian_log_prob_vector(b, a_means[i], a_sigmas[i])\n for i in range(3)])\n expected = -logsumexp(np.log(mixture_params) + logpdfs)\n\n value = _compute_loss_tensor(\n (a_mixture_logits, a_means, a_sigmas), b, model)\n assert np.allclose(value, expected, atol=tol), \\\n \"Failed; got: {}; expected: {}\".format(value, expected)\n\n\ndef 
test_fisher_von_mises():\n    logging.debug('\nTesting Fisher-von Mises loss')\n\n    model = FisherVonMisesDirectionGetter(1)\n\n    logging.debug(\" - Expecting log prob.\")\n\n    logging.debug(\" - x = mu\")\n    a_means = _get_random_vector(3)\n    a_kappa = np.exp(_get_random_vector(1))\n    b = a_means\n\n    expected = -fisher_von_mises_log_prob_vector(a_means, a_kappa, b)\n    value = _compute_loss_tensor((a_means, a_kappa), b, model)\n    assert np.allclose(value, expected, atol=tol), \\\n        \"Failed; got: {}; expected: {}\".format(value, expected)\n\n    logging.debug(\" - Random\")\n    a_means = _get_random_vector(3)\n    a_kappa = np.exp(_get_random_vector(1))\n    b = _get_random_vector(3)\n\n    expected = -fisher_von_mises_log_prob_vector(a_means, a_kappa, b)\n    value = _compute_loss_tensor((a_means, a_kappa), b, model)\n    assert np.allclose(value, expected, atol=tol), \\\n        \"Failed; got: {}; expected: {}\".format(value, expected)\n\n\nif __name__ == '__main__':\n    np.random.seed(1234)\n    test_cosine_regression_loss()\n    test_fisher_von_mises()\n    test_gaussian_loss()\n    test_l2regression_loss()\n    test_mixture_loss()\n    test_sphere_classification_loss()\n","sub_path":"dwi_ml/tests/unit_tests/test_losses.py","file_name":"test_losses.py","file_ext":"py","file_size_in_byte":12249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"498294401","text":"\n# -*-coding:utf8-*-\n\n\nimport nonebot\nfrom aiocqhttp.exceptions import Error as CQHttpError\nfrom datetime import datetime\nimport random\nimport requests\nimport json\nimport time\n\n\nVR_uid_list=[61639371]\nVR_group_list=[\n    [950253287,849437495]\n    ]\nVR_name_list=['轴伊']\n\n\n@nonebot.scheduler.scheduled_job('interval',minutes=1)\nasync def _():\n    bot = nonebot.get_bot()\n    for i in range(min(len(VR_uid_list),len(VR_group_list))):\n        res=''\n        dynamic_content = GetDynamicStatus(VR_uid_list[i], i)\n        for content in dynamic_content:\n            try:\n                for groupnum in VR_group_list[i]:\n                    res = await bot.send_group_msg(group_id=groupnum, message=content)\n            except CQHttpError as e:\n                pass\n\n        live_status = GetLiveStatus(VR_uid_list[i])\n        if live_status != '':\n            for groupnum in VR_group_list[i]:\n                await bot.send_group_msg(group_id=groupnum, message=VR_name_list[i] +' 开播啦啦啦!!! 
' + live_status)\n\n\n\ndef GetDynamicStatus(uid, VRindex):\n    res = requests.get('https://api.vc.bilibili.com/dynamic_svr/v1/dynamic_svr/space_history?host_uid='+str(uid)+'&offset_dynamic_id=0')\n    res.encoding='utf-8'\n    res = res.text\n    #res = res.encode('utf-8')\n    cards_data = json.loads(res)\n    cards_data = cards_data['data']['cards']\n    try:\n        with open(str(uid)+'Dynamic','r') as f:\n            last_dynamic_str = f.read()\n            f.close()\n    except Exception as err:\n        last_dynamic_str=''\n        pass\n    if last_dynamic_str == '':\n        last_dynamic_str = cards_data[1]['desc']['dynamic_id_str']\n    print(last_dynamic_str)\n    index = 0\n    content_list=[]\n    cards_data[0]['card'] = json.loads(cards_data[0]['card'],encoding='gb2312')\n    nowtime = int(time.time())\n    # each 'card' is itself a JSON string and has to be parsed again\n    while last_dynamic_str != cards_data[index]['desc']['dynamic_id_str']:\n        #stop once an entry is more than 105 seconds old\n        if nowtime-cards_data[index]['desc']['timestamp'] > 105:\n            break\n        try:\n            if (cards_data[index]['desc']['type'] == 64):\n                content_list.append(VR_name_list[VRindex] +'发了新专栏「'+ cards_data[index]['card']['title'] + '」并说: ' +cards_data[index]['card']['dynamic'])\n            else:\n                if (cards_data[index]['desc']['type'] == 8):\n                    content_list.append(VR_name_list[VRindex] + '发了新视频「'+ cards_data[index]['card']['title'] + '」并说: ' +cards_data[index]['card']['dynamic'])\n                else:\n                    if ('description' in cards_data[index]['card']['item']):\n                        #new post that carries pictures\n                        content_list.append(VR_name_list[VRindex] + '发了新动态: ' +cards_data[index]['card']['item']['description'])\n                        #CQ code usage reference: [CQ:image,file=http://i1.piimg.com/567571/fdd6e7b6d93f1ef0.jpg]\n                        for pic_info in cards_data[index]['card']['item']['pictures']:\n                            content_list.append('[CQ:image,file='+pic_info['img_src']+']')\n                    else:\n                        #this is a repost; the original post lives in cards-item-origin, itself another very long string...\n                        #origin = json.loads(cards_data[index]['card']['item']['origin'],encoding='gb2312')  # untested, not sure this parses\n                        if 'origin_user' in cards_data[index]['card']:\n                            origin_name = cards_data[index]['card']['origin_user']['info']['uname']\n                            content_list.append(VR_name_list[VRindex]+ '转发了「'+ origin_name + '」的动态并说: ' +cards_data[index]['card']['item']['content'])\n                        else:\n                            #plain text post without pictures\n                            content_list.append(VR_name_list[VRindex]+ '发了新动态: ' +cards_data[index]['card']['item']['content'])\n            content_list.append('本条动态地址为'+'https://t.bilibili.com/'+ cards_data[index]['desc']['dynamic_id_str'])\n        except Exception as err:\n            print('PROCESS ERROR')\n            pass\n        index += 1\n#        print(len(cards_data))\n#        print(index)\n        if len(cards_data) == index:\n            break\n        cards_data[index]['card'] = json.loads(cards_data[index]['card'])\n    f = open(str(uid)+'Dynamic','w')\n    f.write(cards_data[0]['desc']['dynamic_id_str'])\n    f.close()\n    return content_list\n\n\ndef GetLiveStatus(uid):\n    res = requests.get('https://api.live.bilibili.com/room/v1/Room/getRoomInfoOld?mid='+str(uid))\n    res.encoding = 'utf-8'\n    res = res.text\n    try:\n        with open(str(uid)+'Live','r') as f:\n            last_live_str = f.read()\n            f.close()\n    except Exception as err:\n        last_live_str = '0'\n        pass\n    live_data = json.loads(res)\n    live_data = live_data['data']\n    now_live_status = str(live_data['liveStatus'])\n    live_title = live_data['title']\n    f = open(str(uid)+'Live','w')\n    f.write(now_live_status)\n    f.close()\n    if last_live_str == '0':\n        if now_live_status == '1':\n            return live_title\n    return ''\n\n\n\ndef main():\n    print(GetDynamicStatus(455916618,0))\n\nif __name__ == \"__main__\":\n    
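# running this file directly performs a single one-off fetch for the hard-coded test uid above\n    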
main()\n","sub_path":"VRBot_github.py","file_name":"VRBot_github.py","file_ext":"py","file_size_in_byte":5470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"645317181","text":"#coding:utf-8\nfrom baseView.baseView import BaseView\nfrom common.desired_caps import appium_desired\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.by import By\nimport logging\nimport time,os\nimport csv\n\n\n\nclass Common(BaseView):\n    skipBtn=(By.ID, 'com.tal.kaoyan:id/tv_skip')\n\n#skip the banner shown after the app starts\n    def check_skipBtn(self):\n        logging.info('========check_skipBtn========')\n        try:\n            skipBtn=self.driver.find_element(*self.skipBtn)\n        except NoSuchElementException:\n            logging.info('no skip button')\n        else:\n            skipBtn.click()\n\n#get the screen size\n    def get_size(self):\n        x = self.driver.get_window_size()['width']\n        y = self.driver.get_window_size()['height']\n        return x, y\n\n#swipe left\n    def swipeLeft(self):\n        logging.info('swipeLeft')\n        l= self.get_size()\n        x1 = int(l[0]*0.9)\n        y1 = int(l[1]*0.5)\n        x2 = int(l[0]*0.1)\n        self.swipe(x1,y1,x2,y1,1000)\n\n#swipe up\n    def swipeUp(self):\n        l= self.get_size()\n        x1 = int(l[0]*0.5)\n        y1 = int(l[1]*0.9)\n        y2 = int(l[1]*0.3)\n        self.swipe(x1,y1,x1,y2,2000)\n\n\n#get the current time\n    def getTime(self):\n        self.now=time.strftime(\"%Y-%m-%d %H:%M:%S\")\n        return self.now\n\n#take a screenshot\n    def getScreenshot(self, module):\n        time=self.getTime()\n        image_file=os.path.dirname(os.path.dirname(__file__))+'/screenshots/%s_%s.png' %(module, time)\n\n        logging.info('get %s screenshot'%module)\n        self.driver.get_screenshot_as_file(image_file)\n\n# privacy-terms popup that appears after login\n    term_agree=(By.ID, 'com.tal.kaoyan:id/tv_agree')\n    def check_privateTerm(self):\n        logging.info('===check_privateTerm===')\n        try:\n            element=self.driver.find_element(*self.term_agree)\n        except NoSuchElementException:\n            pass\n        else:\n            logging.info('agree private term')\n            element.click()\n\n# read rows from the csv files in the data folder\n    def get_csv_data(self, csv_file,line):\n        logging.info('=====get_csv_data=====')\n        with open(csv_file, 'r', encoding='utf-8-sig') as file:\n            reader=csv.reader(file)\n            for index, row in enumerate(reader,1):\n                if index == line:\n                    return row\n\n\n\n\n\nif __name__ == '__main__':\n    driver=appium_desired()\n    com=Common(driver)\n    # com.check_skipBtn()\n    com.swipeLeft()\n    com.getScreenshot('startApp')\n\n","sub_path":"Appium_kaoyb/common/common_fun.py","file_name":"common_fun.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"542334861","text":"'''\nfunction to extract time resolution from the beta scope measurement\n'''\n\nimport ROOT\n\ndef get_time_resolution( tfile_name, cuts, cfd, dut_ch, trig_ch, return_histo=False ):\n    tfile = ROOT.TFile.Open(tfile_name, \"r\")\n    ttree_wfm = tfile.wfm\n\n    #parameter to project on the histogram. 
Time difference of dut and trig\n tdiff = \"cfd%s[%s]-cfd%s[20]\"%(dut_ch, cfd, trig_ch)\n\n #create default histogram for pre-processing.\n preHisto = ROOT.TH1D(\"preHisto\", \"preHisto\", 100, 1, 1)\n ttree_wfm.Project(\"preHisto\", tdiff, cuts)\n sample_std = preHisto.GetStdDev(1)\n sample_mean = preHisto.GetMean(1)\n num_events = preHisto.GetEntries()\n IQR = 2*0.67845*sample_std #might not be the correct one\n\n #cfd time different histogram for timing resolution extraction\n bin_width = 2*IQR/pow(num_events, 1.0/3.0)\n #raw_input(bin_width)\n #raw_input(sample_std)\n #raw_input(sample_mean)\n min_range = sample_mean - 5.0*sample_std\n max_range = sample_mean + 5.0*sample_std\n num_bins = int((max_range-min_range)/bin_width)\n tdiff_histo = ROOT.TH1D(\"tdiff_histo\", \"tdiff_histo\", num_bins, min_range, max_range)\n ttree_wfm.Project(\"tdiff_histo\", tdiff, cuts)\n gaussian = ROOT.TF1(\"gaussian\", \"gaus\")\n tdiff_histo.Fit(gaussian)\n sigma = gaussian.GetParameter(2)\n sigma_err = gaussian.GetParError(2)\n\n #returning histogram\n if return_histo:\n tdiff_histo.SetDirectory(0)\n return {\"histo\":tdiff_histo, \"sigma\":sigma, \"sigma_err\":sigma_err}\n else:\n return {\"sigma\":sigma, \"sigma_err\":sigma_err}\n\n\n#main\nif __name__ == \"__main__\":\n\n import argparse\n cml_parser = argparse.ArgumentParser()\n cml_parser.add_argument(\"--CFD\", dest=\"CFD\", nargs=\"?\", default=\"50\", type=int, help=\"CFD\")\n\n argv = cml_parser.parse_args()\n\n import math\n ROOT.gROOT.SetBatch(True)\n ROOT.gStyle.SetOptFit(1)\n\n trigger_resolution = 16.7 #ps\n trigger_resolution_err = 0.7\n\n import configparser\n config_file = configparser.ConfigParser()\n config_file.read( \"run_info_v08022018.ini\" )\n\n #get number of files(runs)\n num_files = config_file[\"header\"][\"number_of_runs\"]\n\n file_prefix = \"\"\n if config_file[\"header\"][\"use_selected_events\"] == \"true\":\n file_prefix = \"Selected_\"\n else:\n file_prefix = \"\"\n\n\n sensor_name = config_file[\"header\"][\"sensor\"]\n dut_ch = config_file[\"header\"][\"dut_channel\"]\n trig_ch = config_file[\"header\"][\"trigger_channel\"]\n\n output = []\n for runIndex in range(int(num_files)):\n\n fileName = file_prefix + config_file[\"run%s\"%runIndex][\"file_name\"]\n bias = int(config_file[\"run%s\"%runIndex][\"bias\"].split(\"V\")[0])\n try:\n temperature = config_file[\"run%s\"%runIndex][\"temperature\"]\n except:\n temperature = -30\n raw_cut = config_file[\"run%s\"%runIndex][\"cut_%s\"%dut_ch].split(\" \")\n #raw_input(raw_cut)\n dut_cut = \"tmax%s[0]-cfd3[20] > %s && tmax%s[0]-cfd3[20] < %s && pmax%s[0] > %s && pmax%s[0] < %s\"%(dut_ch, raw_cut[0], dut_ch, raw_cut[1], dut_ch, raw_cut[2], dut_ch, raw_cut[3] )\n trig_cut = \"tmax%s[0]-cfd3[20] > %s && tmax%s[0]-cfd3[20] < %s && pmax%s[0] > %s && pmax%s[0] < %s\"%(trig_ch, raw_cut[4], trig_ch, raw_cut[5], trig_ch, raw_cut[6], trig_ch, raw_cut[7] )\n cuts = dut_cut + \" && \"+ trig_cut\n result = get_time_resolution(fileName, cuts, argv.CFD, dut_ch, trig_ch, True)\n\n dut_time_res = math.sqrt( math.pow(result[\"sigma\"],2) - math.pow(trigger_resolution,2) )\n dut_time_res_err = math.sqrt( math.pow(result[\"sigma\"],2)/(math.pow(result[\"sigma\"],2) - math.pow(trigger_resolution,2))*math.pow(result[\"sigma_err\"],2) + math.pow(trigger_resolution,2)/(math.pow(result[\"sigma\"],2) - math.pow(trigger_resolution,2))*math.pow(trigger_resolution_err, 2))\n\n output.append(\"%s,%s,%s,%s\"%(temperature, bias, dut_time_res,dut_time_res_err))\n\n #saving plots\n 
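#one PNG per run, named after the bias, temperature and chosen CFD fraction\n        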
result[\"histo\"].GetXaxis().SetTitle(\"Time Difference\")\n c = ROOT.TCanvas(\"c%s\"%runIndex)\n c.cd()\n result[\"histo\"].Draw()\n c.SaveAs(\"bias_%s_temp_%sC_tdiff_fit_CFD%s.png\"%(bias, temperature, argv.CFD))\n \n\n print(\"Sensor: %s\"%sensor_name)\n print(\"Temp,Bias,Res,ResErr\")\n for o in output:\n print(o)\n","sub_path":"scripts/betaScope_pyScript/get_time_res.py","file_name":"get_time_res.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"347006320","text":"# imports from pygame library\nimport random, time, pygame, sys, copy\nfrom pygame.locals import *\n\n# Define the colors we will use in RGB format\nBLACK = ( 0, 0, 0)\nWHITE = (255, 255, 255)\nBLUE = ( 0, 0, 255)\nGREEN = ( 0, 255, 0)\nRED = (255, 0, 0)\n\n#Define Global constants\nwidth, height = 320, 320\nnumRowsCols = 4 # must be square for this game...\n\ndef main():\n # Initialize the game screen\n pygame.init()\n gameScreen=pygame.display.set_mode((width, height))\n #print (\"hello world\")\n \n #initialize the blocks\n gameGrid = makeGrid(numRowsCols,numRowsCols)\n printGrid(gameGrid)\n \n #draw one block\n blockWidth=width/numRowsCols\n blockHeight=height/numRowsCols\n pygame.draw.rect(gameScreen, RED, [0, 0, blockWidth, blockHeight])\n #block = pygame.Surface((blockWidth,blockHeight). 0, screen)\n pygame.draw.rect(gameScreen, WHITE, [0, 0, blockWidth, blockHeight], int(blockWidth*0.05))\n \n #draw text over the block\n #pygame.font.init() # If you've already called pygame.init() in your program, you don't have to call pygame.font.init()\n blockFont = pygame.font.SysFont(pygame.font.get_default_font(), 28, True, False)\n blockTextSurface = blockFont.render(str(gameGrid[0][0].numValue), False, WHITE)\n gameScreen.blit(blockTextSurface, ((blockWidth - blockTextSurface.get_rect().width) / 2, (blockHeight - blockTextSurface.get_rect().height) / 2))\n \n gameGrid[1][0].DrawBlock(gameScreen, 1, 0)\n \n DrawGrid(gameScreen,gameGrid)\n pygame.display.flip()\n \n #main game loop\n endGame=False\n while (1):\n if (endGame==True):\n #any other cleanup can go here\n break\n \n # 8 - loop through the events\n for event in pygame.event.get():\n # check if the event is the X button \n if event.type==pygame.QUIT:\n## # if it is quit the game and everything\n pygame.quit()\n endGame=True\n \n \n# Creates a 2D List of 0's, nCols x nRows large, and fills with SlidingBlock type objects\ndef makeGrid(nCols,nRows):\n grid = []\n numbers=[]\n for x in range(0,nCols*nRows):\n numbers.append(x)\n random.shuffle(numbers)\n x=0\n print(numbers)\n \n for i in range(nRows):\n # Create an empty list for each row\n grid.append([])\n for j in range(nCols):\n # Pad each column in each row with a 0\n grid[i].append(SlidingBlock(numbers[x]))\n x=x+1\n \n return grid\n\n# object type for the block\nclass SlidingBlock:\n def __init__(self,value):\n self.numValue=value # number on block; 0 denotes empty space\n \n # screen is screen to draw on\n # (i,j) is the col and row of the position of the block in the grid\n def DrawBlock(self, screen, i, j):\n #draw one block\n blockWidth=width/numRowsCols\n blockHeight=height/numRowsCols\n pygame.draw.rect(screen, RED, [j*blockWidth, i*blockHeight, blockWidth, blockHeight])\n #block = pygame.Surface((blockWidth,blockHeight). 
0, screen)\n        pygame.draw.rect(screen, WHITE, [j*blockWidth, i*blockHeight, blockWidth, blockHeight], int(blockWidth*0.05))\n        \n        #draw text over the block\n        #pygame.font.init() # If you've already called pygame.init() in your program, you don't have to call pygame.font.init()\n        blockFont = pygame.font.SysFont(pygame.font.get_default_font(), 28, True, False)\n        blockTextSurface = blockFont.render(str(self.numValue), False, WHITE)\n        screen.blit(blockTextSurface, (j*blockWidth + ((blockWidth - blockTextSurface.get_rect().width) / 2), i*blockHeight + ((blockHeight - blockTextSurface.get_rect().height) / 2)))\n        \n        #don't flip here; let the calling function flip so the blocks all update together!\n        #pygame.display.flip()\n\ndef printGrid(grid):\n    for i in range(numRowsCols):\n        row = \"\"\n        for j in range(numRowsCols):\n            row += str(grid[i][j].numValue) + \" \"\n        print (row)\n\ndef DrawGrid(screen, grid):\n    for i in range(numRowsCols):\n        for j in range(numRowsCols):\n            if(grid[i][j].numValue != 0):\n                grid[i][j].DrawBlock(screen,i,j)\n            #else do nothing because position 0 is the empty spot\n\nif __name__ == '__main__':\n    main()","sub_path":"slide-15_v1.py","file_name":"slide-15_v1.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"306290029","text":"\"\"\"\r\nDjango settings for gamekip project.\r\n\r\nFor more information on this file, see\r\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\r\n\r\nFor the full list of settings and their values, see\r\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\r\n\"\"\"\r\n\r\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\r\nimport os\r\nimport sys\r\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\r\nTEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]\r\n\r\n# Quick-start development settings - unsuitable for production\r\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\r\n\r\n# SECURITY WARNING: keep the secret key used in production secret!\r\nSECRET_KEY = '=zq8m4z-g676hs@m%6ls58q7o$1su!^@qfi3j-_fu#t_5@feo6'\r\n\r\n# SECURITY WARNING: don't run with debug turned on in production!\r\nDEBUG = True\r\n\r\nTEMPLATE_DEBUG = True\r\n\r\nALLOWED_HOSTS = []\r\nSITE_ID = 1\r\n\r\n\r\n# Application definition\r\n\r\nINSTALLED_APPS = (\r\n\t'django.contrib.admin',\r\n\t'django.contrib.auth',\r\n\t'django.contrib.contenttypes',\r\n\t'django.contrib.sessions',\r\n\t'django.contrib.messages',\r\n\t'django.contrib.staticfiles',\r\n\t'django.contrib.humanize',\r\n\t'django.contrib.sites',\r\n\t'django.contrib.redirects',\r\n\r\n\t# Libraries\r\n\t'south', # db migration || pip install south\r\n\t'notifications',# pip install django-notifications-hq\r\n\t'post_office', # pip install django-post_office\r\n\t'django_user_agents', #pip install pyyaml ua-parser user-agents\r\n\t'actstream' , # pip install django-activity-stream\r\n\t#pip install pytz // in case of timezone errors\r\n\t# pip install --upgrade google-api-python-client\r\n\r\n\t# pip install django-user-agents\r\n\t# pip install schedule\r\n\r\n\t# Gamekip apps\r\n\t'gamekip.templatetags.app_filters',\r\n\t'gamekip',\r\n)\r\n\r\nMIDDLEWARE_CLASSES = 
(\r\n\t'django.middleware.gzip.GZipMiddleware',\r\n\t'django.contrib.sessions.middleware.SessionMiddleware',\r\n\t'django.middleware.common.CommonMiddleware',\r\n\t'django.middleware.csrf.CsrfViewMiddleware',\r\n\t'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n\t'django.contrib.messages.middleware.MessageMiddleware',\r\n\t'django.middleware.clickjacking.XFrameOptionsMiddleware',\r\n\t'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\r\n\t'gamekip.analytic_middelware.AnalyticMiddelware',\r\n\t'django_user_agents.middleware.UserAgentMiddleware',\r\n)\r\n\r\nROOT_URLCONF = 'gamekip.urls'\r\n\r\nWSGI_APPLICATION = 'gamekip.wsgi.application'\r\n\r\nAUTHENTICATION_BACKENDS = ('gamekip.backends.EmailAuthBackend',)\r\nLOGIN_URL = 'login'\r\nLOGIN_REDIRECT_URL = '/'\r\n\r\n\r\n# Database\r\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\r\n\r\ndefault_db = {\r\n\t'ENGINE': 'django.db.backends.postgresql_psycopg2',\r\n\t'USER': 'postgres',\r\n\t'NAME': 'teamquest',\r\n\t'PASSWORD': 'cw#lv2012',\r\n\t'HOST': '127.0.0.1',\r\n\t'PORT': '5432',\r\n}\r\n\r\nsqlite_db = {\r\n\t'ENGINE': 'django.db.backends.sqlite3',\r\n\t'NAME': os.path.join(BASE_DIR, 'gamekip.db'),\r\n}\r\n\r\nDATABASES = {\r\n\t'default': sqlite_db,#default_db,\r\n}\r\n\r\n# Internationalization\r\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\r\n\r\nLANGUAGE_CODE = 'es'\r\n\r\nTIME_ZONE = 'America/Caracas'\r\nUSE_I18N = True\r\nUSE_L10N = True\r\nUSE_TZ = True\r\n\r\nTEMPLATE_CONTEXT_PROCESSORS = (\r\n\t\"django.contrib.auth.context_processors.auth\",\r\n\t\"django.core.context_processors.request\",\r\n\t\"django.contrib.messages.context_processors.messages\",\r\n)\r\n\r\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\r\n\r\n# Static files (CSS, JavaScript, Images)\r\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\r\n\r\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\r\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\r\nMEDIA_URL = '/media/'\r\nSTATIC_URL = '/static/'\r\n\r\nSTATICFILES_DIRS = (\r\n\tos.path.join(BASE_DIR, 'staticfiles'),\r\n)\r\n\r\n## email\r\nEMAIL_HOST = 'smtp.gmail.com'\r\nEMAIL_PORT = '587'\r\nEMAIL_HOST_USER = 'gamekip.inc@gmail.com'\r\nEMAIL_HOST_PASSWORD = 'gkip@20142'\r\nEMAIL_USE_TLS = True\r\n\r\nNOTIFICATIONS_SOFT_DELETE=True\r\nEMAIL_BACKEND = 'post_office.EmailBackend'\r\n\t\r\nif len(sys.argv) > 1 and sys.argv[1] != 'runserver':\r\n\r\n\tPREPEND_WWW = True\r\n\tSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\r\n\tSESSION_COOKIE_SECURE = True\r\n\tCSRF_COOKIE_SECURE = True\r\n","sub_path":"gamekip/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"92966199","text":"# coding: utf-8\nfrom datetime import datetime\nfrom openerp import models, api\nimport time\nfrom dateutil.parser import parse\nfrom odoo.exceptions import UserError\nfrom odoo import models, fields, api\n\nclass CashReport(models.AbstractModel):\n _name = 'report.v12_pwk.report_cash_report'\n _template = 'v12_pwk.report_cash_report'\n\n @api.model\n def _get_report_values(self, docids, data=None): \n self.model = self.env.context.get('active_model') \n docs = self.env[self.model].browse(self.env.context.get('active_id'))\n payment_records = []\n partner_records = []\n total_records = [] \n partner_ids = []\n journal_ids = [] \n balance = 0\n saldo_awal = 0\n\n if docs.account_id:\n move_ids = 
self.env['account.move.line'].search([\n                ('move_id.date','<', docs.date_from),\n                ('move_id.state','=', 'posted'), # only posted journal entries enter the opening balance\n                ('account_id', '=', docs.account_id.id),\n                ])\n\n            if move_ids:\n                for move in move_ids:\n                    balance = balance + move.debit - move.credit\n\n            payment_ids = self.env['account.move.line'].search([ \n                ('move_id.date','>=', docs.date_from),\n                ('move_id.date','<=', docs.date_to),\n                ('account_id', '=', docs.account_id.id), \n                ], order=\"date asc\") \n\n            saldo_awal = balance\n            # elif not docs.journal_id: \n            #     payment_ids = self.env['account.payment'].search([ \n            #         ('payment_date','=', docs.date_from),\n            #         ], order=\"payment_date asc\")\n        \n        if docs.date_from and docs.account_id:\n            subtotal_debit = 0\n            subtotal_credit = 0\n            subtotal_saldo = 0\n            description = ''\n\n            for payment in payment_ids:\n                origin_ids = self.env['account.payment'].search([\n                    ('move_name','=',payment.move_id.name)\n                ])\n\n                if origin_ids:\n                    description = origin_ids[0].new_description\n\n                balance = balance + payment.debit - payment.credit\n                payment_records.append({\n                    'date': payment.date.strftime('%d-%B-%Y'),\n                    'description': description,\n                    'debit': payment.debit,\n                    'credit': payment.credit,\n                    'bank': payment.account_id.name,\n                    'saldo': balance,\n                    'name': payment.move_id.name,\n                }) \n\n                subtotal_debit += payment.debit\n                subtotal_credit += payment.credit\n\n            total_records.append({\n                'bank': docs.account_id.name,\n                'date_from': docs.date_from.strftime('%d-%B-%Y'),\n                'date_to': docs.date_to.strftime('%d-%B-%Y'),\n                'total_credit': subtotal_debit,\n                'total_debit': subtotal_credit,\n                'total_saldo': balance,\n                'saldo_awal': saldo_awal,\n                'office': docs.office,\n                'dibuat_oleh': docs.dibuat_oleh,\n                'print_date': fields.Date.today().strftime('%d-%B-%Y'),\n            })\n\n        else:\n            raise UserError(\"Please enter duration\") \n\n        return {\n            'doc_ids': self.ids,\n            'doc_model': self.model,\n            'docs': docs,\n            'time': time,\n            'orders': payment_records,\n            'total': total_records,\n        }\n\nclass CashReportWizard(models.TransientModel):\n    _name = \"cash.report.wizard\"\n    _description = \"Cash Report Wizard\"\n\n    account_id = fields.Many2one('account.account', string='Bank / Cash')\n    date_from = fields.Date(string='From Date')\n    date_to = fields.Date(string='To Date') \n    dibuat_oleh = fields.Char(string='Dibuat')\n    office = fields.Selection([('Temanggung','Temanggung'),('Jakarta','Jakarta')], string='Lokasi', default=\"Temanggung\")\n    partner_ids = fields.Many2many('res.partner', string='Partner', domain=\"[('supplier','=',True)]\")\n    journal_ids = fields.Many2many('account.journal', string=\"Journal Bank\")\n\n    def print_report(self, data):\n        self.ensure_one()\n        [data] = self.read()\n        cash_ids = self.env['account.move'].browse([])\n        datas = {\n            'ids': [],\n            'model': 'account.move',\n            'form': data\n        }\n        \n        return self.env.ref('v12_pwk.action_report_cash_report').with_context(from_transient_model=True).report_action(cash_ids,data=datas)\n","sub_path":"v12_pwk/wizard/cash_report.py","file_name":"cash_report.py","file_ext":"py","file_size_in_byte":4691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"263918985","text":"from haishoku.haishoku import Haishoku\nfrom PIL import Image, ImageDraw\nimport random\nfrom random import randrange\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-u', '--url', required=True, help=\"url to the image\")\nparser.add_argument('-a', '--amount', required=True, help=\"the amount of circles\")\nparser.add_argument('-r', '--radius', 
required=True, help=\"the max radius\")\nargs = parser.parse_args()\n\npalette = Haishoku.getPalette(args.url)\nw, h = 1200, 1200\n\nimage = Image.new(\"RGB\", (w, h), random.choice(palette)[1])\nlayer = ImageDraw.Draw(image)\n\nfor i in range(0, int(args.amount)):\n r,g,b = random.choice(palette)[1]\n color = \"rgb(\" + str(r) + \",\" + str(g) + \",\" + str(b) + \")\"\n x = randrange(w)\n y = randrange(h)\n radius = randrange(0, int(args.radius))\n layer.ellipse((x, y, x + radius, y + radius), fill=color)\n\nimage.show()\n","sub_path":"37_draw_something_mouseless_coding.py","file_name":"37_draw_something_mouseless_coding.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"616134471","text":"import requests,re\nfrom PIL import Image\ndef find_img_links(html):\n\tpat=re.compile('src=\"(.+?.jpg)\" bdwater=')\n\tlinks=re.findall(pat,html)\n\treturn links\n\nif __name__=='__main__':\n\thtml=requests.get('http://tieba.baidu.com/p/2166231880')\n\thtml=html.text\n\tlinks=find_img_links(html)\n\tprint(links)\n\tcount=0\n\tfor i in links:\n\t\timg=requests.get(i)\n\t\twith open(str(count)+'.jpg','wb') as f:\n\t\t\tf.write(img.content)\n\t\tcount+=1\n\n","sub_path":"python/quiz/0013_findImg/find_imgs.py","file_name":"find_imgs.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"355927682","text":"# This is build from the following paper\n#\n# One pixel attack for fooling deep neural networks\n# Jiawei Su, Danilo Vasconcellos Vargas, and Kouichi Sakurai\n# http://arxiv.org/abs/1710.08864\n\nimport time\nimport pickle\nfrom functools import partial\nimport numpy as np\nfrom scipy.optimize import differential_evolution\nfrom matplotlib import pyplot as plt\nimport keras.backend as K\nfrom keras.models import load_model, Model\nfrom keras.layers import Lambda\nfrom keras.datasets import cifar10\n\ndef modify(x, image, box_size):\n pixels = len(x)//2\n shape = (pixels, 2)\n x = np.reshape(x, shape)\n \n # Copy Image\n image = np.copy(image)\n\n # Modify The Pixes\n for i in range(pixels):\n x_idx, y_idx = x[i, :]\n x_idx = int(np.floor(x_idx))\n y_idx = int(np.floor(y_idx))\n\n for bx in range(box_size):\n for by in range(box_size):\n image[x_idx+bx, y_idx+by] = [0.0, 0.0, 1.0]\n \n return image\n\n\ndef predict(x, image, detector, box_size):\n # Modifiy the specified pixels\n mimg = modify(x, image, box_size)\n \n # Test New Image\n pred = detector.predict(np.reshape(mimg, (1,32,32,3)))[0][3]\n\n # Return Prediction\n return pred\n\n\ndef success(x, convergence=1.0, image=None, detector=None, box_size=None):\n\n # Modifiy the specified pixels\n mimg = modify(x, image, box_size)\n pred = detector.predict(np.reshape(mimg, (1,32,32,3)))[0]\n \n # Return if image is no longer a cat\n retVal = False\n c = pred[3]\n for p in pred:\n if (p > c):\n retVal = True\n\n if retVal:\n print(\"Found Solution\")\n\n return retVal\n\n\ndef find_change(image, detector, pixel_count=1, max_iter=10, pop_size=200, box_size=1):\n\n # Get Image Shape\n shape = image.shape\n\n # Define bounds\n bounds = [(0,shape[0]-(box_size-1)), (0,shape[1]-(box_size-1))]\n bounds = bounds * pixel_count\n\n # Constrain Population Size\n pop_size = max(1, pop_size // len(bounds))\n \n # Set Default Parameters for Callbacks\n p = partial(predict, image=image, detector=detector, box_size=box_size)\n c = partial(success, image=image, detector=detector, 
box_size=box_size)\n \n # Differential Evolution\n result = differential_evolution(p, bounds, maxiter=max_iter, popsize=pop_size,\n recombination=1, callback=c, polish=False)\n\n # Return Modification Array\n return result.x\n\n# Load Data\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n# Get Cat Images\ncat_idxs = [i for i, x in enumerate(y_train.flatten().tolist()) if x == 3]\ncat_imgs = x_train[cat_idxs,:,:,:]\ncat_imgs = cat_imgs.astype('float32') / 255.0\n\n# Parameters\npixel_cnt = 1\nmax_iter = 50\npop_size = 50\n\n# Load Detector\ndetector = load_model('trained_models/cifar10_detector_model.h5')\n\n# Predict Images\npreds = detector.predict(cat_imgs)\n\n# Collect Data\nfor box_size in [4,1]:\n print(\"processing boxs: {}\".format(box_size))\n data = []\n avg = 2.25\n rem = (len(cat_imgs) * avg) / 60.0\n \n for i, (image, pred) in enumerate(zip(cat_imgs, preds)):\n print('[{:.4f}, {:.4f}] processing image: {} of {} ({}, {})'.format(avg, rem, i, len(cat_imgs), box_size, pred[3]))\n if ((i % 500) == 0):\n draw_img = True\n\n if (pred[3] > 0.9):\n start = time.time()\n change = find_change(image, detector, pixel_cnt, max_iter=max_iter, pop_size=pop_size, box_size=box_size)\n end = time.time()\n avg = avg + ((end-start) - avg) / (i+1)\n rem = ((len(cat_imgs) - (i+1)) * avg) / 60.0\n new_img = modify(change, image, box_size)\n new_pred = detector.predict(np.reshape(new_img, (1,32,32,3)))[0]\n data.append({'image':image, 'orig_pred': pred, 'change': change, 'new_image': new_img, 'new_pred': new_pred})\n\n if (draw_img):\n draw_img = False\n fig, ax = plt.subplots(1, 2)\n ax[0].imshow(image)\n ax[0].set_title(\"orig: {:.2f}\".format(pred[3]))\n ax[0].axis('off')\n ax[1].imshow(new_img)\n ax[1].set_title('modified: {:.2f}'.format(new_pred[3]))\n ax[1].axis('off')\n fig.savefig(\"output/train_{}_{}.png\".format(i, box_size))\n\n print('saving data')\n with open('output/cat_change_{}.p'.format(box_size), 'wb') as fd:\n pickle.dump(data, fd)\n","sub_path":"extra/train_generator4.py","file_name":"train_generator4.py","file_ext":"py","file_size_in_byte":4131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"491077261","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 20 18:24:55 2018\n\n@author: cgill\n\"\"\"\n\nimport numpy as np\nfrom scipy import integrate\nimport matplotlib.pyplot as plt\n\nR = 8.314 # <- Molar gas constant\n\ndef f(x):\n ''' Function which returns the integrand of the debye model\n \n Parameters:\n x: numpy array\n \n Returns:\n integra: numpy array\n '''\n if x==0:\n integra = 0 # <- Result of Taylor expansion of integrand\n else:\n expo = np.exp(x)\n integra = x**4*expo/(expo-1)**2\n return integra\n\ndef cV(N, T, theta_D):\n ''' Function which returns the molar heat capacity of a monatomic solid\n as described by the debye model\n \n Parameters:\n N: integer corresponding to number of samples\n T: float value of temperature T (in K)\n theta_D: Debye temperature float value\n \n Returns:\n molar_heat: float value of molar heat capacity value for T'''\n xs = 0\n if T==0:\n return 0 #<- Handles divide by zero error in theta_D/T\n xe = theta_D/T\n integral = integrate.quad(f, xs, xe)\n molar_heat = 9*R*(T/theta_D)**3*integral[0]\n return molar_heat\n\ndef cVArr(temp, theta_D):\n ''' Function which returns a numpy array of the molar heat capacity of a\n monatomic solid as described by the debye model\n \n Parameters:\n temp: numpy array oftemperature values T\n \n theta_D: 
Debye temperature float value\n    \n    Returns:\n        cVArray: numpy array of molar heat capacity values for each T\n    '''\n    N = temp.size\n    cVArray = np.zeros(N) # <- Ensures cV is same size as Temp array\n    index = 0\n    for T in np.nditer(temp):\n        cVArray[index] = cV(N, T, theta_D)\n        index += 1\n    return cVArray\n    \n# Data creation\nN = 1000 # <- Number of samples\nT = np.linspace(0, 300, N) # <- Temperature data\ntheta_D = [12.2, 30.6] # <- Debye temp\ncolour = ['r', 'b'] # <- Colour of data for each Debye temp\nlabel = ['$\\\Theta_D=12.2$K', '$\\\Theta_D=30.6$K']\n\n# create the figure and plot\nfig = plt.figure(figsize=(9, 7))\nax = fig.add_subplot(1, 1, 1)\n\nindex = 0\nfor i in theta_D:\n    cVArray = cVArr(T, theta_D[index])\n    ax.plot(T, cVArray, colour[index], label=label[index])\n    index += 1\n\nax.set_xlim((0, 300))\nax.set_xlabel(\"Temperature, $T$ (K)\")\nax.set_ylabel(\"Molar Heat Capacity, $c_V$ (J K${}^{-1}$ mol${}^{-1}$)\")\nax.set_title(\"Molar heat capacities for solids with Debye temperatures 12.2K and 30.6K\")\nax.text(0.98, 0.15, 'Callum Gill\\\n21.11.2018', transform=ax.transAxes,\n        horizontalalignment='right', fontsize=14)\nplt.axhline(y=3*R, color='k', linestyle='--', label=r'Dulong-Petit model, $c_V=3R$')\nplt.legend(loc='upper right')\nplt.show()","sub_path":"Python-programs/Useful-physics-programs/Debye Solid/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"386202449","text":"from org.nmrfx.chemistry.io import PDBFile\nfrom org.nmrfx.chemistry.io import SDFile\nfrom org.nmrfx.chemistry.io import Mol2File\nfrom org.nmrfx.chemistry.io import Sequence\nfrom org.nmrfx.structure.chemistry import Molecule\nfrom org.nmrfx.chemistry import MoleculeFactory\nfrom org.nmrfx.chemistry.io import MMcifReader\n\nfrom java.util import ArrayList\nfrom java.lang import ClassLoader\nfrom java.io import BufferedReader\nfrom java.io import InputStreamReader\n\n\n\ndef updateAtomArray():\n    ''' Updates the molecule atom array '''\n    mol = MoleculeFactory.getActive()\n    mol.updateAtomArray()\n\ndef readMMCIF(fileName):\n    ''' Reads an mmCIF file and modifies the static Molecule object.\n    '''\n    compound = None\n    MMcifReader.read(fileName)\n    mol = MoleculeFactory.getActive()\n    updateAtomArray()\n    return mol\n\ndef readPDB(fileName, isCompound = False, iStruct=0):\n    ''' Reads a pdb file and modifies the static Molecule object.\n        isCompound is used to specify if the file should be read in\n        as a ligand or small molecule.\n        Important note to take into consideration:\n        if isCompound is false, HETATM fields will be ignored in file and\n        the file will be read in as a sequence, ultimately creating\n        polymer(s)\n\n        This command returns either None or a compound depending on whether\n        the isCompound is true.\n    '''\n    compound = None\n    pdb = PDBFile()\n    if not isCompound:\n        pdb.readSequence(fileName,False, iStruct)\n        mol = MoleculeFactory.getActive()\n    else:\n        mol = pdb.readResidue(fileName, None, MoleculeFactory.getActive(), None)\n    updateAtomArray()\n    return mol\n\ndef readPDBX(fileName, isCompound = False):\n    ''' Reads a pdb file and modifies the static Molecule object.\n        isCompound is used to specify if the file should be read in\n        as a ligand or small molecule.\n        Important note to take into consideration:\n        if isCompound is false, HETATM fields will be ignored in file and\n        the file will be read in as a sequence, ultimately creating\n        polymer(s)\n\n        This command returns either None or a 
compound depending on whether\n    the isCompound is true.\n    '''\n    compound = None # FIXME: This parameter is not used\n    pdb = PDBFile()\n    mol = pdb.read(fileName)\n    updateAtomArray()\n    return mol\n\ndef readPDBXCoords(fileName, structNum, noComplain, genCoords):\n    ''' Reads a pdb file and modifies the static Molecule object.\n    structNum is the structure number, noComplain is a boolean for\n    printing out an error message, and genCoords is a boolean for \n    generating coordinates.\n    '''\n\n    pdb = PDBFile()\n    pdb.readCoordinates(fileName, structNum, noComplain, genCoords)\n    updateAtomArray()\n    mol = MoleculeFactory.getActive()\n    return mol\n\ndef readSDF(fileName, newMolecule = False):\n    sdf = SDFile()\n    molecule = MoleculeFactory.getActive() if not newMolecule else None\n    compound = sdf.read(fileName, None, molecule, None)\n    updateAtomArray()\n    return compound\n\ndef readMol2(fileName, newMolecule = False):\n    mol2 = Mol2File()\n    molecule = MoleculeFactory.getActive() if not newMolecule else None\n    compound = mol2.read(fileName, None, molecule, None)\n    updateAtomArray()\n    return compound\n\ndef readSequenceString(polymerName, sequence, seqReader=None):\n    ''' Creates a polymer from the sequence provided with the name of polymerName\n    The sequence input can be a plain chain of characters, but that only works\n    if the desired polymer is RNA. If creating a polymer for a protein,\n    sequence must be a list using the 3 letter code.\n    '''\n    seqAList = ArrayList()\n    seqAList.addAll(sequence)\n    if (seqReader == None):\n        seqReader = Sequence()\n        seqReader.newPolymer()\n    seqReader.read(polymerName, seqAList, \"\")\n    updateAtomArray()\n    mol = MoleculeFactory.getActive()\n    return mol\n\n\ndef readSequence(seqFile, convert=False, polymerName=None,seqReader=None):\n    if convert:\n        import os\n        import osfiles\n        dir = os.path.dirname(seqFile)\n        seqFile = osfiles.convertSeqFile(seqFile,dir)\n    if (seqReader == None):\n        seqReader = Sequence()\n        seqReader.newPolymer()\n    seqReader.read(seqFile, polymerName) if polymerName else seqReader.read(seqFile)\n    updateAtomArray()\n    mol = MoleculeFactory.getActive()\n    return mol\n\ndef readYaml(file):\n    from java.io import FileInputStream\n    from org.yaml.snakeyaml import Yaml\n\n    input = FileInputStream(file)\n    yaml = Yaml()\n    data = yaml.load(input)\n    return data\n\ndef readYamlString(yamlString):\n    from org.yaml.snakeyaml import Yaml\n\n    yaml = Yaml()\n    data = yaml.load(yamlString)\n    return data\n\n\n\ndef loadResource(resourceName):\n    cl = ClassLoader.getSystemClassLoader()\n    istream = cl.getResourceAsStream(resourceName)\n    lines = \"\"\n    if istream == None:\n        raise Exception(\"Cannot find '\" + resourceName + \"' on classpath\")\n    else:\n        reader = InputStreamReader(istream)\n        breader = BufferedReader(reader)\n        while True:\n            line = breader.readLine()\n            if line == None:\n                break\n            if lines != '':\n                lines += '\\\n'\n            lines += line\n        breader.close()\n    return lines\n\ndef savePDB(molecule, fileName,structureNum=0):\n    molecule.writeXYZToPDB(fileName, structureNum)\n\ndef readResiduePairCSV(csvFile=None):\n    if csvFile:\n        import os\n        if os.path.isfile(csvFile):\n            loadResource(csvFile)\n    \n","sub_path":"src/main/resources/molio.py","file_name":"molio.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"57550002","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import LinearRegression\n\nFILENAME = 
\"matrix_conv_gpu_500_points_Tesla.csv\"\n\n\ndata = np.array(pd.read_csv(FILENAME))\n\ntrain_data = data[:250]\ntest_data = data[250:]\n\nX_train = np.array(train_data[:, [1, 2, 3, 4, 5]])\ny_train = np.array(train_data[:, [0]])\n\nX_test = np.array(test_data[:, [1, 2, 3, 4, 5]])\nY_test = np.array(test_data[:, [1, 2, 3, 4, 5]])\n\nX_train = preprocessing.scale(X_train)\nX_test = preprocessing.scale(X_test)\n\nreg = LinearRegression().fit(X_train, y_train)\n\nprd = reg.predict(X_test)\n\nf = open('re.txt', 'w')\n\nfor i in range(test_data.shape[0]):\n f.writelines(str(prd[i][0]) + \"\\n\")\nf.close()\n\n\n# -----evaluation-----#\n\nimport math\nimport statistics\n\nsum_ae = 0.0\nsum_ape = 0.0\nsum_aape = 0.0\n\ntruth_value_list = []\n\nfor i in range(test_data.shape[0]):\n truth_value = test_data[:, [0]][i][0]\n sum_ae += abs(prd[i][0] - test_data[:, [0]][i][0])\n truth_value_list.append(truth_value)\n\nprint(\"MAE: \", sum_ae / test_data.shape[0])\n\n\nc = 0\n\n# decide the percentage to drop\npercentage = 0.3\nthreshold = sorted(truth_value_list)[int(len(test_data)*percentage) - 1]\n\nmedian = statistics.median(truth_value_list)\n\n\nfor i in range(test_data.shape[0]):\n\n pred_value = prd[i][0]\n truth_value = test_data[:, [0]][i][0]\n\n ape = (abs(prd[i][0] - test_data[:, [0]][i][0]) / test_data[:, [0]][i][0])\n aape = math.atan(abs(prd[i][0] - test_data[:, [0]][i][0]) / test_data[:, [0]][i][0])\n\n # valid rule\n if truth_value > threshold:\n sum_ape += ape\n c += 1\n\n sum_aape += aape\n\nprint(\"MAPE: \", sum_ape / c)\nprint(\"MAAPE: \", sum_aape / test_data.shape[0])\n\nprint(\"threshold value:\", threshold)\nprint(\"truth median:\", median)\nprint(\"range from\", min(truth_value_list), \"to\", max(truth_value_list))\nprint(\"valid points (MAPE):\", c, \"out of\", test_data.shape[0])\n\n# ------------------#\n","sub_path":"performance_prediction/gpu/MC/Matrix_Conv_GPU_LR+C.py","file_name":"Matrix_Conv_GPU_LR+C.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"12631747","text":"#!/usr/bin/env python\n#\n# Copyright 2012 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup configuration.\"\"\"\n\nimport platform\n\nfrom ez_setup import use_setuptools\nuse_setuptools()\nfrom setuptools import setup # pylint: disable=g-import-not-at-top\n\n# Configure the required packages and scripts to install, depending on\n# Python version and OS.\nREQUIRED_PACKAGES = [\n 'google-apputils',\n 'python-gflags',\n 'google-api-python-client==1.2',\n 'oauth2client==1.2',\n 'httplib2',\n ]\nCONSOLE_SCRIPTS = [\n 'bq = bq:run_main',\n ]\n\nif platform.system() == 'Windows':\n REQUIRED_PACKAGES.append('pyreadline')\n\npy_version = platform.python_version()\nif py_version < '2.6.5' or py_version >= '3':\n raise ValueError('BigQuery requires Python >= 2.6.5.')\n\n_BQ_VERSION = '2.0.24'\n\nsetup(name='bigquery',\n version=_BQ_VERSION,\n description='BigQuery command-line tool',\n url='http://code.google.com/p/google-bigquery-tools/',\n author='Google Inc.',\n author_email='bigquery-team@google.com',\n # Contained modules and scripts.\n py_modules=[\n 'bq',\n 'bq_flags',\n 'bigquery_client',\n 'table_formatter',\n ],\n entry_points={\n 'console_scripts': CONSOLE_SCRIPTS,\n },\n install_requires=REQUIRED_PACKAGES,\n provides=[\n 'bigquery (%s)' % (_BQ_VERSION,),\n ],\n # Information for packaging of the discovery document.\n include_package_data=True,\n packages=['discovery'],\n package_data={\n 'discovery': ['*'],\n },\n # PyPI package information.\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n license='Apache 2.0',\n keywords='google bigquery library',\n )\n","sub_path":"google-cloud-sdk/platform/bq/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"422899030","text":"from django.urls import path, include\n\nfrom extrequests import views, deprecated_views\n\n\nurlpatterns = [\n # training requests\n path('training_requests/', views.all_trainingrequests, name='all_trainingrequests'),\n path('training_requests/merge', views.trainingrequests_merge, name='trainingrequests_merge'),\n path('training_request//', include([\n path('', views.trainingrequest_details, name='trainingrequest_details'),\n path('edit/', views.TrainingRequestUpdate.as_view(), name='trainingrequest_edit'),\n ])),\n path('bulk_upload_training_request_scores/', views.bulk_upload_training_request_scores, name='bulk_upload_training_request_scores'),\n path('bulk_upload_training_request_scores/confirm/', 
views.bulk_upload_training_request_scores_confirmation, name='bulk_upload_training_request_scores_confirmation'),\n\n # unified workshop requests\n path('workshop_requests/', views.AllWorkshopRequests.as_view(), name='all_workshoprequests'),\n path('workshop_request//', include([\n path('', views.WorkshopRequestDetails.as_view(), name='workshoprequest_details'),\n path('set_state//', views.WorkshopRequestSetState.as_view(), name='workshoprequest_set_state'),\n path('accept_event/', views.WorkshopRequestAcceptEvent.as_view(), name='workshoprequest_accept_event'),\n path('edit/', views.WorkshopRequestChange.as_view(), name='workshoprequest_edit'),\n path('assign/', views.WorkshopRequestAssign.as_view(), name='workshoprequest_assign'),\n path('assign//', views.WorkshopRequestAssign.as_view(), name='workshoprequest_assign'),\n ])),\n\n # workshop inquiries\n path('workshop_inquiries/', views.AllWorkshopInquiries.as_view(), name='all_workshopinquiries'),\n path('workshop_inquiry//', include([\n path('', views.WorkshopInquiryDetails.as_view(), name='workshopinquiry_details'),\n path('set_state//', views.WorkshopInquirySetState.as_view(), name='workshopinquiry_set_state'),\n path('accept_event/', views.WorkshopInquiryAcceptEvent.as_view(), name='workshopinquiry_accept_event'),\n path('edit/', views.WorkshopInquiryChange.as_view(), name='workshopinquiry_edit'),\n path('assign/', views.WorkshopInquiryAssign.as_view(), name='workshopinquiry_assign'),\n path('assign//', views.WorkshopInquiryAssign.as_view(), name='workshopinquiry_assign'),\n ])),\n\n # self-organized submissions\n path('selforganised_submissions/', views.AllSelfOrganisedSubmissions.as_view(), name='all_selforganisedsubmissions'),\n path('selforganised_submission//', include([\n path('', views.SelfOrganisedSubmissionDetails.as_view(), name='selforganisedsubmission_details'),\n path('set_state//', views.SelfOrganisedSubmissionSetState.as_view(), name='selforganisedsubmission_set_state'),\n path('accept_event/', views.SelfOrganisedSubmissionAcceptEvent.as_view(), name='selforganisedsubmission_accept_event'),\n path('edit/', views.SelfOrganisedSubmissionChange.as_view(), name='selforganisedsubmission_edit'),\n path('assign/', views.SelfOrganisedSubmissionAssign.as_view(), name='selforganisedsubmission_assign'),\n path('assign//', views.SelfOrganisedSubmissionAssign.as_view(), name='selforganisedsubmission_assign'),\n ])),\n\n # deprecated: old swc/dc workshop requests\n path('eventrequests/', deprecated_views.AllEventRequests.as_view(), name='all_eventrequests'),\n path('eventrequest//', include([\n path('', deprecated_views.EventRequestDetails.as_view(), name='eventrequest_details'),\n path('set_state//', deprecated_views.EventRequestSetState.as_view(), name='eventrequest_set_state'),\n path('accept_event/', deprecated_views.eventrequest_accept_event, name='eventrequest_accept_event'),\n path('edit/', deprecated_views.EventRequestChange.as_view(), name='eventrequest_edit'),\n path('assign/', deprecated_views.EventRequestAssign.as_view(), name='eventrequest_assign'),\n path('assign//', deprecated_views.EventRequestAssign.as_view(), name='eventrequest_assign'),\n ])),\n\n # deprecated: dc self-organized workshop requests\n path('dc_selforganized_requests/', deprecated_views.AllDCSelfOrganizedEventRequests.as_view(), name='all_dcselforganizedeventrequests'),\n path('dc_selforganized_request//', include([\n path('', deprecated_views.DCSelfOrganizedEventRequestDetails.as_view(), name='dcselforganizedeventrequest_details'),\n 
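# the same route pattern (details, set_state, accept_event, edit, assign) repeats for each legacy request type below\n        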
path('set_state//', deprecated_views.DCSelfOrganizedEventRequestSetState.as_view(), name='dcselforganizedeventrequest_set_state'),\n path('accept_event/', deprecated_views.dcselforganizedeventrequest_accept_event, name='dcselforganizedeventrequest_accept_event'),\n path('edit/', deprecated_views.DCSelfOrganizedEventRequestChange.as_view(), name='dcselforganizedeventrequest_edit'),\n path('assign/', deprecated_views.DCSelfOrganizedEventRequestAssign.as_view(), name='dcselforganizedeventrequest_assign'),\n path('assign//', deprecated_views.DCSelfOrganizedEventRequestAssign.as_view(), name='dcselforganizedeventrequest_assign'),\n ])),\n\n # deprecated: workshop submissions\n path('submissions/', deprecated_views.AllEventSubmissions.as_view(), name='all_eventsubmissions'),\n path('submission//', include([\n path('', deprecated_views.EventSubmissionDetails.as_view(), name='eventsubmission_details'),\n path('set_state//', deprecated_views.EventSubmissionSetState.as_view(), name='eventsubmission_set_state'),\n path('accept_event/', deprecated_views.eventsubmission_accept_event, name='eventsubmission_accept_event'),\n path('edit/', deprecated_views.EventSubmissionChange.as_view(), name='eventsubmission_edit'),\n path('assign/', deprecated_views.EventSubmissionAssign.as_view(), name='eventsubmission_assign'),\n path('assign//', deprecated_views.EventSubmissionAssign.as_view(), name='eventsubmission_assign'),\n ])),\n]\n","sub_path":"amy/extrequests/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"602310121","text":"from decimal import *\nimport os\n\ngetcontext().prec = 3 # set precision of the Decimal numbers\n\n\ndef main():\n path = os.getcwd() # get path to current directory\n files = os.listdir(path) # get list of all files in current directory\n resultfile = os.path.join(path, 'result.txt')\n with open(resultfile, 'w') as f: # delete and recreate result file\n f.write('''Date, 0:00 - 0:59, 1:00 - 1:59, 2:00 - 2:59, 3:00 - 3:59,\n 4:00 - 4:59, 5:00 - 5:59, 6:00 - 6:59, 7:00 - 7:59, 8:00 - 8:59,\n 9:00 - 9:59, 10:00 - 10:59, 11:00 - 11:59, 12:00 - 12:59,\n 13:00 - 13:59, 14:00 - 14:59, 15:00 - 15:59, 16:00 - 16:59,\n 17:00 - 17:59, 18:00 - 18:59, 19:00 - 19:59, 20:00 - 20:59,\n 21:00 - 21:59, 22:00 - 22:59, 23:00 - 23:59''' + '\\n')\n for file in files: # go through all the found files\n # make sure to only check .dat files\n file_extension = os.path.splitext(file)[1]\n if file_extension != '.dat':\n print(\"WRONG FILE EXTENSION \" + file)\n continue\n print('FOUND ' + file)\n filepath = os.path.join(path, file) # construct full path to file\n temperatures = loadFile(filepath) # call loadfile\n saveToFile(resultfile, temperatures)\n\n\ndef loadFile(path): # function to load one wetter file and calculate the\n # hourly average außentemperatur - return list (hour, temperature)\n with open(path, 'r') as f: # open file\n temp = f.readline() # read the first line\n # create list to store averages in - fill first element with the date\n # of the day\n avgtemp = [temp[1:11]]\n i = 1 # used for counting how many lines were read - every 360 = 1hour\n t = Decimal() # t is used to sum up the temperatures to average\n for line in f: # go through each line in the file\n # split each line into its elements - split at whitespace\n splitline = str.split(line[:-1])\n # add temperature (column 4) to t\n t = Decimal(t) + Decimal(splitline[3])\n if i % 360 == 0: # every 360 lines\n 
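# 360 ten-second samples span one hour, so t/360 is the hourly mean temperature\n                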
avgtemp.append(float(t / 360)) # add average to result list\n i = 0 # reset i\n t = 0.0 # and t\n i += 1\n return avgtemp\n\n\ndef saveToFile(filename, line):\n with open(filename, 'a') as f:\n f.write(str(line)[1:-1] + '\\n')\n\n\n# Standard boilerplate to call the main() function to begin\n# the program.\nif __name__ == '__main__':\n main()\n","sub_path":"archive/wetter/wetter.py","file_name":"wetter.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"288258935","text":"#!/usr/bin/env python\n'''\nCustom response objects\n'''\nclass EpochInfoResponse:\n def __init__(\n self, \n absoluteSlot:str,\n blockHeight:int,\n epoch:int,\n slotIndex:int,\n slotsInEpoch:int,\n transactionCount:int\n ) -> None:\n self.absoluteSlot = absoluteSlot\n self.blockHeight = blockHeight\n self.epoch = epoch\n self.slotIndex = slotIndex\n self.slotsInEpoch = slotsInEpoch\n self.transactionCount = transactionCount","sub_path":"rpc/response_helper.py","file_name":"response_helper.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"564614151","text":"###########################################################################################################################################\n## P2VVParameterizations.GeneralUtils: General P2VV parameterization utilities ##\n## ##\n## authors: ##\n## GR, Gerhard Raven, Nikhef & VU, Gerhard.Raven@nikhef.nl ##\n## ##\n###########################################################################################################################################\n\n_parNamePrefix = ''\n\ndef setParNamePrefix(prefix) :\n global _parNamePrefix\n if prefix : _parNamePrefix = prefix\n else : _parNamePrefix = ''\n\ndef getParNamePrefix( fullPrefix = False ) :\n global _parNamePrefix\n if not _parNamePrefix : return ''\n if fullPrefix : return _parNamePrefix + '_'\n return _parNamePrefix\n\nclass _util_parse_mixin( object ) :\n def parameters( self ) :\n return self._params\n\n def parameter( self, name ) :\n for par in self._params :\n if par.GetName() == name : return par\n return None\n\n def getNamePrefix( self, kwargs ) :\n if hasattr( self, '_namePF' ) :\n assert 'ParNamePrefix' not in kwargs or kwargs['ParNamePrefix'] == self._namePF\\\n , 'P2VV -- ERROR: _util_parse_mixin.getNamePrefix: parameter name prefix from arguments is not equal to existing prefix'\n else :\n self._namePF = kwargs.pop( 'ParNamePrefix', None )\n\n if self._namePF : return self._namePF + '_'\n else : return getParNamePrefix(True)\n\n def _parseArg( self, argName, kwargs, **parsDict ) :\n def _create( argName, kwargs, **parsDict ) :\n # get dictionary of parameters to construct the variable\n if argName in kwargs :\n argPars = kwargs.pop(argName)\n parsDict.pop( argName, None )\n else :\n argPars = parsDict.pop( argName, None )\n\n # parse parameter dictionary\n import P2VV.RooFitWrappers\n singleArgKey = parsDict.pop( 'SingleArgKey', 'Value' )\n if argPars != None :\n if isinstance( argPars, P2VV.RooFitWrappers.RooObject ) : return argPars\n parsDict.update( argPars if type(argPars) == dict else { singleArgKey : argPars } )\n\n # construct variable name\n if 'Name' not in parsDict : parsDict['Name'] = argName\n namePF = self.getNamePrefix(kwargs)\n if 'NamePrefix' in parsDict :\n namePF = parsDict.pop('NamePrefix')\n if namePF : namePF += '_'\n if not parsDict.get( 'Observable', False ) and namePF : 
parsDict['Name'] = namePF + parsDict['Name']\n\n # create variable\n objType = parsDict.pop( 'ObjectType', 'RealVar' )\n return vars(P2VV.RooFitWrappers)[objType](**parsDict)\n\n # create object\n contList = parsDict.pop( 'ContainerList', None )\n obj = _create( argName, kwargs, **parsDict )\n\n # either put object in container list or set it as attribute\n if contList != None : contList.append(obj)\n else : setattr( self, '_%s' % argName, obj )\n\n # put object in parameters\n if not hasattr( self, '_params' ) : self._params = []\n self._params += [ obj ]\n\n return obj\n\n def _check_extraneous_kw( self, kwargs ) :\n if kwargs: \n raise KeyError('got unknown keywords %s for %s' % ( kwargs, type(self) ) )\n\n def setValues( self, **kwargs ) :\n for ( k, v ) in kwargs.iteritems() : \n arg = getattr( self, '_' + k )\n if v < arg.getMin() : arg.setMin(v) \n if v > arg.getMax() : arg.setMax(v) \n arg['Value'] = v\n\n def setConstant( self, pattern, constant = True ) :\n import re\n rc = 0\n nrexp = re.compile(pattern)\n for i in self.parameters(): \n if not nrexp.match( i.GetName() ) : continue\n from ROOT import RooAbsLValue\n if not isinstance( i._var, RooAbsLValue) : continue\n i.setConstant (constant)\n rc += 1\n return rc\n\nclass _util_extConstraints_mixin( object ) :\n def __init__( self, kwargs ) :\n assert not hasattr(self,'_constraints')\n if 'Constraints' in kwargs : self._constraints = kwargs.pop('Constraints') \n elif 'Constraint' in kwargs : self._constraints = set(kwargs.pop('Constraint') )\n else : self._constraints = set()\n assert hasattr(self,'_constraints')\n\n def ExternalConstraints( self ) : return self._constraints\n def hasExtConstraints( self ) : return len(self._constraints) > 0\n\n def addConstraint( self, constr ) :\n self._constraints.add(constr)\n\n def addConstraints( self, constrList ) :\n for constr in constrList : self.addConstraint(constr)\n\nclass _util_conditionalObs_mixin( object ) :\n def __init__( self, kwargs ) :\n assert not hasattr(self,'_conditionals')\n if 'Conditionals' in kwargs : self._conditionals = set( kwargs.pop('Conditionals') )\n elif 'Conditional' in kwargs : self._conditionals = set( kwargs.pop('Conditional') )\n elif 'ConditionalObservables' in kwargs : self._conditionals = set( kwargs.pop('ConditionalObservables') )\n else : self._conditionals = set()\n assert hasattr(self,'_conditionals')\n\n def ConditionalObservables( self ) : return self._conditionals\n\n def addConditional( self, condObs ) :\n self._conditionals.add(condObs)\n\n def addConditionals( self, condObsList ) :\n for obs in condObsList : self.addConditional(obs)\n\n\n#def normalize_individual( name, pdf, tag ) :\n# pl = RooArgList()\n# for t in tag._var :\n# tr = ConstVar('const_%s_%s'%(tag,t.GetName(),Value = t.getVal() )\n# from ROOT import RooCustomizer\n# customizer = RooCustomizer( pdf._var, '%s_%s'%(name,t.GetName() )\n# customizer.replaceArg( tag._var, tr._var )\n# pl += customizer.build(True)\n# # TODO: wrap RooSimultaneous in a RooObject...\n# return RooSimultaneous( name, name, pl, tag )\n\nfrom itertools import product\ndef valid_combinations(states):\n all_states = []\n for level in states:\n all_states.extend([e[0] for e in level])\n all_states = list(set(all_states))\n labels = [[(state, label.GetName()) for label in state] for state in all_states]\n all_combinations = list(product(*labels))\n def good(combination):\n s = set(combination)\n for level in states:\n level_good = False\n for entry in level:\n if entry in s:\n level_good = True\n break\n if not 
level_good:\n return level_good\n return True\n return filter(good, all_combinations)\n\ndef exclusive_combinations(states):\n all_states = [e[0] for e in states]\n labels = [[(state, label.GetName()) for label in state] for state in all_states]\n all_combinations = list(product(*labels))\n def good(combination):\n s = set(combination)\n r = set(states)\n return len(s & r) == 1\n return filter(good, all_combinations)\n","sub_path":"PhysFit/P2VV/python/P2VV/Parameterizations/GeneralUtils.py","file_name":"GeneralUtils.py","file_ext":"py","file_size_in_byte":7704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"292672566","text":"import training_data_provider\nimport numpy as np\nfrom absolute_path import _get_full_path\nimport training_routines\nimport os.path\nimport json\nfrom shutil import copyfile  # needed below to seed the default config\n\nif __name__ == '__main__':\n\t# load config\n\tconfig_path = training_routines._get_full_path(\"training_config.json\")\n\tif not os.path.isfile(config_path):\n\t\tcopyfile(config_path+\".default\", config_path)\n\tconfig = json.load(open(config_path))\n\t\n\tdata_provider = training_data_provider.DataProvider(config, categorical_labels=True)\n\n\tSAVE_INTERVAL = 100\n\tsavepath = _get_full_path(\"data\", \"training\", \"categorical\")\n\n\t# populate training data\n\ttraining_features = np.zeros((0, int(2*training_data_provider.SEQUENCE_RADIUS // 0.005), 13))\n\ttraining_labels = np.zeros((0, 2))\n\tfor i, (train_input, train_ground_truth) in enumerate(data_provider.xbatches(training=True)):\n\t\tprint(\"batch\", i)\n\t\ttraining_features = np.concatenate((training_features, train_input), axis=0)\n\t\ttraining_labels = np.concatenate((training_labels, train_ground_truth), axis=0)\n\n\t\tif i > 0 and i % SAVE_INTERVAL == 0:\n\t\t\tprint(\"saving...\")\n\t\t\tnp.save(os.path.join(savepath, \"training_features_{}.npy\".format(i)), training_features)\n\t\t\tnp.save(os.path.join(savepath, \"training_labels_{}.npy\".format(i)), training_labels)\n\t\t\ttraining_features = np.zeros((0, int(2*training_data_provider.SEQUENCE_RADIUS // 0.005), 13))\n\t\t\ttraining_labels = np.zeros((0, 2))\n\t\t\tprint(\"done\")\n\tprint(\"saving...\")\n\tnp.save(os.path.join(savepath, \"training_features_{}.npy\".format(i)), training_features)\n\tnp.save(os.path.join(savepath, \"training_labels_{}.npy\".format(i)), training_labels)\n\tprint(\"done\")\n\n\t# populate test data\n\ttesting_features = np.zeros((0, int(2*training_data_provider.SEQUENCE_RADIUS // 0.005), 13))\n\ttesting_labels = np.zeros((0, 2))\n\tfor i, (test_input, test_ground_truth) in enumerate(data_provider.xbatches(training=False)):\n\t\ttesting_features = np.concatenate((testing_features, test_input), axis=0)\n\t\ttesting_labels = np.concatenate((testing_labels, test_ground_truth), axis=0)\n\n\t\tif i > 0 and i % SAVE_INTERVAL == 0:\n\t\t\tprint(\"saving...\")\n\t\t\tnp.save(os.path.join(savepath, \"test_features_{}.npy\".format(i)), testing_features)\n\t\t\tnp.save(os.path.join(savepath, \"test_labels_{}.npy\".format(i)), testing_labels)\n\t\t\ttesting_features = np.zeros((0, int(2*training_data_provider.SEQUENCE_RADIUS // 0.005), 13))\n\t\t\ttesting_labels = np.zeros((0, 2))\n\t\t\tprint(\"done\")\n\tprint(\"saving...\")\n\tnp.save(os.path.join(savepath, \"test_features_{}.npy\".format(i)), testing_features)\n\tnp.save(os.path.join(savepath, \"test_labels_{}.npy\".format(i)), 
testing_labels)\n\tprint(\"done\")\n\n","sub_path":"generate_training_data.py","file_name":"generate_training_data.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"238800289","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 15 20:49:50 2019\n\n@author: liujun\n\"\"\"\n\nimport os\nimport pandas as pd\nfrom sklearn.ensemble import GradientBoostingClassifier # supports multi-class classification\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn import metrics\nfrom sklearn.preprocessing import OneHotEncoder\n\n\n\nclass basedGBDT_LR: \n def __init__(self):\n self.filepath='telecom-churn-prediction-data.csv'\n self.data=self.feature_transform()\n self.train,self.test=self.split_data()\n self.lr,self.gbdt,self.enc=self.model()\n \n def isnone(self,value):\n if value==\" \" or value is None:\n return '0'\n else:\n return value\n \n def feature_transform(self):\n if os.path.exists('new_data.csv'):\n return pd.read_csv('new_data.csv')\n else:\n print('Transforming feature values')\n feature_dict={\n 'gender':{'Male':'1','Female':'0'},\n 'Partner':{'Yes':'1','No':'0'},\n 'Dependents':{'Yes':'1','No':'0'},\n 'PhoneService':{'Yes':'1','No':'0'},\n 'MultipleLines':{'Yes':'1','No':'0','No phone service':'2'},\n 'InternetService':{'DSL':'1','Fiber optic':'2','No':'0'},\n 'OnlineSecurity':{'Yes':'1','No':'0','No internet service':'2'},\n 'OnlineBackup':{'Yes':'1','No':'0','No internet service':'2'},\n 'DeviceProtection':{'Yes':'1','No':'0','No internet service':'2'},\n 'TechSupport':{'Yes':'1','No':'0','No internet service':'2'},\n 'StreamingTV':{'Yes':'1','No':'0','No internet service':'2'},\n 'StreamingMovies':{'Yes':'1','No':'0','No internet service':'2'},\n 'Contract':{'Month-to-month':'0','One year':'1','Two year':'2'},\n 'PaperlessBilling':{'Yes':'1','No':'0'},\n 'PaymentMethod':{'Electronic check':'0','Mailed check':'1','Bank transfer (automatic)':'2','Credit card (automatic)':'3'},\n 'Churn':{'Yes':'1','No':'0'},}\n fw=open('new_data.csv','w')\n fw.write(\"customerID,gender,SeniorCitizen,Partner,Dependents,tenure,PhoneService,MultipleLines,InternetService,OnlineSecurity,OnlineBackup,DeviceProtection,TechSupport,StreamingTV,StreamingMovies,Contract,PaperlessBilling,PaymentMethod,MonthlyCharges,TotalCharges,Churn\\n\")\n for line in open(self.filepath,'r').readlines():\n if not line.startswith('customerID'):\n customerID,gender,SeniorCitizen,Partner,Dependents,tenure,PhoneService,MultipleLines,InternetService,OnlineSecurity,OnlineBackup,DeviceProtection,TechSupport,StreamingTV,StreamingMovies,Contract,PaperlessBilling,PaymentMethod,MonthlyCharges,TotalCharges,Churn=line.strip().split(',')\n data=[]\n data.append(customerID) # if the ID is empty, drop this row\n data.append(self.isnone(feature_dict['gender'][gender]))\n data.append(self.isnone(SeniorCitizen))\n data.append(self.isnone(feature_dict['Partner'][Partner]))\n data.append(self.isnone(feature_dict['Dependents'][Dependents]))\n data.append(self.isnone(tenure))\n data.append(self.isnone(feature_dict['PhoneService'][PhoneService]))\n data.append(self.isnone(feature_dict['MultipleLines'][MultipleLines]))\n data.append(self.isnone(feature_dict['InternetService'][InternetService]))\n data.append(self.isnone(feature_dict['OnlineSecurity'][OnlineSecurity]))\n data.append(self.isnone(feature_dict[\"OnlineBackup\"][OnlineBackup]))\n 
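# NOTE (editor): the remaining service columns below reuse the same Yes/No/'No internet service' -> '1'/'0'/'2' mapping from feature_dict above.\n 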
data.append(self.isnone(feature_dict[\"DeviceProtection\"][DeviceProtection]))\n data.append(self.isnone(feature_dict[\"TechSupport\"][TechSupport]))\n data.append(self.isnone(feature_dict[\"StreamingTV\"][StreamingTV]))\n data.append(self.isnone(feature_dict[\"StreamingMovies\"][StreamingMovies]))\n data.append(self.isnone(feature_dict[\"Contract\"][Contract]))\n data.append(self.isnone(feature_dict[\"PaperlessBilling\"][PaperlessBilling]))\n data.append(self.isnone(feature_dict[\"PaymentMethod\"][PaymentMethod]))\n data.append(self.isnone(MonthlyCharges))\n data.append(self.isnone(TotalCharges))\n data.append(self.isnone(feature_dict[\"Churn\"][Churn]))\n fw.write(','.join(data))\n fw.write('\\n')\n fw.close()\n return pd.read_csv('new_data.csv')\n \n def split_data(self): # split the data into train and test sets\n train,test=train_test_split(self.data,test_size=0.2,random_state=4)\n return train,test\n \n def model(self): # x is every column except customerID and Churn; y is Churn\n x_train=self.train[[x for x in self.train.columns if x not in ['customerID','Churn']]]\n y_train=self.train['Churn']\n lr=LogisticRegression(penalty='l2',tol=0.0001,fit_intercept=True,max_iter=20)\n gbdt=GradientBoostingClassifier(learning_rate=0.1, n_estimators=100, max_depth=7) # learning rate 0.1, 100 boosting iterations, tree depth 7\n gbdt.fit(x_train,y_train)\n # gbdt.apply() returns a 3-dimensional array\n enc=OneHotEncoder()\n enc.fit(gbdt.apply(x_train).reshape(-1,100))\n lr.fit(enc.transform(gbdt.apply(x_train).reshape(-1,100)),y_train)\n return lr,gbdt,enc\n \n def evaluate(self):\n x_test=self.test[[x for x in self.test.columns if x not in ['customerID','Churn']]]\n y_test=self.test['Churn']\n y_predict=self.lr.predict_proba(self.enc.transform(self.gbdt.apply(x_test).reshape(-1,100)))\n y_predict_list=[]\n for y in y_predict:\n y_predict_list.append(1 if y[1]>0.5 else 0) # predicted probability > 0.5 means churn, otherwise no churn\n mse=mean_squared_error(y_test,y_predict_list) # mean squared error\n print('mse:{}'.format(mse))\n accuracy=metrics.accuracy_score(y_test.values,y_predict_list) # accuracy\n print('accuracy:{}'.format(accuracy))\n auc=metrics.roc_auc_score(y_test.values,y_predict_list)\n print('auc:{}'.format(auc))\n \nif __name__ == \"__main__\":\n pred = basedGBDT_LR()\n pred.evaluate()\n \n \n \n \n \n \n ","sub_path":"基于GBDT和LR.py","file_name":"基于GBDT和LR.py","file_ext":"py","file_size_in_byte":6648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"130355387","text":"'''\nProject 1 - Search-based solutions for static Pac-Man game.\n\ndraw.py: Animate Pac-Man path.\n\nSubject:\n MC906/MO416 - Introduction to Artificial Intelligence.\nAuthors:\n Daniel Helú Prestes de Oliveira - RA 166215\n Eduardo Barros Innarelli - RA 170161\n Matheus Rotta Alves - RA 184403\n Victor Ferreira Ferrari - RA 187890\n Vinícius Couto Espindola - RA 188115\n\nUniversity of Campinas - UNICAMP - 2020\n\nLast Modified: 05/05/2020.\n'''\n\nimport os\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\"\nimport pygame as py\nimport numpy as np\n#import simulated_annealing as sa\nfrom time import sleep\n\nblack = (0,0,0)\ngrey = (50,50,50)\nwhite = (255,255,255)\nred = (255,0,0)\ngreen = (0,255,0)\nblue = (0,0,200)\nyellow = (255,255,0)\n\n# Multiple of 4 pixels\npx = 28\n\nclass PacScreen():\n\n def __init__(self, maze):\n self.maze = maze.copy()\n self.size = (self.maze.shape[1]*px,self.maze.shape[0]*px)\n self.disp = None\n self.map = None\n\n self.pac = tuple(map(int, np.where(self.maze==b'!')))\n self.goal = tuple(map(int, np.where(self.maze==b'?'))) \n\n def draw(self, maze, pac, goal):\n walls = (maze==b'|')\n bars = (maze==b'-')\n dots = 
(maze==b'.')\n ghosts = (maze==b'o')\n y_max,x_max = maze.shape\n\n for i in range(y_max):\n for j in range(x_max):\n x = j*px\n y = i*px\n if walls[i][j]:\n py.draw.rect(self.disp, blue, (x,y,px,px))\n if bars[i][j]:\n py.draw.rect(self.disp, white, (x,int(y+px/2-px/4),px,int(px/4)))\n if dots[i][j]:\n x_c = int(x+px/2)\n y_c = int(y+px/2)\n rad = int(px/5)\n py.draw.circle(self.disp, white, (x_c,y_c), rad)\n if ghosts[i][j]:\n a = (x,y+px)\n b = (x+px,y+px)\n c = (int(x+px/2),y)\n py.draw.polygon(self.disp, green, (a,b,c))\n \n x_c = int(pac[1]*px + px/2)\n y_c = int(pac[0]*px + px/2)\n rad = int(px/3)\n py.draw.circle(self.disp, yellow, (x_c,y_c), rad)\n\n x_c = int(goal[1]*px + px/2)\n y_c = int(goal[0]*px + px/2)\n rad = int(px/4)\n py.draw.circle(self.disp, red, (x_c,y_c), rad)\n\n def update(self, maze, pac):\n # Erase old pacman\n x = self.pac[1]*px\n y = self.pac[0]*px\n py.draw.rect(self.disp, black, (x,y,px,px))\n \n # Leave visited tag\n x_c = int(self.pac[1]*px + px/2)\n y_c = int(self.pac[0]*px + px/2)\n rad = int(px/4)\n py.draw.circle(self.disp, grey, (x_c,y_c), rad)\n\n # Update and draw\n self.pac = pac\n x_c = int(self.pac[1]*px + px/2)\n y_c = int(self.pac[0]*px + px/2)\n rad = int(px/3)\n py.draw.circle(self.disp, yellow, (x_c,y_c), rad)\n\n def step(self, action): \n if self.maze[action] == b'.':\n self.maze[action] = ' ' \n self.update(self.maze, action)\n\n def run(self, path, interval=0.005):\n # Create window and draw\n self.disp = py.display.set_mode(self.size)\n self.disp.fill(black)\n self.map = py.PixelArray(self.disp)\n self.draw(self.maze, self.pac, self.goal)\n\n # Animate\n while True:\n for event in py.event.get():\n if event.type == py.QUIT:\n py.quit()\n return\n\n if path:\n self.step(path.pop(0))\n py.display.update()\n sleep(interval)\n\n# if __name__ == '__main__':\n# maze_file = 'mazes/sparse/1a'\n# maze = np.genfromtxt(maze_file, dtype=str, delimiter=1).astype('bytes')\n# display = PacScreen(maze)\n# best = [display.pac]\n# print(best)\n# display.run(best, interval=1000)\n","sub_path":"Project1/PacScreen.py","file_name":"PacScreen.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"103798440","text":"#! 
/usr/bin/env python\n#\ndef vand1 ( n, x ):\n\n#*****************************************************************************80\n#\n## VAND1 returns the VAND1 matrix.\n#\n# Formula:\n#\n# A(I,J) = X(J)^(I-1)\n#\n# Example:\n#\n# N = 5, X = ( 2, 3, 4, 5, 6 )\n#\n# 1 1 1 1 1\n# 2 3 4 5 6\n# 4 9 16 25 36\n# 8 27 64 125 216\n# 16 81 256 625 1296\n#\n# Properties:\n#\n# A is generally not symmetric: A' /= A.\n#\n# A is nonsingular if, and only if, the X values are distinct.\n#\n# det ( A ) = product ( 1 <= I <= N ) ( 1 <= J < I ) ( X(I) - X(J) ).\n# = product ( 1 <= J <= N ) X(J)\n# * product ( 1 <= I < J ) ( X(J) - X(I) ).\n#\n# A is generally ill-conditioned.\n#\n# The family of matrices is nested as a function of N.\n#\n# Licensing:\n#\n# This code is distributed under the GNU LGPL license.\n#\n# Modified:\n#\n# 27 February 2015\n#\n# Author:\n#\n# John Burkardt\n#\n# Reference:\n#\n# Robert Gregory, David Karney,\n# A Collection of Matrices for Testing Computational Algorithms,\n# Wiley, 1969, page 27,\n# LC: QA263.G68.\n#\n# Nicholas Higham,\n# Stability analysis of algorithms for solving confluent\n# Vandermonde-like systems,\n# SIAM Journal on Matrix Analysis and Applications,\n# Volume 11, 1990, pages 23-41.\n#\n# Parameters:\n#\n# Input, integer N, the order of the matrix desired.\n#\n# Input, real X(N), the values that define A.\n#\n# Output, real A(N,N), the matrix.\n#\n import numpy as np\n\n a = np.zeros ( ( n, n ) )\n\n for i in range ( 0, n ):\n for j in range ( 0, n):\n\n if ( i == 0 and x[j] == 0.0 ):\n a[i,j] = 1.0\n else:\n a[i,j] = x[j] ** i\n\n return a\n\ndef vand1_determinant ( n, x ):\n\n#*****************************************************************************80\n#\n## VAND1_DETERMINANT computes the determinant of the VAND1 matrix.\n#\n# Licensing:\n#\n# This code is distributed under the GNU LGPL license.\n#\n# Modified:\n#\n# 27 February 2015\n#\n# Author:\n#\n# John Burkardt\n#\n# Parameters:\n#\n# Input, integer N, the order of the matrix.\n#\n# Input, real X(N), the parameters.\n#\n# Output, real VALUE, the determinant.\n#\n value = 1.0;\n\n for i in range ( 0, n ):\n for j in range ( 0, i ):\n value = value * ( x[i] - x[j] )\n\n return value\n\ndef vand1_determinant_test ( ):\n\n#*****************************************************************************80\n#\n## VAND1_DETERMINANT_TEST tests VAND1_DETERMINANT.\n#\n# Licensing:\n#\n# This code is distributed under the GNU LGPL license.\n#\n# Modified:\n#\n# 27 February 2015\n#\n# Author:\n#\n# John Burkardt\n#\n import platform\n from vand1 import vand1\n from r8vec_uniform_ab import r8vec_uniform_ab\n from r8mat_print import r8mat_print\n\n print ( '' )\n print ( 'VAND1_DETERMINANT_TEST' )\n print ( ' Python version: %s' % ( platform.python_version ( ) ) )\n print ( ' VAND1_DETERMINANT computes the VAND1 determinant.' )\n\n m = 5\n n = m\n r8_lo = -5.0\n r8_hi = +5.0\n seed = 123456789\n x, seed = r8vec_uniform_ab ( n, r8_lo, r8_hi, seed )\n\n a = vand1 ( n, x )\n\n r8mat_print ( m, n, a, ' VAND1 matrix:' )\n\n value = vand1_determinant ( n, x )\n\n print ( '' )\n print ( ' Value = %g' % ( value ) )\n#\n# Terminate.\n#\n print ( '' )\n print ( 'VAND1_DETERMINANT_TEST' )\n print ( ' Normal end of execution.' 
)\n return\n\ndef vand1_inverse ( n, x ):\n\n#*****************************************************************************80\n#\n## VAND1_INVERSE returns the inverse of the VAND1 matrix.\n#\n# Formula:\n#\n# A(I,J) = coefficient of X^(J-1) in I-th Lagrange basis polynomial.\n#\n# Example:\n#\n# N = 5, \n# X = ( 2, 3, 4, 5, 6 )\n#\n# 15.00 -14.25 4.96 -0.75 0.04\n# -40.00 44.67 -17.33 2.83 -0.17\n# 45.00 -54.00 22.75 -4.00 0.25\n# -24.00 30.00 -13.33 2.50 -0.17\n# 5.00 -6.42 2.96 -0.58 0.04\n#\n# Properties:\n#\n# The sum of the entries of A is\n#\n# 1 - product ( 1 <= I <= N ) ( 1 - 1 / X(I) ).\n#\n# Licensing:\n#\n# This code is distributed under the GNU LGPL license.\n#\n# Modified:\n#\n# 28 March 2015\n#\n# Author:\n#\n# John Burkardt\n#\n# Parameters:\n#\n# Input, integer N, the order of the matrix.\n#\n# Input, real X(N), the values that define A.\n#\n# Output, real A(N,N), the matrix.\n#\n import numpy as np\n\n a = np.zeros ( ( n, n ) )\n\n for i in range ( 0, n ):\n a[i,0] = 1.0\n\n for i in range ( 0, n ):\n\n index = 0\n\n for k in range ( 0, n ):\n\n if ( k != i ):\n\n for j in range ( index + 1, -1, -1 ):\n\n a[i,j] = - x[k] * a[i,j] / ( x[i] - x[k] )\n\n if ( 0 < j ):\n a[i,j] = a[i,j] + a[i,j-1] / ( x[i] - x[k] )\n\n index = index + 1\n\n return a\n\ndef vand1_test ( ):\n\n#*****************************************************************************80\n#\n## VAND1_TEST tests VAND1.\n#\n# Licensing:\n#\n# This code is distributed under the GNU LGPL license.\n#\n# Modified:\n#\n# 27 February 2015\n#\n# Author:\n#\n# John Burkardt\n#\n import platform\n from r8vec_uniform_ab import r8vec_uniform_ab\n from r8mat_print import r8mat_print\n\n print ( '' )\n print ( 'VAND1_TEST' )\n print ( ' Python version: %s' % ( platform.python_version ( ) ) )\n print ( ' VAND1 computes the VAND1 matrix.' )\n\n m = 5\n n = m\n r8_lo = -5.0\n r8_hi = +5.0\n seed = 123456789\n x, seed = r8vec_uniform_ab ( n, r8_lo, r8_hi, seed )\n\n a = vand1 ( n, x )\n \n r8mat_print ( m, n, a, ' VAND1 matrix:' )\n#\n# Terminate.\n#\n print ( '' )\n print ( 'VAND1_TEST' )\n print ( ' Normal end of execution.' )\n return\n\nif ( __name__ == '__main__' ):\n from timestamp import timestamp\n timestamp ( )\n vand1_test ( )\n timestamp ( )\n \n","sub_path":"matrix_exp/vand1.py","file_name":"vand1.py","file_ext":"py","file_size_in_byte":5650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"272144502","text":"#!/usr/bin/env python3\n\"\"\"\nScript to sync Sentinel-2 data from NCI to AWS S3 bucket\n\"\"\"\n\nfrom datetime import datetime as dt, timedelta\nfrom pathlib import Path\nimport logging\nimport subprocess\n\nimport click\nimport boto3\nimport botocore\nimport yaml\nfrom odc.index import odc_uuid\n\nformatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')\nhandler = logging.StreamHandler()\nhandler.setFormatter(formatter)\nLOG = logging.getLogger(\"s2_to_s3_rolling\")\nLOG.setLevel(logging.DEBUG)\nLOG.addHandler(handler)\n\n\nNCI_DIR = '/g/data/if87/datacube/002/S2_MSI_ARD/packaged'\nS3_PATH = 'L2/sentinel-2-nbar/S2MSIARD_NBAR'\n\n\ndef find_granules(_num_days, _end_date, root_path=NCI_DIR):\n \"\"\"\n Find granules for the date range specified above. 
Format is yyyy-mm-dd/granule\n :param _num_days: Number of days to process before the end date.\n :param _end_date: End date for processing granules.\n :param root_path: Root path of Sentinel-2 Data in NCI\n :return: List of granules\n \"\"\"\n # Find the dates between the input date and today, inclusive, formatted like the directories\n dates = [(_end_date - timedelta(days=x)\n ).strftime(\"%Y-%m-%d\") for x in range(_num_days + 1)]\n\n # The list of folders will be returned and will contain all the granules available for\n # the date range specified above. Format is yyyy-mm-dd/granule\n list_of_granules = []\n\n for date in dates:\n dir_for_date = Path(root_path).joinpath(date)\n if Path(dir_for_date).exists():\n granules = [date + \"/\" + name.name for name in Path(dir_for_date).iterdir()]\n list_of_granules += granules\n\n return list_of_granules\n\n\ndef check_granule_exists(_s3_bucket, s3_metadata_path):\n \"\"\"\n Check if granule already exists in S3 bucket\n :param _s3_bucket: name of s3 bucket to store granules\n :param s3_metadata_path: Path of metadata file\n :return: True if the granule's metadata object already exists in the bucket\n \"\"\"\n s3_resource = boto3.resource('s3')\n\n try:\n # This does a head request, so is fast\n s3_resource.Object(_s3_bucket, s3_metadata_path).load()\n except botocore.exceptions.ClientError as exception:\n if exception.response['Error']['Code'] == \"404\":\n return False\n else:\n return True\n\n # No exception was raised: the head request succeeded, so the object exists\n return True\n\n\ndef sync_granule(granule, _s3_bucket):\n \"\"\"\n Run AWS sync command to sync granules to S3 bucket\n :param granule: name of the granule\n :param _s3_bucket: name of the s3 bucket\n :return: True on success.\n \"\"\"\n local_path = Path(NCI_DIR).joinpath(granule)\n s3_path = \"s3://{s3_bucket}/{s3_path}/{granule}\".format(\n s3_bucket=_s3_bucket,\n s3_path=S3_PATH,\n granule=granule\n )\n\n # Remove any data that shouldn't be there and exclude the metadata and NBART\n command = \"aws s3 sync {local_path} {s3_path} \" \\\n \"--only-show-errors \" \\\n \"--delete \" \\\n \"--exclude NBART/* \" \\\n \"--exclude ARD-METADATA.yaml\".format(local_path=local_path, s3_path=s3_path)\n\n return_code = subprocess.call(command, shell=True)\n\n if return_code == 0:\n LOG.info(\"Finished processing of granule - %s\", granule)\n else:\n LOG.info(\"Failed processing of granule - %s\", granule)\n\n # If the return code is zero, we have success.\n return return_code == 0\n\n\ndef replace_metadata(yaml_file, _s3_bucket, s3_metadata_path):\n \"\"\"\n Replace metadata with additional info\n :param yaml_file: metadata file in NCI\n :param _s3_bucket: name of s3 bucket\n :param s3_metadata_path: path of metadata file in s3\n \"\"\"\n s3_resource = boto3.resource(\"s3\").Bucket(_s3_bucket)\n\n with open(yaml_file) as config_file:\n temp_metadata = yaml.load(config_file, Loader=yaml.CSafeLoader)\n\n del temp_metadata['image']['bands']['nbart_blue']\n del temp_metadata['image']['bands']['nbart_coastal_aerosol']\n del temp_metadata['image']['bands']['nbart_contiguity']\n del temp_metadata['image']['bands']['nbart_green']\n del temp_metadata['image']['bands']['nbart_nir_1']\n del temp_metadata['image']['bands']['nbart_nir_2']\n del temp_metadata['image']['bands']['nbart_red']\n del temp_metadata['image']['bands']['nbart_red_edge_1']\n del temp_metadata['image']['bands']['nbart_red_edge_2']\n del temp_metadata['image']['bands']['nbart_red_edge_3']\n del temp_metadata['image']['bands']['nbart_swir_2']\n del temp_metadata['image']['bands']['nbart_swir_3']\n del temp_metadata['lineage']\n temp_metadata['creation_dt'] = 
temp_metadata['extent']['center_dt']\n temp_metadata['product_type'] = 'S2MSIARD_NBAR'\n temp_metadata['original_id'] = temp_metadata['id']\n temp_metadata['software_versions'].update({\n 's2_to_s3_rolling': {\n 'repo': 'https://github.com/GeoscienceAustralia/dea-airflow/',\n 'version': '1.0.0'}\n })\n\n # Create dataset ID based on Kirill's magic\n temp_metadata['id'] = str(odc_uuid(\"s2_to_s3_rolling\", \"1.0.0\", [temp_metadata['id']]))\n\n # Write to S3 directly\n s3_resource.Object(key=s3_metadata_path).put(\n Body=yaml.dump(temp_metadata, default_flow_style=False, Dumper=yaml.CSafeDumper)\n )\n\n LOG.info(\"Finished uploaded metadata %s to %s\", yaml_file, s3_metadata_path)\n\n\ndef sync_dates(_num_days, _end_date, _s3_bucket, _update='no'):\n \"\"\"\n Sync granules to S3 bucket for specified dates\n :param _num_days: Number of days to process before the end date.\n :param _end_date: End date for processing granules.\n :param _s3_bucket: Name of the S3 bucket\n :param _update: Option for granule/metadata update\n ('granule_metadata' or 'granule' or 'metadata' or 'no')\n \"\"\"\n # Initialise error list\n error_list = []\n\n # Since all file paths are of the form:\n # /g/data/if87/datacube/002/S2_MSI_ARD/packaged/YYYY-mm-dd/\n # we can simply list all the granules per date and sync them\n datetime_end = dt.today() if _end_date == 'today' else dt.strptime(_end_date, \"%Y-%m-%d\")\n\n # Get list of granules\n list_of_granules = find_granules(_num_days, datetime_end)\n LOG.info(\"Found %s files to process\", len(list_of_granules))\n\n # For each granule, sync it if it needs syncing\n if len(list_of_granules) > 0:\n for granule in list_of_granules:\n LOG.info(\"Processing %s\", granule)\n\n yaml_file = \"{nci_path}/{granule}/ARD-METADATA.yaml\".format(\n nci_path=NCI_DIR,\n granule=granule\n )\n # Checking if metadata file exists\n if Path(yaml_file).exists():\n # s3://dea-public-data\n # /L2/sentinel-2-nbar/S2MSIARD_NBAR/2017-07-02\n # /S2A_OPER_MSI_ARD_TL_SGS__20170702T022539_A010581_T54LTL_N02.05\n # /ARD-METADATA.yaml\n s3_metadata_path = \"{s3_path}/{granule}/ARD-METADATA.yaml\".format(\n s3_path=S3_PATH,\n granule=granule\n )\n\n already_processed = check_granule_exists(_s3_bucket, s3_metadata_path)\n\n # Maybe todo: include a flag to force replace\n # Check if already processed and apply sync action accordingly\n sync_action = 'granule_metadata' if not already_processed else _update\n\n if sync_action != 'no':\n if sync_action in ('granule_metadata', 'granule'):\n sync_success = sync_granule(granule, _s3_bucket)\n else:\n sync_success = True\n\n if sync_success and (sync_action in ('metadata', 'granule_metadata')):\n # Replace the metadata with a deterministic ID\n replace_metadata(yaml_file, _s3_bucket, s3_metadata_path)\n LOG.info(\"Finished processing and/or uploaded metadata to %s\",\n s3_metadata_path)\n else:\n LOG.error(\"Failed to sync data... 
skipping\")\n error_list.append(f\"Failed to sync {granule} because of an error in the sync command\")\n else:\n LOG.warning(\"Metadata exists, not syncing %s\", granule)\n else:\n LOG.error(\"Metadata is missing, not syncing %s\", granule)\n error_list.append(f\"Failed to sync {granule} because of missing metadata file\")\n else:\n LOG.warning(\"Didn't find any granules to process...\")\n\n # Raise exception if there was any error during upload process\n if error_list:\n raise ValueError(\"\\n\".join(error_list))\n\n\n@click.command()\n@click.option(\"--numdays\", '-n', type=int, required=True)\n@click.option(\"--enddate\", '-d', type=str, required=True)\n@click.option(\"--s3bucket\", '-b', type=str, required=True)\n@click.option('--doupdate', '-u',\n type=click.Choice(['granule_metadata', 'granule', 'metadata', 'no'], case_sensitive=True),\n default='no')\ndef main(numdays, enddate, s3bucket, doupdate):\n \"\"\"\n Script to sync Sentinel-2 data from NCI to AWS S3 bucket\n :param numdays: Number of days to process before the end date.\n :param enddate: End date for processing granules.\n :param s3bucket: Name of the S3 bucket\n :param doupdate: Option for granule/metadata update\n \"\"\"\n sync_dates(numdays, enddate, s3bucket, doupdate)\n LOG.info(\"Syncing %s days back from %s into the %s bucket and update is %s\",\n numdays, enddate, s3bucket, doupdate)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/s2_to_s3_rolling.py","file_name":"s2_to_s3_rolling.py","file_ext":"py","file_size_in_byte":9529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"36229110","text":"a,b=map(int,input().split())\n# a number of the form prime^n (n >= 2) is an \"almost prime\"\nflag=[True]*(int(b**0.5)+1)\nflag[1]=False\nfor i in range(2,int(b**0.5)+1):\n if i*i>int(b**0.5)+1:\n break\n if not flag[i]:\n continue\n for j in range(i*i,int(b**0.5)+1,i):\n flag[j]=False\nanswer=0\nfor i in range(1,len(flag)):\n if flag[i]:\n j=i*i\n while True:\n if j < a:\n j*=i\n continue\n if j > b:\n break\n j*=i\n answer+=1\nprint(answer)","sub_path":"Algorithm/for_study/TH 1456.py","file_name":"TH 1456.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"177267823","text":"from random import shuffle\n\n\nn1 = str(input('First student: '))\nn2 = str(input('Second student: '))\nn3 = str(input('Third student: '))\nn4 = str(input('Fourth student: '))\nalunos = [n1, n2, n3, n4]\nshuffle(alunos)\nprint(f'Presentation order: {alunos}')\n","sub_path":"CursoemVideo/ex020.py","file_name":"ex020.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"238183621","text":"#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"Defines parse tree nodes for the ZetaSQL parser.\n\nThis program defines parse tree node subclasses of ASTNode. 
It generates\nheaders and other files from templates.\n\nStill a work in progress.\n\n\"\"\"\n\nimport enum\nimport re\n\nfrom absl import app\nfrom absl import flags\nimport jinja2\n\nfrom zetasql.parser.generator_utils import CleanComment\nfrom zetasql.parser.generator_utils import ScalarType\nfrom zetasql.parser.generator_utils import Trim\n\n_make_enum_name_re = re.compile(r'([a-z])([A-Z])')\n\n\ndef NameToEnumName(name):\n \"\"\"Convert a camel-case c++ ASTClassName into AST_CLASS_NAME.\"\"\"\n return _make_enum_name_re.sub(r'\\1_\\2', name.replace('AST', 'Ast')).upper()\n\nSCALAR_BOOL = ScalarType(\n 'bool',\n cpp_default='false')\n\nSCALAR_STRING = ScalarType(\n 'std::string')\n\nSCALAR_ID_STRING = ScalarType(\n 'IdString')\n\n\n# Identifies the FieldLoader method used to populate member fields.\n# Each node field in a subclass is added to the children_ vector in ASTNode,\n# then additionally added to a type-specific field in the subclass using one\n# of these methods:\n# REQUIRED: The next node in the vector, which must exist, is used for this\n# field.\n# OPTIONAL: The next node in the vector, if it exists, is used for this field.\n# REST_AS_REPEATED: All remaining nodes, if any, are used for this field,\n# which should be a vector type.\n# See Add* methods in ast_node.h for further details.\nclass FieldLoaderMethod(enum.Enum):\n REQUIRED = 0\n OPTIONAL = 1\n REST_AS_REPEATED = 2\n\n\ndef Field(name,\n ctype,\n field_loader=FieldLoaderMethod.OPTIONAL,\n comment=None,\n gen_setters_and_getters=True):\n \"\"\"Make a field to put in a node class.\n\n Args:\n name: field name\n ctype: c++ type for this field\n Should be a ScalarType like an int, string or enum type,\n or the name of a node class type (e.g. ASTExpression).\n Cannot be a pointer type, and should not include modifiers like\n const.\n field_loader: FieldLoaderMethod enum specifies which FieldLoader method\n to use for this field.\n comment: Comment text for this field. Text will be stripped and\n de-indented.\n gen_setters_and_getters: When False, suppress generation of default\n template-based get and set methods. 
Non-standard alternatives\n may be supplied via extra_defs.\n Returns:\n The newly created field.\n\n Raises:\n RuntimeError: If an error is detected in one or more arguments.\n \"\"\"\n if field_loader == FieldLoaderMethod.REST_AS_REPEATED:\n is_vector = True\n else:\n is_vector = False\n\n member_name = name + '_'\n if isinstance(ctype, ScalarType):\n member_type = ctype.ctype\n cpp_default = ctype.cpp_default\n is_node_ptr = False\n enum_name = None\n element_storage_type = None\n else:\n element_storage_type = 'const %s*' % ctype\n if is_vector:\n member_type = 'absl::Span<%s const>' % element_storage_type\n cpp_default = ''\n is_node_ptr = False\n enum_name = None\n else:\n member_type = 'const %s*' % ctype\n cpp_default = 'nullptr'\n is_node_ptr = True\n enum_name = NameToEnumName(ctype)\n return {\n 'ctype': ctype,\n 'cpp_default': cpp_default,\n 'member_name': member_name, # member variable name\n 'name': name, # name without trailing underscore\n 'comment': CleanComment(comment, prefix=' // '),\n 'member_type': member_type,\n 'is_node_ptr': is_node_ptr,\n 'field_loader': field_loader.name,\n 'enum_name': enum_name,\n 'is_vector': is_vector,\n 'element_storage_type': element_storage_type,\n 'gen_setters_and_getters': gen_setters_and_getters,\n }\n\n\nclass TreeGenerator(object):\n \"\"\"Generates code to define tree objects.\n \"\"\"\n\n def __init__(self):\n self.nodes = []\n\n def AddNode(self,\n name,\n parent,\n is_abstract=False,\n fields=None,\n extra_defs='',\n comment=None,\n use_custom_debug_string=False,\n force_gen_init_fields=False):\n \"\"\"Add a node class to be generated.\n\n Args:\n name: class name for this node\n parent: class name of the parent node\n is_abstract: true if this node is an abstract class\n fields: list of fields in this class; created with Field function\n extra_defs: extra c++ definitions to put in this class.\n comment: Comment text for this node. 
Text will be stripped and\n de-indented.\n use_custom_debug_string: If True, generate prototype for overridden\n SingleNodeDebugString method.\n force_gen_init_fields: If True, generate the InitFields method even when\n there are no fields to be added, so as to ensure there are no children.\n \"\"\"\n if fields is None:\n fields = []\n if is_abstract:\n class_final = ''\n else:\n class_final = 'final '\n enum_name = NameToEnumName(name)\n # generate init_fields if there is at least one is_node_ptr or\n # is_vector field, or if force_gen_init_fields was requested.\n gen_init_fields = force_gen_init_fields\n for field in fields:\n if field['is_node_ptr'] or field['is_vector']:\n gen_init_fields = True\n node_dict = ({\n 'name': name,\n 'parent': parent,\n 'class_final': class_final,\n 'is_abstract': is_abstract,\n 'comment': CleanComment(comment, prefix='// '),\n 'fields': fields,\n 'enum_name': enum_name,\n 'extra_defs': extra_defs.rstrip(),\n 'use_custom_debug_string': use_custom_debug_string,\n 'gen_init_fields': gen_init_fields})\n\n self.nodes.append(node_dict)\n\n def Generate(\n self,\n output_path,\n h_template_path=None):\n \"\"\"Materialize the template to generate the output file.\"\"\"\n\n jinja_env = jinja2.Environment(\n undefined=jinja2.StrictUndefined,\n autoescape=False,\n trim_blocks=True,\n lstrip_blocks=True,\n line_statement_prefix='# ',\n loader=jinja2.FileSystemLoader('', followlinks=True))\n\n context = {\n 'nodes': self.nodes,\n # For when we need to force a blank line and jinja wants to\n # eat blank lines from the template.\n 'blank_line': '\\n'\n }\n\n h_template = jinja_env.get_template(h_template_path)\n out = open(output_path, 'wt')\n out.write(Trim(h_template.render(context)))\n out.close()\n\n\ndef main(argv):\n if len(argv) != 3:\n raise Exception(\n 'Usage: %s <output_path> <h_template_path>'\n )\n\n output_path = argv[1]\n h_template_path = argv[2]\n\n gen = TreeGenerator()\n\n gen.AddNode(\n name='ASTStatement',\n parent='ASTNode',\n is_abstract=True,\n comment=\"\"\"\n Superclass of all Statements.\n \"\"\",\n extra_defs=\"\"\"\n bool IsStatement() const final { return true; }\n bool IsSqlStatement() const override { return true; }\n \"\"\"\n )\n\n gen.AddNode(\n name='ASTQueryExpression',\n parent='ASTNode',\n is_abstract=True,\n comment=\"\"\"\n Superclass for all query expressions. These are top-level syntactic\n constructs (outside individual SELECTs) making up a query. 
These include\n Query itself, Select, UnionAll, etc.\n \"\"\",\n extra_defs=\"\"\"\n bool IsQueryExpression() const override { return true; }\n \"\"\",\n fields=[\n Field(\n 'parenthesized',\n SCALAR_BOOL,\n field_loader=FieldLoaderMethod.REQUIRED)\n ])\n\n gen.AddNode(\n name='ASTQuery',\n parent='ASTQueryExpression',\n fields=[\n Field(\n 'with_clause',\n 'ASTWithClause',\n comment=\"\"\"\n If present, the WITH clause wrapping this query.\n \"\"\"),\n Field(\n 'query_expr',\n 'ASTQueryExpression',\n field_loader=FieldLoaderMethod.REQUIRED,\n comment=\"\"\"\n The query_expr can be a single Select, or a more complex structure\n composed out of nodes like SetOperation and Query.\n \"\"\"),\n Field(\n 'order_by',\n 'ASTOrderBy',\n comment=\"\"\"\n If present, applies to the result of <query_expr> as appropriate.\n \"\"\"),\n Field(\n 'limit_offset',\n 'ASTLimitOffset',\n comment=\"\"\"\n If present, this applies after the result of <query_expr> and\n <order_by>.\n \"\"\"),\n Field('is_nested', SCALAR_BOOL),\n Field(\n 'is_pivot_input',\n SCALAR_BOOL,\n comment=\"\"\"\n True if this query represents the input to a pivot clause.\n \"\"\")\n ],\n use_custom_debug_string=True\n )\n\n gen.AddNode(\n name='ASTExpression',\n parent='ASTNode',\n is_abstract=True,\n extra_defs=\"\"\"\n bool IsExpression() const override { return true; }\n\n // Returns true if this expression is allowed to occur as a child of a\n // comparison expression. This is not allowed for unparenthesized comparison\n // expressions and operators with a lower precedence level (AND, OR, and NOT).\n virtual bool IsAllowedInComparison() const { return true; }\n \"\"\",\n fields=[\n Field(\n 'parenthesized',\n SCALAR_BOOL,\n field_loader=FieldLoaderMethod.REQUIRED)\n ])\n\n gen.AddNode(\n name='ASTQueryStatement',\n parent='ASTStatement',\n comment=\"\"\"\n Represents a single query statement.\n \"\"\",\n fields=[\n Field(\n 'query',\n 'ASTQuery',\n field_loader=FieldLoaderMethod.REQUIRED),\n ])\n\n gen.AddNode(\n name='ASTSelect',\n parent='ASTQueryExpression',\n use_custom_debug_string=True,\n fields=[\n Field(\n 'hint',\n 'ASTHint'),\n Field(\n 'anonymization_options',\n 'ASTOptionsList'),\n Field(\n 'distinct',\n SCALAR_BOOL),\n Field(\n 'select_as',\n 'ASTSelectAs'),\n Field(\n 'select_list',\n 'ASTSelectList',\n field_loader=FieldLoaderMethod.REQUIRED),\n Field(\n 'from_clause',\n 'ASTFromClause'),\n Field(\n 'where_clause',\n 'ASTWhereClause'),\n Field(\n 'group_by',\n 'ASTGroupBy'),\n Field(\n 'having',\n 'ASTHaving'),\n Field(\n 'qualify',\n 'ASTQualify'),\n Field(\n 'window_clause',\n 'ASTWindowClause'),\n ])\n\n gen.AddNode(\n name='ASTSelectList',\n parent='ASTNode',\n fields=[\n Field(\n 'columns',\n 'ASTSelectColumn',\n field_loader=FieldLoaderMethod.REST_AS_REPEATED),\n ])\n\n gen.AddNode(\n name='ASTSelectColumn',\n parent='ASTNode',\n fields=[\n Field(\n 'expression',\n 'ASTExpression',\n field_loader=FieldLoaderMethod.REQUIRED),\n Field(\n 'alias',\n 'ASTAlias')\n ])\n\n gen.AddNode(\n name='ASTLeaf',\n parent='ASTExpression',\n is_abstract=True,\n use_custom_debug_string=True,\n extra_defs=\"\"\"\n // image() references data with the same lifetime as this ASTLeaf object.\n absl::string_view image() const { return image_; }\n void set_image(std::string image) { image_ = std::move(image); }\n\n bool IsLeaf() const override { return true; }\n \"\"\",\n # Triggers check that there were no children.\n force_gen_init_fields=True,\n fields=[\n Field(\n 'image',\n SCALAR_STRING,\n gen_setters_and_getters=False)\n ])\n\n gen.AddNode(\n 
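# NOTE (editor): ASTLeaf subclasses such as the one below carry the raw token text via image(); is_hex() is only declared here and is presumably implemented out of line by inspecting that image for a '0x' prefix.\n 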
name='ASTIntLiteral',\n parent='ASTLeaf',\n extra_defs=\"\"\"\n\n bool is_hex() const;\n \"\"\",\n )\n\n gen.AddNode(\n name='ASTIdentifier',\n parent='ASTExpression',\n use_custom_debug_string=True,\n extra_defs=\"\"\"\n // Set the identifier string. Input is the unquoted identifier.\n // There is no validity checking here. This assumes the identifier was\n // validated and unquoted in zetasql.jjt.\n void SetIdentifier(IdString identifier) {\n id_string_ = identifier;\n }\n\n // Get the unquoted and unescaped string value of this identifier.\n IdString GetAsIdString() const { return id_string_; }\n std::string GetAsString() const { return id_string_.ToString(); }\n absl::string_view GetAsStringView() const {\n return id_string_.ToStringView();\n }\n \"\"\",\n # Triggers check that there were no children.\n force_gen_init_fields=True,\n fields=[\n Field(\n 'id_string',\n SCALAR_ID_STRING,\n gen_setters_and_getters=False)\n ])\n\n gen.AddNode(\n name='ASTAlias',\n parent='ASTNode',\n fields=[\n Field(\n 'identifier',\n 'ASTIdentifier',\n field_loader=FieldLoaderMethod.REQUIRED),\n ],\n extra_defs=\"\"\"\n // Get the unquoted and unescaped string value of this alias.\n std::string GetAsString() const;\n absl::string_view GetAsStringView() const;\n IdString GetAsIdString() const;\n \"\"\"\n )\n\n gen.AddNode(\n name='ASTGeneralizedPathExpression',\n parent='ASTExpression',\n is_abstract=True,\n comment=\"\"\"\n Parent class that corresponds to the subset of ASTExpression nodes that are\n allowed by the <generalized_path_expression> grammar rule. It allows for some\n extra type safety vs. simply passing around ASTExpression as\n <generalized_path_expression>s.\n\n Only the following node kinds are allowed:\n - AST_PATH_EXPRESSION\n - AST_DOT_GENERALIZED_FIELD where the left hand side is a\n <generalized_path_expression>.\n - AST_DOT_IDENTIFIER where the left hand side is a\n <generalized_path_expression>.\n - AST_ARRAY_ELEMENT where the left hand side is a\n <generalized_path_expression>\n\n Note that the type system does not capture the \"pureness constraint\" that,\n e.g., the left hand side of an AST_DOT_GENERALIZED_FIELD must be a\n <generalized_path_expression> in order for the node to be a\n <generalized_path_expression>. However, it is still\n considered a bug to create a variable with type ASTGeneralizedPathExpression\n that does not satisfy the pureness constraint (similarly, it is considered a\n bug to call a function with an ASTGeneralizedPathExpression argument that\n does not satisfy the pureness constraint).\n \"\"\",\n extra_defs=\"\"\"\n // Returns an error if 'path' contains a node that cannot come from the\n // <generalized_path_expression> grammar rule.\n static absl::Status VerifyIsPureGeneralizedPathExpression(\n const ASTExpression* path);\n \"\"\")\n\n gen.AddNode(\n name='ASTPathExpression',\n parent='ASTGeneralizedPathExpression',\n comment=\"\"\"\n This is used for dotted identifier paths only, not dotting into\n arbitrary expressions (see ASTDotIdentifier below).\n \"\"\",\n fields=[\n Field(\n 'names',\n 'ASTIdentifier',\n field_loader=FieldLoaderMethod.REST_AS_REPEATED,\n gen_setters_and_getters=False),\n ],\n # The existing API unfortunately uses name(int i) rather than names(int i)\n extra_defs=\"\"\"\n const int num_names() const { return names_.size(); }\n const absl::Span<const ASTIdentifier* const>& names() const {\n return names_;\n }\n const ASTIdentifier* name(int i) const { return names_[i]; }\n const ASTIdentifier* first_name() const { return names_.front(); }\n const ASTIdentifier* last_name() const { return names_.back(); }\n\n // Return this PathExpression as a dotted SQL identifier string, with\n // quoting if necessary. 
If <max_prefix_size> is non-zero, include at most\n // that many identifiers from the prefix of <names>.\n std::string ToIdentifierPathString(size_t max_prefix_size = 0) const;\n\n // Return the vector of identifier strings (without quoting).\n std::vector<std::string> ToIdentifierVector() const;\n\n // Similar to ToIdentifierVector(), but returns a vector of IdString's,\n // avoiding the need to make copies.\n std::vector<IdString> ToIdStringVector() const;\n \"\"\"\n )\n\n gen.AddNode(\n name='ASTTableExpression',\n parent='ASTNode',\n is_abstract=True,\n comment=\"\"\"\n Superclass for all table expressions. These are things that appear in the\n from clause and produce a stream of rows like a table.\n This includes table scans, joins and subqueries.\n \"\"\",\n extra_defs=\"\"\"\n bool IsTableExpression() const override { return true; }\n\n // Return the alias, if the particular subclass has one.\n virtual const ASTAlias* alias() const { return nullptr; }\n\n // Return the ASTNode location of the alias for this table expression,\n // if applicable.\n const ASTNode* alias_location() const;\n \"\"\"\n )\n\n gen.AddNode(\n name='ASTTablePathExpression',\n parent='ASTTableExpression',\n comment=\"\"\"\n TablePathExpression are the TableExpressions that introduce a single scan,\n referenced by a path expression or UNNEST, and can optionally have\n aliases, hints, and WITH OFFSET.\n \"\"\",\n fields=[\n Field(\n 'path_expr',\n 'ASTPathExpression',\n comment=\"\"\"\n One of path_exp or unnest_expr must be non-NULL but not both.\n \"\"\"),\n Field(\n 'unnest_expr',\n 'ASTUnnestExpression'),\n Field(\n 'hint',\n 'ASTHint'),\n Field(\n 'alias',\n 'ASTAlias',\n # Existing API getter specifies \"override\"\n gen_setters_and_getters=False),\n Field(\n 'with_offset',\n 'ASTWithOffset',\n comment=\"\"\"\n Present if the scan had WITH OFFSET.\n \"\"\"),\n Field(\n 'pivot_clause',\n 'ASTPivotClause',\n comment=\"\"\"\n One of pivot_clause or unpivot_clause can be present but not both.\n \"\"\"),\n Field(\n 'unpivot_clause',\n 'ASTUnpivotClause'),\n Field(\n 'for_system_time',\n 'ASTForSystemTime'),\n Field(\n 'sample_clause',\n 'ASTSampleClause'),\n ],\n extra_defs=\"\"\"\n const ASTAlias* alias() const override { return alias_; }\n \"\"\"\n )\n\n gen.AddNode(\n name='ASTFromClause',\n parent='ASTNode',\n fields=[\n Field(\n 'table_expression',\n 'ASTTableExpression',\n field_loader=FieldLoaderMethod.REQUIRED,\n comment=\"\"\"\n A FromClause has exactly one TableExpression child.\n If the FROM clause has commas, they will be expressed as a tree\n of ASTJoin nodes with join_type=COMMA.\n \"\"\"),\n ],\n )\n\n gen.AddNode(\n name='ASTWhereClause',\n parent='ASTNode',\n fields=[\n Field(\n 'expression',\n 'ASTExpression',\n field_loader=FieldLoaderMethod.REQUIRED),\n ],\n )\n\n gen.AddNode(\n name='ASTBooleanLiteral',\n parent='ASTLeaf',\n fields=[\n Field(\n 'value',\n SCALAR_BOOL),\n ],\n )\n\n gen.AddNode(\n name='ASTAndExpr',\n parent='ASTExpression',\n fields=[\n Field(\n 'conjuncts',\n 'ASTExpression',\n field_loader=FieldLoaderMethod.REST_AS_REPEATED),\n ],\n extra_defs=\"\"\"\n bool IsAllowedInComparison() const override { return parenthesized(); }\n \"\"\"\n )\n\n gen.Generate(\n output_path,\n h_template_path=h_template_path)\n\nif __name__ == '__main__':\n app.run(main)\n","sub_path":"zetasql/parser/gen_parse_tree.py","file_name":"gen_parse_tree.py","file_ext":"py","file_size_in_byte":20437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"319577805","text":"# Given two linked lists, find their first common node.\n# 
(Note: since the inputs are linked lists, invalid test data is reported in another way; the inputs are guaranteed to be valid.)\n\n\nclass Solution:\n def FindFirstCommonNode(self , pHead1 , pHead2 ):\n p1, length1 = pHead1, 0\n p2, length2 = pHead2, 0\n while p1:\n p1 = p1.next\n length1 += 1\n while p2:\n p2 = p2.next\n length2 += 1\n if length1 < length2: # make pHead1 point to the longer list\n pHead1, pHead2 = pHead2, pHead1\n length1, length2 = length2, length1\n\n for _ in range(length1 - length2):\n pHead1 = pHead1.next\n while pHead1 and pHead2:\n if pHead1 == pHead2:\n return pHead1\n else:\n pHead1 = pHead1.next\n pHead2 = pHead2.next\n return None","sub_path":"JZ36. 两个链表的第一个公共节点.py","file_name":"JZ36. 两个链表的第一个公共节点.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"607465657","text":"import argparse\nfrom typing import Dict, Any\n\nfrom ..base import VersionedYAMLParser\nfrom ....enums import PodRoleType\nfrom ....flow import Flow\nfrom ....helper import expand_env_var, ArgNamespace\nfrom ....parsers import set_pod_parser, set_gateway_parser\n\n\ndef _get_taboo():\n \"\"\"\n :return: set of keys that should not be dumped\n \"\"\"\n return {k.dest for k in set_pod_parser()._actions if k.help == argparse.SUPPRESS}\n\n\nclass V1Parser(VersionedYAMLParser):\n \"\"\"V1Parser introduces new syntax and features:\n\n - It has a top-level field ``version``\n - ``pods`` is now a List of Dict (rather than a Dict as prev.)\n - ``name`` is now optional\n - new field ``method`` can be used to specify how to add this Pod into the Flow; the available methods are:\n - ``add``: (default) equal to `Flow.add(...)`\n - ``needs``: equal to `Flow.needs(...)`\n - ``inspect``: equal to `Flow.inspect(...)`\n\n An example V1 YAML config can be found below:\n .. highlight:: yaml\n .. 
code-block:: yaml\n\n !Flow\n version: '1.0'\n pods:\n - name: pod0 # notice the change here, name is now an attribute\n method: add # by default method is always add, available: add, needs, inspect\n uses: _pass\n needs: gateway\n - name: pod1 # notice the change here, name is now an attribute\n method: add # by default method is always add, available: add, needs, inspect\n uses: _pass\n needs: gateway\n - method: inspect # add an inspect node on pod1\n - method: needs # let's try something new in Flow YAML v1: needs\n needs: [pod1, pod0]\n\n\n \"\"\"\n version = '1' # the version number this parser is designed for\n\n def parse(self, cls: type, data: Dict) -> 'Flow':\n \"\"\"\n :param cls: the class registered for dumping/loading\n :param data: flow yaml file loaded as python dict\n :return: the Flow YAML parser given the syntax version number\n \"\"\"\n envs = data.get('env', {}) # type: Dict[str, str]\n p = data.get('with', {}) # type: Dict[str, Any]\n a = p.pop('args') if 'args' in p else ()\n k = p.pop('kwargs') if 'kwargs' in p else {}\n # maybe there are some hanging kwargs in \"parameters\"\n tmp_a = (expand_env_var(v) for v in a)\n tmp_p = {kk: expand_env_var(vv) for kk, vv in {**k, **p}.items()}\n obj = cls(*tmp_a, env=envs, **tmp_p)\n\n pp = data.get('pods', [])\n for pods in pp:\n p_pod_attr = {kk: expand_env_var(vv) for kk, vv in pods.items()}\n # in v1 YAML, flow is an optional argument\n if p_pod_attr.get('name', None) != 'gateway':\n # ignore gateway when reading, it will be added during build()\n method = p_pod_attr.get('method', 'add')\n # support methods: add, needs, inspect\n getattr(obj, method)(**p_pod_attr, copy_flow=False)\n return obj\n\n def dump(self, data: 'Flow') -> Dict:\n \"\"\"\n :param data: versioned flow object\n :return: the dictionary given a versioned flow object\n \"\"\"\n r = {}\n if data._version:\n r['version'] = data._version\n\n if data._env:\n r['env'] = data._env\n\n if data._kwargs:\n r['with'] = data._kwargs\n\n if data._pod_nodes:\n r['pods'] = []\n\n last_name = 'gateway'\n for k, v in data._pod_nodes.items():\n if k == 'gateway':\n continue\n kwargs = {}\n # only add \"needs\" when the value is not the last pod name\n if list(v.needs) != [last_name]:\n kwargs = {'needs': list(v.needs)}\n\n # get nondefault kwargs\n parser = set_pod_parser()\n if v.role == PodRoleType.GATEWAY:\n parser = set_gateway_parser()\n\n non_default_kw = ArgNamespace.get_non_defaults_args(v.args, parser)\n\n kwargs.update(non_default_kw)\n\n for t in _get_taboo():\n if t in kwargs:\n kwargs.pop(t)\n last_name = kwargs['name']\n r['pods'].append(kwargs)\n return r\n","sub_path":"jina/jaml/parsers/flow/v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"504578599","text":"from bs4 import BeautifulSoup\nimport requests\nimport sys\nimport json\n\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport pathlib\n\nDRIVER_BIN = str(pathlib.Path().absolute()) + '/chromedriver'\n\n# Returns the HTML of a page given its URL\ndef get_page(url):\n\thtml = requests.get(url).content\n\treturn html\n\n# Builds an array of objects with the entry points to the written reviews\ndef get_reviews(html):\n\tsoup = BeautifulSoup(html, 'html.parser')\n\tpages = soup.find_all('section', 'broll wrap')\n\tprint(len(pages))\n\tresponse = []\n\tfor page in pages:\n\t\treviews = page.find('div', 'tbl').find_all('article', 'article REVIEW')\n\t\tfor review in reviews:\n\t\t\tt = review.find('div', 't')\n\t\t\tm = review.find('div', 'm')\n\t\t\timage = t.find('a', 'thumb score-wrapper').find('img', 'thumb').attrs['src']\n\t\t\tlink = t.find('a', 'thumb score-wrapper').attrs['href']\n\t\t\tscore = t.find('a', 'thumb score-wrapper').find('figure').find('div').find('span').find('span').get_text()\n\t\t\tdate = m.find('div', 'info').find('time').attrs['datetime']\n\t\t\ttitle = m.find('h3').get_text().replace(' - Análise', '')\n\t\t\tsynopsis = m.find('p').get_text() \n\n\t\t\tresponse.append({\n\t\t\t\t'image' : image,\n\t\t\t\t'link' : link,\n\t\t\t\t'score' : score,\n\t\t\t\t'date' : date, \n\t\t\t\t'title' : title,\n\t\t\t\t'synopsis' : synopsis\n\t\t\t\t})\n\treturn response\n\t\n# Function to print the output to the screen\ndef print_output(obj, out_format = None):\n\tif out_format == 'json':\n\t\tprint(json.dumps(obj, indent = 4, ensure_ascii = False))\n\telse:\n\t\tprint(obj)\n\n# Automatically scrolls the page (the site uses infinite scrolling)\ndef scroll_page(url):\n\tdriver = webdriver.Chrome(executable_path = DRIVER_BIN)\n\tdriver.get(url)\n\telem = driver.find_element_by_tag_name('body')\n\tlimit = 250\n\twhile limit:\n\t\ttry:\n\t\t\telem.send_keys(Keys.PAGE_DOWN)\n\t\t\ttime.sleep(0.2)\n\t\t\tlimit -= 1\n\t\texcept:\n\t\t\tbreak\n\n\treturn get_reviews(driver.page_source)\n\n# Adds the text field to each object, with the review associated with its URL\ndef add_text(json):\n\tfor obj in json:\n\t\tobj['subtitle'] = get_review_subtitle(obj['link'])\n\t\tobj['text'] = get_review_text(obj['link'])\n\t\tprint('DONE:', obj['title'])\n\treturn json\n\n# Gets the text of the review. There may be more pages, hence the need for a flag so we do not enter a loop\ndef get_review(html, main = True):\n\tsoup = BeautifulSoup(html, 'html.parser')\n\tsubtitle = soup.find('div','article-sub-headline').find('h3', id = 'id_deck').get_text()\n\t\n\tpages = soup.find('section', 'side-by-side article-content')\n\tarticle = pages.find('article', 'article-section article-page').find('div', id = 'id_text').get_text()\n\tpaginator = []\n\t# In case there are more pages\n\ttry:\n\t\tpaginator = soup.find('div', 'paginator').find_all('a')\n\texcept:\n\t\tpass\n\t# To avoid duplicating text\n\threfs = set()\n\t# 'article' holds the first 2 paragraphs of the review\n\ttxt = article\n\t# Rest of the review\n\tfor p in soup.find_all('p'):\n\t\tpara = p.get_text()\n\t\t# 'Continua' indicates there is more text on another page\n\t\tif para.lower() != 'continua...':\n\t\t\ttxt += para\n\t# compute the text of the remaining pages\n\tfor pagin in paginator:\n\t\ttry:\n\t\t\threfs.add(pagin.attrs['href'])\n\t\texcept KeyError:\n\t\t\tpass\t\n\t# store the text of the remaining pages\n\tif main == True:\n\t\tfor link in hrefs:\n\t\t\ttxt += get_review_text(link, False)\n\treturn txt\n\n# Returns the subtitle associated with the review\ndef get_review_subtitle(url):\n\thtml = get_page(url)\n\tsoup = BeautifulSoup(html, 'html.parser')\n\tsubtitle = soup.find('div','article-sub-headline').find('h3', id = 'id_deck').get_text()\n\treturn subtitle\n\n# Function to fetch the text of a review\ndef get_review_text(url, main = True):\n\thtml = get_page(url)\n\treturn get_review(html, main)\n\n# Function to store an array of json objects in a file\ndef save_to_file(data, name):\n\tf = open(name, \"w\")\n\tdados = json.dumps(data, indent = 4, ensure_ascii = False)\n\tf.write(dados)\n\tf.close()\n\tprint(\"File saved.\")\n\n# Function to read json objects into cache\ndef get_from_file():\n\twith open('reviews.json') as json_file:\n\t\tdata = json.load(json_file)\n\treturn data\n\nbase_url = 'https://pt.ign.com/article/review'\n\ndef main():\n\t# Get the reviews without their textual content\n\t#revs = scroll_page(base_url)\n\t#save_to_file(revs, \"dados/reviews.json\")\n\t#print(len(revs))\n\n\t#Get the textual content of the reviews\n\treviews = get_from_file()\n\tupdated = add_text(reviews)\n\tsave_to_file(updated, \"dados/reviews_full.json\")\n\t#print_output(updated, 'json')\n\t#print(get_review_text('https://pt.ign.com/man-eater/87513/review/maneater-analise'))\n\t#print(get_review_text('https://pt.ign.com/luigis-mansion-3/81362/review/luigis-mansion-3-analise'))\n\n\t\nif __name__ == '__main__':\n\tmain()\n","sub_path":"SPLN/Projetos/TP3/Lemmatization/src/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"619093989","text":"import socket\r\ndns_server=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\r\ndns_server.bind(('localhost',153))\r\nprint('dns server running on port 153..')\r\nname_servers={\r\n 'google.com':'8.8.8.8'\r\n}\r\n\r\ndef dns_search(domain):\r\n flag=0\r\n for key in name_servers.keys():\r\n if key==domain:\r\n return name_servers.get(key)\r\n flag=1\r\n\r\n if flag==0:\r\n return 'none'\r\n\r\n\r\nwhile True:\r\n message,addr=dns_server.recvfrom(4096)\r\n domain=message.decode()\r\n print('Query from ',addr,'for domain:',message.decode())\r\n dns_answer=dns_search(domain)\r\n if dns_answer=='none':\r\n dns_server.sendto(bytes('Host not found', 'utf8'), addr)\r\n\r\n else:\r\n dns_server.sendto(bytes(dns_answer, 'utf8'), addr)\r\n\r\n\r\n\r\n\r\n","sub_path":"NetworksLabCode/dns_server.py","file_name":"dns_server.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"39881344","text":"import json\nimport sys\nfrom collections import Counter\n\nvocab_size = 100\nlongest_text = 0\nlongest_program = 0\nall_words = set()\n\ndef fill_token_set(token_set, tree):\n if type(tree) is list:\n for a in tree:\n fill_token_set(token_set, a)\n else:\n token_set.add(tree)\n\ndef flatten(x):\n if type(x) is list:\n res = []\n for a in x:\n res.extend(flatten(a))\n return res\n else:\n return [x]\n\nunique_programs = set()\n\ndef analyze(file_name):\n global longest_text, longest_program\n count = [0] * vocab_size\n word_counter = Counter()\n with open(file_name, 'r') as f:\n for line in f:\n problem = json.loads(line)\n encoded_tree = problem['encoded_tree']\n token_set = set()\n fill_token_set(token_set, encoded_tree)\n for token in token_set:\n count[token] += 1\n text = problem['text']\n longest_text = max(len(text), longest_text)\n longest_program = max(len(flatten(encoded_tree)), longest_program)\n all_words.update(text)\n word_counter.update(text)\n\n unique_programs.add(str(problem['encoded_tree']))\n return count, word_counter\n\ndef main():\n global vocab_size\n if len(sys.argv) > 1:\n vocab_size = int(sys.argv[1])\n token_counts, word_counts = {}, {}\n used_tokens = 0\n for d_set in 'test dev train'.split():\n token_counts[d_set], word_counts[d_set] = analyze(d_set + '-' + str(vocab_size) + '.jsonl')\n for i in range(vocab_size):\n assert(token_counts['train'][i] > 0 or (token_counts['dev'][i] == 0 and token_counts['test'][i] == 0))\n if token_counts['train'][i] > 
+{"seq_id":"39881344","text":"import json\nimport sys\nfrom collections import Counter\n\nvocab_size = 100\nlongest_text = 0\nlongest_program = 0\nall_words = set()\n\ndef fill_token_set(token_set, tree):\n if type(tree) is list:\n for a in tree:\n fill_token_set(token_set, a)\n else:\n token_set.add(tree)\n\ndef flatten(x):\n if type(x) is list:\n res = []\n for a in x:\n res.extend(flatten(a))\n return res\n else:\n return [x]\n\nunique_programs = set()\n\ndef analyze(file_name):\n global longest_text, longest_program\n count = [0] * vocab_size\n word_counter = Counter()\n with open(file_name, 'r') as f:\n for line in f:\n problem = json.loads(line)\n encoded_tree = problem['encoded_tree']\n token_set = set()\n fill_token_set(token_set, encoded_tree)\n for token in token_set:\n count[token] += 1\n text = problem['text']\n longest_text = max(len(text), longest_text)\n longest_program = max(len(flatten(encoded_tree)), longest_program)\n all_words.update(text)\n word_counter.update(text)\n\n unique_programs.add(str(problem['encoded_tree']))\n return count, word_counter\n\ndef main():\n global vocab_size\n if len(sys.argv) > 1:\n vocab_size = int(sys.argv[1])\n token_counts, word_counts = {}, {}\n used_tokens = 0\n for d_set in 'test dev train'.split():\n token_counts[d_set], word_counts[d_set] = analyze(d_set + '-' + str(vocab_size) + '.jsonl')\n for i in range(vocab_size):\n assert(token_counts['train'][i] > 0 or (token_counts['dev'][i] == 0 and token_counts['test'][i] == 0))\n if token_counts['train'][i] > 0:\n used_tokens += 1\n for k in all_words:\n assert(word_counts['train'][k] > 0)\n print(sorted([(token_counts['train'][i], token_counts['dev'][i], token_counts['test'][i], i) for i in range(vocab_size)]))\n print(sorted([(word_counts['train'][k], word_counts['dev'][k], word_counts['test'][k], k) for k in all_words]))\n print(\"%d words\"%len(all_words))\n print('%d used tokens'%used_tokens)\n print('longest text: %d'%longest_text)\n print('longest program: %d'%longest_program)\n print('unique programs: %d'%len(unique_programs))\n return 0\n\nif __name__ == '__main__':\n exit(main())\n","sub_path":"filtered_data/encoded/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"224791369","text":"import numpy as np\n\n# Set values of constants used across all modules here\nMISSING_INT = -99\nINVALID_FLOAT = -99.0\nNUM_CHOICES = 3\nHOURS = np.array([0, 18, 38])\n\nDATA_LABLES_SIM = [\n \"Identifier\",\n \"Period\",\n \"Years_of_Education\",\n \"Lagged_Choice\",\n \"Experience_Part_Time\",\n \"Experience_Full_Time\",\n \"Type\",\n \"Choice\",\n \"Log_Systematic_Wage\",\n \"Period_Wage_N\",\n \"Period_Wage_P\",\n \"Period_Wage_F\",\n \"Non_Consumption_Utility_N\",\n \"Non_Consumption_Utility_P\",\n \"Non_Consumption_Utility_F\",\n \"Continuation_Value_N\",\n \"Continuation_Value_P\",\n \"Continuation_Value_F\",\n \"Value_Function_N\",\n \"Value_Function_P\",\n \"Value_Function_F\",\n]\n\n# Define data types for data set columns\nDATA_FORMATS_SIM = {\n key: (np.int if key in DATA_LABLES_SIM[:7] else np.float) for key in DATA_LABLES_SIM\n}\n","sub_path":"soepy/shared/shared_constants.py","file_name":"shared_constants.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
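# A compatibility note on the DATA_FORMATS_SIM mapping above: the np.int and np.float aliases were deprecated in NumPy 1.20 and removed in 1.24, so on current NumPy the same dict can be built with the Python builtins instead (a sketch, not part of the original module):\nDATA_FORMATS_SIM = {\n key: (int if key in DATA_LABLES_SIM[:7] else float) for key in DATA_LABLES_SIM\n}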
+{"seq_id":"365450629","text":"class Solution(object):\n def countNumbersWithUniqueDigits(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n if n == 0:\n return 1\n if n == 1:\n return 10\n res = 10\n choices = 9\n for i in range(1, n):\n choices = choices * (10 - i)\n res += choices\n return res\n","sub_path":"Project/Leetcode/Backtracking/357. Count Numbers with Unique Digits.py","file_name":"357. Count Numbers with Unique Digits.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
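# A quick sanity check of the counting logic above (illustrative, not part of the original solution): for n = 2 there are 10 one-digit numbers plus 9 * 9 = 81 two-digit numbers with distinct digits, 91 in total; each further digit multiplies the new choices by one less option.\nassert Solution().countNumbersWithUniqueDigits(2) == 91\nassert Solution().countNumbersWithUniqueDigits(3) == 91 + 9 * 9 * 8 # == 739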
+{"seq_id":"522015134","text":"\nfrom rest_framework import serializers, generics\nfrom geoinfo.models import Polygon\n\n\nclass PolygonSerializer(serializers.ModelSerializer):\n # parentCategory = serializers.PrimaryKeyRelatedField()\n\n class Meta:\n model = Polygon\n # fields = ('subpolygons', 'polygon_id', 'address')\n fields = ('polygon_id', 'address')\n\n# PolygonSerializer.base_fields['subpolygons'] = PolygonSerializer()\n\n\nclass PolygonView(generics.ListAPIView):\n\n model = Polygon\n serializer_class = PolygonSerializer\n\n\ndef extractor(polygon_id):\n response = {}\n polygon = Polygon.objects.get(polygon_id=polygon_id)\n if polygon.level == 4:\n orgs = {x.id: [x.name, x.org_type.name] for x in polygon.organizations.all()}\n response[polygon.polygon_id] = {'address': polygon.address,\n 'orgs': orgs}\n return response\n else:\n response[polygon.polygon_id] = {'address': polygon.address,\n 'childs': {}}\n childs = polygon.polygon_set.all()\n for child in childs:\n response[polygon.polygon_id]['childs'].update(extractor(child.polygon_id))\n\n return response\n","sub_path":"geoinfo/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"577554131","text":"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport os\nimport matplotlib.pylab as plt\nimport seaborn as sns\nfrom sklearn import metrics\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import mean_absolute_error\nimport lightgbm as lgb\nimport warnings\nfrom datetime import datetime\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n# MODEL NUMBER\nMODEL_NUMBER = 'M007'\n# Make a runid that is unique to the time this is run for easy tracking later\nrun_id = \"{:%m%d_%H%M}\".format(datetime.now())\n\nprint('Reading input files....')\ntrain_df = pd.read_csv('data/train_features.csv')\ntest_df = pd.read_csv('data/test_features.csv')\nss = pd.read_csv('input/sample_submission.csv')\nstructures = pd.read_csv('input/structures.csv')\n#####################\n# FEATURE CREATION\n#####################\nprint('Creating features....')\nprint('Available features {}'.format([x for x in train_df.columns]))\n#####################\n# CONFIGURABLES\n#####################\n\nrun_id = \"{:%m%d_%H%M}\".format(datetime.now())\n\nFEATURES = ['molecule_atom_index_0_dist_min',\n 'molecule_atom_index_0_dist_max',\n 'molecule_atom_index_1_dist_min',\n 'molecule_atom_index_0_dist_mean',\n 'molecule_atom_index_0_dist_std',\n 'dist',\n 'molecule_atom_index_1_dist_std',\n 'molecule_atom_index_1_dist_max',\n 'molecule_atom_index_1_dist_mean',\n 'molecule_atom_index_0_dist_max_diff',\n 'molecule_atom_index_0_dist_max_div',\n 'molecule_atom_index_0_dist_std_diff',\n 'molecule_atom_index_0_dist_std_div',\n 'atom_0_couples_count',\n 'molecule_atom_index_0_dist_min_div',\n 'molecule_atom_index_1_dist_std_diff',\n 'molecule_atom_index_0_dist_mean_div',\n 'atom_1_couples_count',\n 'molecule_atom_index_0_dist_mean_diff',\n 'molecule_couples',\n 'atom_index_1',\n 'molecule_dist_mean',\n 'molecule_atom_index_1_dist_max_diff',\n 'molecule_atom_index_0_y_1_std',\n 'molecule_atom_index_1_dist_mean_diff',\n 'molecule_atom_index_1_dist_std_div',\n 'molecule_atom_index_1_dist_mean_div',\n 'molecule_atom_index_1_dist_min_diff',\n 'molecule_atom_index_1_dist_min_div',\n 'molecule_atom_index_1_dist_max_div',\n 'molecule_atom_index_0_z_1_std',\n 'y_0',\n 'molecule_type_dist_std_diff',\n 'molecule_atom_1_dist_min_diff',\n 'molecule_atom_index_0_x_1_std',\n 'molecule_dist_min',\n 'molecule_atom_index_0_dist_min_diff',\n 'molecule_atom_index_0_y_1_mean_diff',\n 'molecule_type_dist_min',\n 'molecule_atom_1_dist_min_div',\n 'atom_index_0',\n 'molecule_dist_max',\n 'molecule_atom_1_dist_std_diff',\n 'molecule_type_dist_max',\n 'molecule_atom_index_0_y_1_max_diff',\n 'molecule_type_0_dist_std_diff',\n 'molecule_type_dist_mean_diff',\n 'molecule_atom_1_dist_mean',\n 'molecule_atom_index_0_y_1_mean_div',\n 'molecule_type_dist_mean_div',\n 'type']\n\nTARGET = 'scalar_coupling_constant'\nCAT_FEATS = ['atom_0', 'atom_1']\nN_ESTIMATORS = 500000\nVERBOSE = 500\nEARLY_STOPPING_ROUNDS = 500\nRANDOM_STATE = 529\nN_THREADS = 64\n\n#####################\n# CREATE FINAL DATASETS\n#####################\n\nX = train_df[FEATURES]\nX_test = test_df[FEATURES]\ny = train_df[TARGET]\n\n#####################\n# TRAIN MODEL\n#####################\nprint('Training model....')\nprint('Using features {}'.format([x for x in FEATURES]))\nlgb_params = {'num_leaves': 128,\n 'min_child_samples': 64,\n 'objective': 'regression',\n 'max_depth': 6,\n 'learning_rate': 0.9,\n \"boosting_type\": \"gbdt\",\n \"subsample_freq\": 1,\n \"subsample\": 0.9,\n \"bagging_seed\": 11,\n \"metric\": 'mae',\n \"verbosity\": -1,\n 'reg_alpha': 0.1,\n 'reg_lambda': 0.4,\n 'colsample_bytree': 1.0,\n }\n\nn_fold = 5\nfolds = KFold(n_splits=n_fold, shuffle=True, random_state=RANDOM_STATE)\n\n# Setup arrays for storing results\noof_df = train_df[['id','type','scalar_coupling_constant']].copy()\noof_df['oof_preds'] = 0\nprediction = np.zeros(len(X_test))\nscores = []\nfeature_importance = pd.DataFrame()\ntest_pred_df = test_df.copy()\ntest_pred_df['prediction'] = 0\nbond_count = 1\nnumber_of_bonds = len(X['type'].unique())\nfor bond_type in X['type'].unique():\n fold_count = 1\n # Train the model\n X_type = X.loc[X['type'] == bond_type]\n y_type = y.iloc[X_type.index]\n X_test_type = X_test.loc[X_test['type'] == bond_type]\n oof = np.zeros(len(X_type))\n prediction_type = np.zeros(len(X_test_type))\n for fold_n, (train_idx, valid_idx) in enumerate(folds.split(X_type)):\n print('Running Type {} - Fold {} of {}'.format(bond_type,\n fold_count, folds.n_splits))\n X_train, X_valid = X_type.iloc[train_idx], X_type.iloc[valid_idx]\n y_train, y_valid = y_type.iloc[train_idx], y_type.iloc[valid_idx]\n model = lgb.LGBMRegressor(**lgb_params, n_estimators=N_ESTIMATORS, n_jobs=N_THREADS)\n model.fit(X_train.drop('type', axis=1), y_train,\n eval_set=[(X_train.drop('type', axis=1), y_train),\n (X_valid.drop('type', axis=1), y_valid)],\n eval_metric='mae',\n verbose=VERBOSE,\n early_stopping_rounds=EARLY_STOPPING_ROUNDS)\n\n y_pred_valid = model.predict(X_valid.drop('type', axis=1),\n num_iteration=model.best_iteration_)\n y_pred = model.predict(X_test_type.drop('type', axis=1),\n num_iteration=model.best_iteration_)\n\n # feature importance ('type' is dropped before fitting, so exclude it here too;\n # the original comprehension used `if not 'type'`, which is always False and yields an empty list)\n fold_importance = pd.DataFrame()\n fold_importance[\"feature\"] = [feat for feat in FEATURES if feat != 'type']\n fold_importance[\"importance\"] = model.feature_importances_\n fold_importance[\"fold\"] = fold_n + 1\n feature_importance = pd.concat(\n [feature_importance, fold_importance], axis=0)\n\n scores.append(mean_absolute_error(y_valid, y_pred_valid))\n print('CV mean score: {0:.4f}, std: {1:.4f}.'.format(\n np.mean(scores), np.std(scores)))\n oof[valid_idx] = y_pred_valid.reshape(-1,)\n\n prediction_type += y_pred\n fold_count += 1\n oof_df.loc[oof_df['type'] == bond_type, 'oof_preds'] = oof\n prediction_type /= folds.n_splits\n test_pred_df.loc[test_pred_df['type'] ==\n bond_type, 'prediction'] = prediction_type\n\n if bond_count != number_of_bonds:\n # Save the results in between bond types because it takes a long time\n submission_csv_name = 'submissions/temp{}of{}_{}_{}_submission_lgb_{}folds_{}iter.csv'.format(\n bond_count, number_of_bonds, MODEL_NUMBER, run_id, n_fold, N_ESTIMATORS)\n oof_csv_name = 'oof/temp{}of{}_{}_{}_oof_lgb_{}folds_{}iter.csv'.format(\n bond_count, number_of_bonds, MODEL_NUMBER, run_id, n_fold, N_ESTIMATORS)\n fi_csv_name = 'fi/temp{}of{}_{}_{}_fi_lgb_{}folds_{}iter.csv'.format(\n bond_count, number_of_bonds, MODEL_NUMBER, run_id, n_fold, N_ESTIMATORS)\n\n print('Saving Temporary LGB Submission files:')\n print(submission_csv_name)\n ss = pd.read_csv('input/sample_submission.csv')\n ss['scalar_coupling_constant'] = test_pred_df['prediction']\n ss.to_csv(submission_csv_name, index=False)\n ss.head()\n # OOF\n oof_df.to_csv(oof_csv_name, index=False)\n # Feature Importance\n feature_importance.to_csv(fi_csv_name, index=False)\n bond_count += 1\n\noof_score = mean_absolute_error(oof_df['scalar_coupling_constant'], oof_df['oof_preds'])\nprint('Out of fold score is {:.4f}'.format(oof_score))\n\n#####################\n# SAVE RESULTS\n#####################\n# Save Prediction and name appropriately\nsubmission_csv_name = 'submissions/{}_{}_submission_lgb_{}folds_{:.4f}CV_{}iter.csv'.format(MODEL_NUMBER,\n run_id,\n n_fold,\n oof_score,\n N_ESTIMATORS)\noof_csv_name = 'oof/{}_{}_oof_lgb_{}folds_{:.4f}CV_{}iter.csv'.format(\n MODEL_NUMBER, run_id, n_fold, oof_score, N_ESTIMATORS)\nfi_csv_name = 'fi/{}_{}_fi_lgb_{}folds_{:.4f}CV_{}iter.csv'.format(\n MODEL_NUMBER, run_id, n_fold, oof_score, N_ESTIMATORS)\n\nprint('Saving LGB Submission as:')\nprint(submission_csv_name)\nss = pd.read_csv('input/sample_submission.csv')\nss['scalar_coupling_constant'] = test_pred_df['prediction']\nss.to_csv(submission_csv_name, index=False)\nss.head()\n# OOF\noof_df.to_csv(oof_csv_name, index=False)\n# Feature Importance\nfeature_importance.to_csv(fi_csv_name, index=False)\n\nprint('Done!')\n","sub_path":"scripts/M007-LGBM-ModelTypesSeperateFeats.py","file_name":"M007-LGBM-ModelTypesSeperateFeats.py","file_ext":"py","file_size_in_byte":9393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"79020159","text":"import networkx as nx\nimport matplotlib.pyplot as plt\n\nprint(\"hi\")\n\nG= nx.Graph()\nG.add_edge('A','B', weight=13, relation='friend')\nG.add_edge('B','C', weight=9, relation='family')\nG.add_edge('B','D', weight=7, relation='friend')\nG.add_edge('E','B', weight=10, relation='friend')\nG.add_edge('E','A', weight=1, relation='enemy')\nG.add_edge('F','B', weight=13, relation='family')\nG.edges(data=True)\n\nprint(\"try to print the network so far\")\nprint(G)\n# display what class g can do\n# print(help(G))\nfor key in G:\n print(\"key : {} , Value : {}\".format(key,G[key]))\nprint(\" bye \")\n\n","sub_path":"p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
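# A small follow-up sketch (not in the original script): the edges above carry 'weight' and 'relation' attributes, so they can be queried directly; G is the graph defined above.\nheaviest = max(G.edges('B', data=True), key=lambda e: e[2]['weight'])\nprint('strongest tie of B:', heaviest)\nprint('weighted degree of B:', G.degree('B', weight='weight'))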
+{"seq_id":"108253684","text":"# coding:utf-8\n'''''\n@author: shisg\n'''\n\nimport time\nfrom bs4 import BeautifulSoup\nimport requests\nfrom crap.tgb_article import ArticleProcess\n\nheaders = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Referer': 'https://www.taoguba.com.cn/moreTopic?userID=252069',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',\n 'cookie': 'UM_distinctid=16950eb5a54158-0c6cccf55df01-323b5b03-1fa400-16950eb5a55a94; Hm_lvt_cc6a63a887a7d811c92b7cc41c441837=1551840795; tgbuser=3125922; tgbpwd=03994D83985svxpfe5fqugn4ca; JSESSIONID=b1def412-3377-448f-bdd8-72290294ede3; Hm_lpvt_cc6a63a887a7d811c92b7cc41c441837=1552532301; CNZZDATA1574657=cnzz_eid%3D1216197461-1551835725-https%253A%252F%252Fwww.baidu.com%252F%26ntime%3D1552529777'\n}\n\n# How to check the page's character encoding:\n# import requests\n# res=requests.get('https://www.taoguba.com.cn/moreTopic?userID=252069')\n# print(res.encoding)\n\n# bolaaikong\n# url = 'https://www.taoguba.com.cn/moreTopic?userID=252069'\nurl = 'https://www.taoguba.com.cn/moreTopic?pageNum=31&pageNo=7&sortFlag=T&userID=252069'\nURL_PRE = 'https://www.taoguba.com.cn/'\n\n# Method 1: use urlopen\n# request = urllib.request.Request(url, None, headers)\n# page = urllib.request.urlopen(request)\n# contents = page.read()\n\n# Method 2: use requests, which decompresses gzip responses automatically\n# Carrying the cookie directly in the header is the most convenient option for me; it avoids cookie files and the log-in-then-save-cookie dance.\n# Just log in once on the computer and copy the cookie over, which is actually less work: the tedious part is visiting pages one by one, not copying the cookie.\n\npage = requests.get(url, headers=headers)\ncontents = page.text\n\nsoup = BeautifulSoup(contents, \"html.parser\")\narticle_link_list = []\n\n# Add every article on the current page to the list\nprint('Fetching the article list...')\nfor tag in soup.find_all('td', class_='suh'):\n # print tag\n title = tag.find('a').get_text().strip() # parse the article list; tune the node class or id selectors as needed\n link = tag.find('a').get('href')\n print(title + ' (' + link + ')')\n article_link_list.append(URL_PRE + link)\n\nprint('Article list fetched,', len(article_link_list), 'articles in total.')\nprint('Fetching article contents...')\n\n# Process the articles listed on the current page and save each one as a PDF.\npath = './Article/'\nprocess_count = 1\nfailed_count = 0\nfor article_url in article_link_list:\n print('Processing article', process_count, '...')\n try:\n ArticleProcess.save_pdf(article_url, path)\n print('Article', process_count, 'downloaded.')\n except Exception:\n print('Article', process_count, 'failed to download (' + article_url + ')')\n failed_count += 1\n\n process_count += 1\n time.sleep(2)\n\n\nprint('All files saved;', failed_count, 'downloads failed.')\n\n\n\n\n\n# Reference usage below: parsing a page\n# page = urllib.request.urlopen('http://movie.douban.com/top250?format=text')\n# contents = page.read()\n# # print(contents)\n# soup = BeautifulSoup(contents, \"html.parser\")\n# print(\"Douban Movie TOP250\" + \"\\n\" + \" Title Rating Votes Link \")\n# for tag in soup.find_all('div', class_='info'):\n# # print tag\n# m_name = tag.find('span', class_='title').get_text()\n# m_rating_score = float(tag.find('span', class_='rating_num').get_text())\n# m_people = tag.find('div', class_=\"star\")\n# m_span = m_people.findAll('span')\n# m_peoplecount = m_span[3].contents[0]\n# m_url = tag.find('a').get('href')\n# print(m_name + \" \" + str(m_rating_score) + \" \" + m_peoplecount + \" \" + m_url)\n","sub_path":"crap/tgb.py","file_name":"tgb.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
 +{"seq_id":"577267938","text":"from __future__ import division\nfrom optparse import OptionParser\nimport subprocess\n#from pyCondorSTAMPLib import *\nimport os\n\n# command line options\nparser = OptionParser()\nparser.set_defaults(verbose = False)\nparser.add_option(\"-d\", \"--dir\", dest = \"targetDirectory\",\n help = \"Path to job directory\",\n metavar = \"DIRECTORY\")\nparser.add_option(\"-o\", \"--output_dir\", dest = \"output_dir\",\n help = \"Path to directory to save standard output from executables called by this script\",\n metavar = \"DIRECTORY\")\nparser.add_option(\"-v\", action=\"store_true\", dest=\"verbose\")\n\n(options, args) = parser.parse_args()\n\ndef glueFileLocation(directory, filename):\n # join a directory and a filename with exactly one separator between them\n output = None\n if directory[-1] == \"/\":\n if filename[0] == \"/\":\n output = directory + filename[1:]\n else:\n output = directory + filename\n else:\n if filename[0] == \"/\":\n output = directory + filename\n else:\n output = directory + \"/\" + filename\n return output\n\nmmeExecutable = \"/home/quitzow/GIT/Development_Branches/MatlabExecutableDuctTape/getSNRandCluster\"\n\njobsDir = glueFileLocation(options.targetDirectory, \"jobs\")\njobGroupDirs = [glueFileLocation(jobsDir, x) for x in os.listdir(jobsDir) if \"job_group\" in x]\nindividualJobDirs = [glueFileLocation(glueFileLocation(y, x), \"grandstochtrackOutput\") for y in jobGroupDirs for x in os.listdir(y) if \"job\" in x]\nbkndOutputFiles = [glueFileLocation(y, x) for y in individualJobDirs for x in os.listdir(y) if \"bknd\" in x]\n\noutput_text = \"\"\n\nfor num in range(len(bkndOutputFiles)):\n files = os.listdir(individualJobDirs[num])\n if 'max_snr.txt' not in files:\n command = [mmeExecutable, bkndOutputFiles[num], individualJobDirs[num]]\n printable_command = \" \".join(x for x in command)\n print(printable_command)\n output_text += printable_command\n command_output = subprocess.Popen(command, stdout = subprocess.PIPE, stderr=subprocess.PIPE).communicate()#[0]\n output_text += command_output[0]\n print(command_output[0])\n if command_output[1]:\n output_text += \"ERROR\\n\" + command_output[1] + \"\\n\\n\\n\"\n print(command_output[1])\n else:\n print(\"Success\")\n output_text += \"\\nSuccess\\n\\n\"\n\nif options.output_dir:\n with open(options.output_dir + \"/nonCondorMatlabReader_output.txt\", \"w\") as outfile:\n outfile.write(output_text)\nprint(output_text)\n","sub_path":"nonCondorMatReader.py","file_name":"nonCondorMatReader.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
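# An aside on glueFileLocation above (illustrative, not in the original script): when the filename has no leading slash, os.path.join from the standard library gives the same result, e.g.:\nimport os\nassert os.path.join('/a/b', 'c') == '/a/b/c'\nassert os.path.join('/a/b/', 'c') == '/a/b/c'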
 +{"seq_id":"26351000","text":"import os\n\nclass node:\n def __init__(self,data=None):\n self.data=data\n self.next=None\n\n\nclass ll:\n def __init__(self):\n self.head=None\n\n def search(self,data):\n # walk the whole list; removal is left to the caller\n curr=self.head\n while curr is not None:\n if curr.data == data:\n print(\"Item found\")\n return True\n curr=curr.next\n return False\n \n def add(self,data):\n newnod=node(data)\n if self.head == None:\n self.head = newnod\n else:\n curr = self.head\n while curr.next != None:\n curr=curr.next\n newnod.next = curr.next\n curr.next=newnod\n \n \n def remove(self,data):\n curr=self.head\n prev=None\n while curr is not None and curr.data != data:\n prev=curr\n curr=curr.next\n if curr is None: # item not present\n return\n if prev is None: # item is at the head\n self.head=curr.next\n else:\n prev.next=curr.next\n print(\"Item removed\")\n ll.disp(self)\n \n def disp(self):\n curr=self.head\n while curr is not None:\n print(curr.data)\n curr=curr.next\n # print(curr.data)\n\n def size(self): #count the number of elements in list\n sh = self.head\n count = 0\n while sh != None:\n count +=1\n sh = sh.next\n return count\n\n def element(self):\n # write every element to the already-open file handle f, advancing the pointer each step\n ptr = self.head\n while ptr != None:\n f.write(\" \" + str(ptr.data))\n ptr = ptr.next\n \n\nl = ll()\nf = open('DataS.txt', 'r') # Open and read the file\nfile = f.read().split(\"\\n\")\nf.close()\nfor i in file:\n print(i)\nprint()\n\nprint(\"Current elements in the list are\")\nl.disp()\n\nfor i in file: \n l.add(i)\nword = input(\"Enter the word, to check\") #reading word from the user\n\nresult = l.search(word) #searching the element in list\nprint(result)\n\nif result:\n l.remove(word) #removing the word\n print(\"After Searching element in the list \")\n l.disp()\n print(\"element removed\")\n \n f = open('DataS.txt', 'w') #Delete the word from the file by overwriting.\n f.write('')\n f.close()\n length = l.size()\n f = open('DataS.txt', 'a+')\n l.element()\n f.close()\nelse:\n l.add(word) #word not found hence adding it to end of list\n print(\"Updated elements are\")\n l.disp()\n \n f = open('DataS.txt', 'a+') # adding the word to file\n f.write(word)\n f.close()","sub_path":"Week2/UnorderedLL.py","file_name":"UnorderedLL.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"391830717","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nimport sys\nsys.path.append('../')\nimport classifier.elm as elm\n\nemotion_list = ['Angry', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']\nlabel = {'Angry':0, 'Fear':1, 'Happy':2, 'Neutral':3, 'Sad':4, 'Surprise':5}\nonehot = {'Angry':[1,0,0,0,0,0], 'Fear':[0,1,0,0,0,0], 'Happy':[0,0,1,0,0,0],\n 'Neutral':[0,0,0,1,0,0], 'Sad':[0,0,0,0,1,0], 'Surprise':[0,0,0,0,0,1]}\n\ndef SegPostProbDictAndHighFeat(y_pred, SegNumDictory):\n\n PostProbDictory = {'Angry': {}, 'Fear': {}, 'Happy': {}, 'Neutral': {}, 'Sad': {}, 'Surprise': {}}\n utteranceFeatures = {'Angry': {}, 'Fear': {}, 'Happy': {}, 'Neutral': {}, 'Sad': {}, 'Surprise': {}}\n\n highFeat = np.array([])\n highLabel = np.array([])\n\n startFrame_index = 0\n endFrame_index = 0\n\n for i in range(len(emotion_list)):\n for wavfile, seg_num in SegNumDictory[emotion_list[i]].items():\n if seg_num > 0:\n endFrame_index = endFrame_index + seg_num\n PostProbDictory[emotion_list[i]][wavfile] = y_pred[startFrame_index:endFrame_index]\n\n arr_1 = caculate_PropSupport(seg_num, startFrame_index, y_pred, 0.1)\n arr_2 = caculate_PropSupport(seg_num, startFrame_index, y_pred, 0.2)\n arr_3 = caculate_PropSupport(seg_num, startFrame_index, y_pred, 0.3)\n arr_4 = caculate_PropSupport(seg_num, startFrame_index, y_pred, 0.4)\n arr_5 = caculate_PropSupport(seg_num, startFrame_index, y_pred, 0.5)\n arr_6 = caculate_PropSupport(seg_num, startFrame_index, y_pred, 0.6)\n arr_7 = caculate_PropSupport(seg_num, startFrame_index, y_pred, 0.7)\n arr_8 = caculate_PropSupport(seg_num, startFrame_index, y_pred, 0.8)\n\n utteranceFeatures[emotion_list[i]][wavfile] = np.hstack(\n (np.mean(y_pred[startFrame_index:endFrame_index], axis=0),\n np.percentile(y_pred[startFrame_index:endFrame_index], 1, axis=0),\n np.percentile(y_pred[startFrame_index:endFrame_index], 25, axis=0),\n np.percentile(y_pred[startFrame_index:endFrame_index], 50, axis=0),\n np.percentile(y_pred[startFrame_index:endFrame_index], 75, axis=0),\n np.percentile(y_pred[startFrame_index:endFrame_index], 99, axis=0),\n arr_1,\n arr_2,\n arr_3,\n arr_4,\n arr_5,\n arr_6,\n arr_7,\n arr_8)\n )\n\n\n if len(highFeat) == 0:\n highFeat = 
utteranceFeatures[emotion_list[i]][wavfile]\n highLabel = onehot[emotion_list[i]]\n highLabel_int = label[emotion_list[i]]\n\n else:\n highFeat = np.vstack((highFeat, utteranceFeatures[emotion_list[i]][wavfile]))\n highLabel = np.vstack((highLabel, onehot[emotion_list[i]]))\n highLabel_int = np.vstack((highLabel_int, label[emotion_list[i]]))\n\n startFrame_index = startFrame_index + seg_num\n\n return highFeat, highLabel, highLabel_int, PostProbDictory\n\ndef UtterContour_Padding(y_pred, y_embd, SegNumDictory):\n\n ProbContour = {'Angry': {}, 'Fear': {}, 'Happy': {}, 'Neutral': {}, 'Sad': {}, 'Surprise': {}}\n EmbdContour = {'Angry': {}, 'Fear': {}, 'Happy': {}, 'Neutral': {}, 'Sad': {}, 'Surprise': {}}\n\n ProbContour_1D = np.array([])\n EmbdContour_1D = np.array([])\n\n startFrame_index = 0\n endFrame_index = 0\n time_steps = 15\n\n for i in range(len(emotion_list)):\n for wavfile, seg_num in SegNumDictory[emotion_list[i]].items():\n if seg_num > time_steps:\n endFrame_index = endFrame_index + seg_num\n ProbContour[emotion_list[i]][wavfile] = y_pred[startFrame_index:startFrame_index+time_steps]\n EmbdContour[emotion_list[i]][wavfile] = y_embd[startFrame_index:startFrame_index+time_steps]\n elif seg_num > 0 and seg_num <= time_steps:\n endFrame_index = endFrame_index + seg_num\n Prob_pad = np.zeros([time_steps-seg_num, 6])\n Embd_pad = np.zeros([time_steps-seg_num, 64])\n ProbContour[emotion_list[i]][wavfile] = np.vstack((y_pred[startFrame_index:endFrame_index], Prob_pad))\n EmbdContour[emotion_list[i]][wavfile] = np.vstack((y_embd[startFrame_index:endFrame_index], Embd_pad))\n\n startFrame_index = startFrame_index + seg_num\n\n for i in range(len(emotion_list)):\n for wavfile, seg_num in SegNumDictory[emotion_list[i]].items():\n if len(ProbContour_1D) == 0:\n ProbContour_1D = np.array(np.concatenate(ProbContour[emotion_list[i]][wavfile], axis=0))\n EmbdContour_1D = np.array(np.concatenate(EmbdContour[emotion_list[i]][wavfile], axis=0))\n UtterLabel = onehot[emotion_list[i]] # Onehot label\n # UtterLabel = label[emotion_list[i]]\n else:\n ProbContour_1D = np.vstack((ProbContour_1D, np.array(np.concatenate(ProbContour[emotion_list[i]][wavfile], axis=0))))\n EmbdContour_1D = np.vstack((EmbdContour_1D, np.array(np.concatenate(EmbdContour[emotion_list[i]][wavfile], axis=0))))\n UtterLabel = np.vstack((UtterLabel, onehot[emotion_list[i]])) # Onehot label\n # UtterLabel = np.vstack((UtterLabel, label[emotion_list[i]]))\n\n return ProbContour, EmbdContour, ProbContour_1D, EmbdContour_1D, UtterLabel\n\ndef UtterContour_NoPadding(y_pred, y_embd, SegNumDictory):\n\n ### store variable lenth array unsolved ###\n\n ProbContour = {'Angry': {}, 'Fear': {}, 'Happy': {}, 'Neutral': {}, 'Sad': {}, 'Surprise': {}}\n EmbdContour = {'Angry': {}, 'Fear': {}, 'Happy': {}, 'Neutral': {}, 'Sad': {}, 'Surprise': {}}\n\n ProbContour_1D = np.array([])\n EmbdContour_1D = np.array([])\n\n startFrame_index = 0\n endFrame_index = 0\n\n for i in range(len(emotion_list)):\n for wavfile, seg_num in SegNumDictory[emotion_list[i]].items():\n print('wavfile: {}, seg_num: {}'.format(wavfile, seg_num))\n endFrame_index = endFrame_index + seg_num\n ProbContour[emotion_list[i]][wavfile] = y_pred[startFrame_index:startFrame_index + seg_num]\n EmbdContour[emotion_list[i]][wavfile] = y_embd[startFrame_index:startFrame_index + seg_num]\n\n startFrame_index = startFrame_index + seg_num\n\n print(ProbContour)\n\n for i in range(len(emotion_list)):\n for wavfile, seg_num in SegNumDictory[emotion_list[i]].items():\n if 
len(ProbContour_1D) == 0:\n ProbContour_1D = np.array(np.concatenate(ProbContour[emotion_list[i]][wavfile], axis=0))\n EmbdContour_1D = np.array(np.concatenate(EmbdContour[emotion_list[i]][wavfile], axis=0))\n UtterLabel = onehot[emotion_list[i]] # Onehot label\n # UtterLabel = label[emotion_list[i]]\n else:\n ProbContour_1D = np.vstack((ProbContour_1D, np.array(np.concatenate(ProbContour[emotion_list[i]][wavfile], axis=0))))\n EmbdContour_1D = np.vstack((EmbdContour_1D, np.array(np.concatenate(EmbdContour[emotion_list[i]][wavfile], axis=0))))\n UtterLabel = np.vstack((UtterLabel, onehot[emotion_list[i]])) # Onehot label\n # UtterLabel = np.vstack((UtterLabel, label[emotion_list[i]]))\n\n return ProbContour, EmbdContour, ProbContour_1D, EmbdContour_1D, UtterLabel\n\ndef visual_prob_CASIA(posterior):\n\n plt.rc('xtick', labelsize=12)\n plt.rc('ytick', labelsize=12)\n\n new_ticks = [0, 10, 20, 30, 40, 50, 60]\n plt.xticks(new_ticks)\n\n x = np.arange(1, len(posterior[:, 0]) + 1)\n y = posterior[:, 0]\n plt.plot(x, y, 'ro:', markersize='8', fillstyle = 'none', label='angry')\n\n y = posterior[:, 1]\n plt.plot(x, y, 'y*--', markersize='8', label='fear')\n\n y = posterior[:, 2]\n plt.plot(x, y, 'gv-.', markersize='8', fillstyle = 'none', label='happy')\n\n y = posterior[:, 3]\n plt.plot(x, y, 'cd:', markersize='8', fillstyle = 'none', label='neutral')\n\n y = posterior[:, 4]\n plt.plot(x, y, 'bs--', markersize='8', fillstyle = 'none', label='sad')\n\n y = posterior[:, 5]\n plt.plot(x, y, 'mx-.', markersize='8', label='surprise')\n\n plt.legend(loc='upper right')\n\n # plt.ylim(-76, -74.5)\n plt.xlim(0, len(posterior[:, 0])+8)\n plt.title('DNN output', fontsize=18)\n plt.xlabel('Segment index', fontsize=18)\n plt.ylabel('Probability', fontsize=18)\n\n plt.show()\n\ndef elm_classifier(sess, train_highFeat, train_highLabel, test_highFeat, test_highLabel):\n hidden_num_elm = 100\n batch_size_elm = len(train_highFeat)\n input_size_elm = len(train_highFeat[0])\n\n print('input_size_elm: ', input_size_elm)\n print('batch_size_elm: ', batch_size_elm)\n\n ELM_ = elm.ELM(sess, batch_size_elm, input_size_elm, hidden_num_elm, len(emotion_list))\n\n # one-step feed-forward training\n ELM_.feed(train_highFeat, train_highLabel)\n\n # testing\n ELM_.test(test_highFeat, test_highLabel)\n\ndef caculate_PropSupport(seg_num, startFrame_index, y_pred, threshold):\n array = np.zeros(len(emotion_list))\n for i in range(seg_num):\n for j in range(len(emotion_list)):\n if y_pred[startFrame_index + i][j] > threshold:\n array[j] = array[j] + 1\n array = array * 1.0 / seg_num\n\n return array\n\ndef frame_acc(confu_matri, test_allLabels):\n accu = [0, 0, 0, 0, 0, 0]\n column = [0, 0, 0, 0, 0, 0]\n line = [0, 0, 0, 0, 0, 0]\n accuracy = 0\n recall = 0\n precision = 0\n for i in range(6):\n accu[i] = confu_matri[i][i]\n for i in range(6):\n for j in range(6):\n column[i] += confu_matri[j][i]\n for i in range(6):\n for j in range(6):\n line[i] += confu_matri[i][j]\n for i in range(6):\n accuracy += float(accu[i]) / len(test_allLabels)\n for i in range(6):\n if column[i] != 0:\n recall += float(accu[i]) / column[i]\n recall = recall / 6\n for i in range(6):\n if line[i] != 0:\n precision += float(accu[i]) / line[i]\n precision = precision / 6\n f1_score = (2 * (precision * recall)) / (precision + recall)\n\n print('confusion_matrix:\\n', confu_matri)\n print('precion:\\n\\t', precision)\n print('f1_score:\\n\\t', 
f1_score)","sub_path":"utils/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":10488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
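# A tiny worked example of the macro-averaged metrics that frame_acc above computes (illustrative numbers only; rows are predictions and columns are true labels, matching its loops):\ncm = [[8, 2], [1, 9]]\nrecall = (8 / 9 + 9 / 11) / 2 # per-column hit rate, averaged -> ~0.853\nprecision = (8 / 10 + 9 / 10) / 2 # per-row hit rate, averaged -> 0.85\nf1 = 2 * precision * recall / (precision + recall) # -> ~0.852\nprint(round(f1, 3))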
 +{"seq_id":"71822934","text":"from kubernetes import client\r\nfrom kubernetes.client.rest import ApiException\r\nfrom captain import settings\r\nfrom utils.datetime_encode import DateEncoder\r\nimport json\r\nfrom kubernetes.client.models.v1_deployment_status import V1DeploymentStatus\r\nfrom kubernetes.client import Configuration\r\n # SSL/TLS verification\r\n # Set this to False to skip verifying the SSL certificate when calling the API over https.\r\n #self.verify_ssl = True\r\n # Set this option to supply a custom certificate file for verifying the peer.\r\n #self.ssl_ca_cert = None\r\n # Client certificate file\r\n #self.cert_file = None\r\n # Client key file\r\n #self.key_file = None\r\n # Set this to True/False to enable/disable SSL hostname verification.\r\n #self.assert_hostname = None\r\n\r\n\r\nclass DeploymentCrud(object):\r\n def __init__(self):\r\n token_file = settings.TOKEN_FILE\r\n ca_file = settings.CA_FILE\r\n with open(token_file, 'r') as file:\r\n token = file.read().strip('\\n')\r\n k8s = settings.KUBERNETES\r\n configuration = client.Configuration()\r\n configuration.host = k8s\r\n configuration.verify_ssl = True\r\n configuration.ssl_ca_cert = ca_file\r\n configuration.api_key = {\"authorization\": \"Bearer \" + token}\r\n configuration.assert_hostname = settings.HOSTNAME\r\n client.Configuration.set_default(configuration)\r\n self.apps_v1 = client.AppsV1Api()\r\n\r\n# ---------------------------------get deployment object or message-------------------------------------------------\r\n def get_deployment_status(self, deployment_name, namespace):\r\n result = dict()\r\n result['kind'] = 'Status'\r\n result['deployment_name'] = deployment_name\r\n result['namespace'] = namespace\r\n boolean, resp = self.get_deployment_object(deployment_name=deployment_name, namespace=namespace)\r\n if boolean: # object\r\n data = resp.to_dict()\r\n data[\"code\"] = \"200\"\r\n data = json.dumps(data, cls=DateEncoder)\r\n result['details'] = json.loads(data)\r\n else: # 403/404\r\n result['details'] = json.loads(resp.body)\r\n return result\r\n\r\n def get_deployment_object(self, deployment_name, namespace): # return [boolean, object/exception]\r\n try:\r\n deployment = self.apps_v1.read_namespaced_deployment(name=deployment_name, namespace=namespace)\r\n return True, deployment\r\n except ApiException as e:\r\n return False, e\r\n\r\n# -----------------------------------create deployment message------------------------------------------------------\r\n def get_deployment_template(self, deployment_name, image, replicas): # build the Deployment template\r\n # Configure the Pod template container\r\n container = client.V1Container(\r\n name=deployment_name,\r\n image=image,\r\n # command=['tail','-f','/dev/null'],\r\n ports=[client.V1ContainerPort(container_port=80)])\r\n # Create and configure a spec section\r\n template = client.V1PodTemplateSpec(\r\n metadata=client.V1ObjectMeta(labels={\"app\": \"nginx\"}),\r\n spec=client.V1PodSpec(containers=[container]))\r\n # Create the specification of deployment\r\n spec = client.V1DeploymentSpec(\r\n replicas=replicas,\r\n template=template,\r\n selector={'matchLabels': {'app': 'nginx'}})\r\n # Instantiate the deployment object\r\n deployment = client.V1Deployment(\r\n api_version=\"apps/v1\",\r\n kind=\"Deployment\",\r\n metadata=client.V1ObjectMeta(name=deployment_name),\r\n spec=spec)\r\n\r\n return deployment\r\n\r\n\r\n def create_deployment(self, deployment_name, namespace, image, replicas):\r\n # Create deployment\r\n result = dict()\r\n result['kind'] = 'Create'\r\n result['deployment_name'] = deployment_name\r\n result['namespace'] = namespace\r\n boolean, resp = self.get_deployment_object(deployment_name=deployment_name, namespace=namespace)\r\n\r\n if boolean: # already exists, just report its status\r\n return self.get_deployment_status(deployment_name=deployment_name, namespace=namespace)\r\n elif resp.status == 404: # does not exist, so create it\r\n api_response = self.apps_v1.create_namespaced_deployment(\r\n body=self.get_deployment_template(deployment_name=deployment_name, image=image, replicas=replicas),\r\n namespace=namespace)\r\n data = api_response.to_dict()\r\n data[\"code\"] = \"200\"\r\n data = json.dumps(data, cls=DateEncoder)\r\n result['details'] = json.loads(data)\r\n return result\r\n else: # 403 or 500 and so on\r\n result['details'] = json.loads(resp.body)\r\n return result\r\n\r\n def update_deployment(self, deployment_name, namespace, image=None, replicas=None): # update the image and/or replica count\r\n # Update container image\r\n result = dict()\r\n result['kind'] = 'Update'\r\n result['deployment_name'] = deployment_name\r\n result['namespace'] = namespace\r\n\r\n boolean, resp = self.get_deployment_object(deployment_name=deployment_name, namespace=namespace)\r\n\r\n if boolean: # target found\r\n if image and replicas:\r\n resp.spec.template.spec.containers[0].image = image\r\n resp.spec.replicas = replicas\r\n elif replicas:\r\n resp.spec.replicas = replicas\r\n elif image:\r\n resp.spec.template.spec.containers[0].image = image\r\n else:\r\n result['details'] = {\"status\":\"Params not provided\",\"code\":101}\r\n return result\r\n # Update the deployment (patch in the deployment's own namespace, not a hard-coded one)\r\n api_response = self.apps_v1.patch_namespaced_deployment(\r\n name=deployment_name,\r\n namespace=namespace,\r\n body=resp)\r\n\r\n data = api_response.status.to_dict()\r\n data[\"code\"] = \"200\"\r\n data = json.dumps(data, cls=DateEncoder)\r\n result['details'] = json.loads(data)\r\n return result\r\n else: # no such target\r\n result['details'] = json.loads(resp.body)\r\n return result\r\n\r\n\r\n def delete_deployment(self, deployment_name, namespace):\r\n # Delete deployment\r\n boolean, resp = self.get_deployment_object(deployment_name=deployment_name, namespace=namespace)\r\n result = dict()\r\n result['kind'] = 'Delete'\r\n result['deployment_name'] = deployment_name\r\n result['namespace'] = namespace\r\n if boolean:\r\n self.apps_v1.delete_namespaced_deployment(\r\n name=deployment_name,\r\n namespace=namespace,\r\n body=client.V1DeleteOptions(\r\n propagation_policy='Foreground',\r\n grace_period_seconds=5))\r\n data = resp.to_dict()\r\n data[\"code\"] = \"200\"\r\n data = json.dumps(data, cls=DateEncoder)\r\n result['details'] = json.loads(data)\r\n return result\r\n else: # 403/404\r\n result['details'] = json.loads(resp.body)\r\n return result\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef main():\r\n\r\n deployment_name = \"nginx-deployment\"\r\n namespace = 'default'\r\n image = \"nginx:1.12.0\"\r\n replicas = 2\r\n dep = DeploymentCrud()\r\n print(dep.get_deployment_status(deployment_name=deployment_name, namespace=namespace)) # returns kind = Status\r\n # print(dep.create_deployment(deployment_name, namespace, image, replicas)) # if it exists kind = Status, otherwise kind = Create\r\n # print(dep.delete_deployment(deployment_name, namespace)) # detail = [status,404,403]\r\n # print(dep.get_deployment_status(deployment_name=deployment_name, namespace=namespace))\r\n\r\n # print(dep.update_deployment(deployment_name, namespace, replicas=replicas))\r\n # dep.update_deployment_image(replicas)\r\n # update_deployment_replicas(apps_v1, deployment)\r\n\r\n # delete_deployment(apps_v1)\r\n\r\n\r\n # =======================================================================\r\n\r\n # get_deployment_status(apps_v1, deployment_name, namespace)\r\n\r\nif 
__name__ == '__main__':\r\n main()\r\n","sub_path":"thirdparty/deployment_crud.py","file_name":"deployment_crud.py","file_ext":"py","file_size_in_byte":8138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"505008646","text":"import os\nfrom flask import Flask,url_for\n\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_mapping(\n SECRET_KEY='dev',\n UPLOAD_FOLDER=\"/tmp\",\n APP_ROOT=os.path.dirname(os.path.abspath(__file__)),\n RESULTS_DIR=os.path.dirname(os.path.abspath(__file__))+\"/static/results\"\n )\n\n if test_config is None:\n # load the instance config, if it exists, when not testing\n app.config.from_pyfile('config.py', silent=True)\n else:\n # load the test config if passed in\n app.config.from_mapping(test_config)\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n from . import home\n app.register_blueprint(home.bp)\n\n return app\n","sub_path":"malaria_profiler_webserver/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"126170817","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Jan 20 2019\n\n@author: zhaoyu\n\"\"\"\nfrom config import config\nimport glob\nimport os\nimport string\nimport numpy as np\nimport pandas as pd\nimport SimpleITK as sitk\nfrom train import value_predict\n\n#%%\n#step = 'Step4A'\nstep = 'Step4B'\n\n\ndimz = config['dimz']\ndimx = config['dimx']\ndimy = config['dimy']\nchannelNum = config['channelNum']\n#%%\ndef reWriteFiles(hdrFileList, imgFileList, IfNorm):\n arrayDict = dict()\n for i in range(len(hdrFileList)):\n # change the file name of the .hdr file\n currentfile = hdrFileList[i] \n name, ext = os.path.splitext(currentfile)\n Bname = os.path.basename(name)\n Dname = os.path.dirname(currentfile)\n if len(Bname.split(\"_\"))>1 and (Bname.split(\"_\")[1]).isdigit():\n item = string.join(Bname.split(\"_\")[0:2], \"_\")\n else:\n item = string.join(Bname.split(\"_\")[0:1], \"_\") \n if item.startswith('sw'):\n item = item[2:]\n newname = os.path.join(Dname,item+ext)\n os.rename(currentfile, newname)\n\n # change the file name of the .img file\n currentfile = imgFileList[i] \n name, ext = os.path.splitext(currentfile)\n Bname = os.path.basename(name)\n Dname = os.path.dirname(currentfile)\n if len(Bname.split(\"_\"))>1 and Bname.split(\"_\")[1].isdigit():\n item = string.join(Bname.split(\"_\")[0:2], \"_\")\n else:\n item = string.join(Bname.split(\"_\")[0:1], \"_\") \n if item.startswith('sw'):\n item = item[2:]\n newname = os.path.join(Dname,item+ext)\n os.rename(currentfile, newname)\n\n currentImg = sitk.ReadImage(newname)\n currentArray = sitk.GetArrayFromImage(currentImg)\n if IfNorm:\n mean = np.mean(currentArray) # mean for data centering\n std = np.std(currentArray) # std for data normalization\n currentArray -= mean\n currentArray /= std\n arrayDict[item] = currentArray\n if i == 0:\n shapeZ = currentArray.shape[0]\n shapeX = currentArray.shape[1]\n shapeY = currentArray.shape[2]\n return arrayDict, shapeZ, shapeX, shapeY\n\ndef WritePredictResultToCSV(list_ID, list_value, outCSV):\n dataRestore = {'patientID':list_ID, 'patientValue':list_value}\n df = pd.DataFrame(data=dataRestore) \n df.to_csv(outCSV) \n\ndef finalTestDataPrepare(hdrFileList, imgFileList, IfNorm, IfSave = True):\n arrayDict, shapeZ, shapeX, shapeY = reWriteFiles(hdrFileList, imgFileList, 
IfNorm)\n\n # prepare trainging data and label\n sorted_keys = sorted(arrayDict.keys(), reverse=False)\n NN = len(sorted_keys)\n ImgArray = np.zeros((NN,shapeZ,shapeX,shapeY,1))\n\n ii=0\n for currentKey in sorted_keys:\n ImgArray[ii,:,:,:,0] = arrayDict[currentKey]\n ii += 1\n\n if IfSave == True:\n if not os.path.exists(tempStore):\n os.mkdir(tempStore)\n np.save(os.path.join(tempStore, 'x_train_' + step + '.npy'), ImgArray)\n np.save(os.path.join(tempStore, 'ID_train_' + step + '.npy'), sorted_keys)\n \n return ImgArray, sorted_keys\n\ndef oneModelEvaluate(ImgArray, list_ID, BMtype, load_weight_dir, target_names, outCSV): \n y_predict = value_predict(ImgArray, BMtype, load_weight_dir, outputDir=None)\n Y_predict = np.argmax(y_predict, axis=1)\n Y_predict_name = list()\n for item in Y_predict:\n Y_predict_name.append(target_names[item])\n WritePredictResultToCSV(list_ID, Y_predict_name, outCSV)\n\ndef MultiModelEvaluate(ImgArray, list_ID, BM_list, weight_dir_list, target_names, outCSV):\n y_predict_list = list()\n for i in range(len(weight_dir_list)):\n y_predict = value_predict(ImgArray, BM_list[i], weight_dir_list[i], None)\n y_predict_list.append(y_predict)\n print(i)\n y_predict_mean = np.mean(np.array(y_predict_list), axis=0)\n Y_predict = np.argmax(y_predict_mean, axis=1)\n Y_predict_name = list()\n for item in Y_predict:\n Y_predict_name.append(target_names[item])\n WritePredictResultToCSV(list_ID, Y_predict_name, outCSV)\n\nif __name__ == \"__main__\":\n target_names = ['MSA', 'PD', 'PSP']\n \n #%%\n # original data\n tempStore = './tempData'\n dataDir = os.path.join('../PD_PET_Data', step)\n\n IfNorm = False\n\n Format1 = '/*.hdr'\n hdrFileList = glob.glob((dataDir+Format1))\n hdrFileList.sort()\n Format2 = '/*.img'\n imgFileList = glob.glob((dataDir+Format2))\n imgFileList.sort()\n\n ImgArray, list_ID = finalTestDataPrepare(hdrFileList, imgFileList, IfNorm, IfSave = True)\n \n # resori 1_1_1\n GM_singal_resori_1_1_1 = list()\n resori_list = ['resori']*6\n rootdir = '/media/data/yuzhao/project/PD_PET_Classify_GM/code_classify_resori/tempData'\n for i in range(6):\n GM_singal_resori_1_1_1.append(os.path.join(rootdir,'fold_{0}'.format(i),\n 'restore_2019_03_06','resori_1_1_1','Weights.h5')) \n outCSV = os.path.join(dataDir,step + '_GM_singal_resori_1_1_1.csv')\n MultiModelEvaluate(ImgArray, list_ID, resori_list, GM_singal_resori_1_1_1, target_names, outCSV)\n\n # resori 5_1_5\n GM_singal_resori_5_1_5 = list()\n resori_list = ['resori']*6\n rootdir = '/media/data/yuzhao/project/PD_PET_Classify_GM/code_classify_resori/tempData'\n for i in range(6):\n GM_singal_resori_5_1_5.append(os.path.join(rootdir,'fold_{0}'.format(i),\n 'restore_2019_03_06','resori_5_1_5','Weights.h5')) \n outCSV = os.path.join(dataDir, step + '_GM_singal_resori_5_1_5.csv')\n MultiModelEvaluate(ImgArray, list_ID, resori_list, GM_singal_resori_5_1_5, target_names, outCSV)\n\n # dense 1_1_1\n GM_singal_dense_1_1_1 = list()\n dense_list = ['dense']*6\n rootdir = '/media/data/yuzhao/project/PD_PET_Classify_GM/code_classify_dense/tempData'\n for i in range(6):\n GM_singal_dense_1_1_1.append(os.path.join(rootdir,'fold_{0}'.format(i),\n 'restore_2019_03_06','dense_1_1_1','Weights.h5')) \n outCSV = os.path.join(dataDir,step + '_GM_singal_dense_1_1_1.csv')\n MultiModelEvaluate(ImgArray, list_ID, dense_list, GM_singal_dense_1_1_1, target_names, outCSV)\n\n # dense 5_1_5\n GM_singal_dense_5_1_5 = list()\n dense_list = ['dense']*6\n rootdir = '/media/data/yuzhao/project/PD_PET_Classify_GM/code_classify_dense/tempData'\n for 
i in range(6):\n GM_singal_dense_5_1_5.append(os.path.join(rootdir,'fold_{0}'.format(i),\n 'restore_2019_03_06','dense_5_1_5','Weights.h5')) \n outCSV = os.path.join(dataDir,step + '_GM_singal_dense_5_1_5.csv')\n MultiModelEvaluate(ImgArray, list_ID, dense_list, GM_singal_dense_5_1_5, target_names, outCSV)\n\n # ensemble 1_1_1\n ensemble_list = resori_list + dense_list\n GM_Ensemble_resoriDese_1_1_1 = GM_singal_resori_1_1_1 + GM_singal_dense_1_1_1\n outCSV = os.path.join(dataDir,step + '_GM_Ensemble_resoriDese_1_1_1.csv')\n MultiModelEvaluate(ImgArray, list_ID, ensemble_list, GM_Ensemble_resoriDese_1_1_1, target_names, outCSV)\n\n # ensemble 5_1_5\n ensemble_list = resori_list + dense_list\n GM_Ensemble_resoriDese_5_1_5 = GM_singal_resori_5_1_5 + GM_singal_dense_5_1_5\n load_weight_dir = os.path.join(tempStore, 'Weights.h5')\n outCSV = os.path.join(dataDir,step + '_GM_Ensemble_resoriDese_5_1_5.csv')\n MultiModelEvaluate(ImgArray, list_ID, ensemble_list, GM_Ensemble_resoriDese_5_1_5, target_names, outCSV)\n","sub_path":"finalEvaluation.py","file_name":"finalEvaluation.py","file_ext":"py","file_size_in_byte":7469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"577267938","text":"import sys\nfrom panda import *\n\n\nwin ='640 480'\n\nm = MeshData()\n\nm.add_face((1,10,1),(1,9,-1),(-1,7,-1),(-1,11,1))\n\noffscreen = True\napp = PandaApp(offscreen,win=win)\napp.reset_render_nodes()\napp.build(m,'../../SimResources/Polligon/CityStreetAsphaltGeneric008.png',POLY4(),is_2sides=True)\n\napp.camera.setHpr(2,3,5.5)\napp.camera.setPos(-1,-10,-2)\napp.camLens.setFov(90)\n\nif offscreen:\n for i in range(2):\n app.graphicsEngine.render_frame()\n file_name = 'tmp'+str(i)+'.jpg'\n app.win.saveScreenshot(Filename(file_name))\nelse:\n app.run()\n","sub_path":"Jeries/MM_libperception/Eval/impandaM/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"89898895","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nfrom leetcode.Util import ListNode\n\nfrom leetcode import Util\n\n\nclass Solution:\n def nextLargerNodes(self, head: ListNode):\n stack = []\n res = []\n cur, i = head, 0\n while cur:\n while stack and stack[-1][1] < cur.val:\n res[stack.pop()[0]] = cur.val\n stack.append((i, cur.val))\n i += 1\n cur = cur.next\n res.append(0)\n return res\n\n\ns = Solution()\nprint(s.nextLargerNodes(Util.createListNode([2, 7, 4, 3, 5])))\nprint(s.nextLargerNodes(Util.createListNode([1, 7, 5, 1, 9, 2, 5, 1])))\n","sub_path":"leetcode/2021/next-greater-node-in-linked-list.py","file_name":"next-greater-node-in-linked-list.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"599555690","text":"# Quantum Gates Library\n\n# Import necessary libraries\nimport math\nimport cmath\nimport random\nimport numpy as np\n\n# Function to flip a bit\ndef flipBit(num, bit):\n\t# num -- the number whose bit gets flipped\n\t# bit -- the bit that gets flipped\n\tmask = 1 << (bit)\t# generate a mask for bit\n\treturn num ^ mask # XOR the mask and num\n\n# Flip 2 items in an array,\n# 1st specified by loc\n# 2nd by flipBit(loc, bit)\ndef swap(amp, loc, bit):\n\t# amp -- array of data\n\t# loc -- index of 1st item\n\t# bit -- used to calculate index of 2nd item\n\ttmp = amp[loc] 
# store amp[loc] in tmp\n\tinvert = flipBit(loc, bit) # find the inverted location\n\tif invert > loc:\n\t\tamp[loc] = amp[invert] # set amp[loc] to amp[invert]\n\t\tamp[invert] = tmp # set amp[invert] to tmp (the old amp[loc])\n\treturn amp\n\n# Function used in Grovers Algorithm \ndef FGA(qi):\n\t# qi -- value to check\n\tif qi == 2: # check if qi is 2\n\t\treturn 1 # if so return 1\n\treturn 0 # otherwise return 0\n\t\n# Function whose period is found with Shor's Algorithm\ndef FSA(qi):\n\t# qi -- input to function\n\treturn qi % 4 # return qi mod 4\n\t\n# Function that rounds all items in an array\n# to the nearest simple decimal\ndef Round(amp):\n\t# amp -- array to be rounded\n\tfor i in range(0, len(amp)): # loop through all items in array\n\t\tif cmath.polar(amp[i])[0] < 0.000001 and cmath.polar(amp[i])[0] > -0.000001: # check if item is between 0.000001 and -0.000001\n\t\t\tamp[i] = complex(0, 0) # if so set item to 0\n\t\telif cmath.polar(amp[i])[0] < 1.000001 and cmath.polar(amp[i])[0] > 0.999999: # check if item is between 1.000001 and 0.999999\n\t\t\tamp[i] = cmath.rect(1, cmath.polar(amp[i])[1]) # if so set item to 1\n\t\telif cmath.polar(amp[i])[0] < 0.5 and cmath.polar(amp[i])[0] > 0.499999: # check if item is between 0.5 and 0.499999\n\t\t\tamp[i] = cmath.rect(0.5, cmath.polar(amp[i])[1]) # if so set item to 0.5\n\treturn amp # return the new array\n\t\n# NOT gate (flips the amplitudes of all pairs)\ndef NOT(amp, qi):\n\t# amp -- array of data\n\t# qi -- qubit to be NOT'd\n\t# maski -- binary mask used to only apply NOT's when the control bit is 0\n\tmaski = 1 << qi\n\tfor i in range(0, len(amp)): # loop through all inputs\n\t\tif i & maski == 0: # check whether bit qi in i is 0\n\t\t\tamp = swap(amp, i, qi) # swap items in amp\n\treturn amp # return the new array\n\n# CNOT gate (flips the amplitude of a pair if a selected bit is 1)\ndef CNOT(amp, qi, qj):\n\t# amp -- array of data\n\t# qi -- controller bit\n\t# qj -- qubit to be NOT'd\n\t# maski -- binary mask used to only apply CNOT's when the control bit is 1\n\tmaski = 1 << qi\n\tfor i in range(0, len(amp)): # loop through all inputs\n\t\tif i & maski != 0: # check if control bit (qi) is one\n\t\t\tamp = swap(amp, i, qj) # if so swap that bit and its inverse\n\treturn amp # return the new array\n\n# CCNOT gate (flips the amplitude of a pair if the selected bits are both 1)\ndef CCNOT(amp, qi, qj, qk):\n\t# amp -- array of data\n\t# qi -- 1st controller bit\n\t# qj -- 2nd controller bit\n\t# qk -- qubit to be NOT'd\n\t# maski, maskj -- binary masks used to only apply CCNOT's when both control bits are 1\n\tmaski = 1 << qi\n\tmaskj = 1 << qj\n\tfor i in range(0, len(amp)): # loop through all inputs\n\t\tif i & maski != 0 and i & maskj != 0: # check if control bits qi and qj are both one\n\t\t\tamp = swap(amp, i, qk) # if so swap amp[i] with its qk-flipped partner\n\treturn amp # return the new array\n\t\n# Hadamard gate (like a COIN gate but with negative amplitudes)\ndef Hadamard(amp, qi):\n\t# amp -- array of data\n\t# maski -- binary mask used to only apply Hadamards's when the control bit is 0\n\t# sqrtTwo -- variable to store the square root of 2\n\t# qi -- qbit to apply the Hadamard gate to\n\tsqrtTwo = 1.4142135623730951\n\tmaski = 1 << qi\n\tfor i in range(0, len(amp)): # loop through all inputs\n\t\tif i & maski == 0: # check if control bit is 0\n\t\t\tflip = flipBit(i, qi) # get the 
flipped location\n\t\t\ta = amp[i] # store amp[i] in a\n\t\t\tb = amp[flip] # store amp[flip] in b\n\t\t\tamp[i] = (1/sqrtTwo) * (a+b) # set amp[i] to 1/sqrt2 * (a+b)\n\t\t\tamp[flip] = (1/sqrtTwo) * (a-b) # set amp[flip] to 1/sqrt2 * (a-b)\n\treturn amp\n\t\n# Z-NOT gate (inverts the amplitude of every basis state except the first one)\ndef ZNOT(amp):\n\t# amp -- array of data\n\tfor i in range(1, len(amp)): # loop through all inputs\n\t\tamp[i] = amp[i]*-1 # invert\n\treturn amp\n\t\n# Oracle gate (used in Grover's algorithm)\ndef OracleGA(amp):\n\t# amp -- array of data\n\tfor i in range(0, len(amp)): # loop through all inputs\n\t\tif FGA(i) == 1: # if FGA(i) is 1\n\t\t\tamp[i] = amp[i] * -1 # multiply the amplitude by -1\n\treturn amp\n\t\n# Grover Diffusion\ndef GroverDiffusion(amp):\n\t# amp -- array of data\n\tfor i in range(0, int(math.log(len(amp), 2))): # loop through all qbits\n\t\tHadamard(amp, i) # apply a Hadamard gate\n\tZNOT(amp) # apply a ZNOT gate\n\tfor i in range(0, int(math.log(len(amp), 2))): # loop through all qbits\n\t\tHadamard(amp, i) # apply a Hadamard gate again\n\treturn amp # return the new array\n\t\n# Omega gate (phase shift gate)\ndef Omega(qi, qj, pwr, amp, num):\n\t# amp -- array of data\n\t# qi -- controller bit\n\t# qj -- qubit to be shifted\n\t# maski -- binary mask used to only apply shifts when the control bit is 1\n\t# pwr -- number used as exponent\n\tmaski = 1 << qi\n\tmaskj = 1 << qj\n\tN = float(2 ** num) # total number of basis states for num qubits\n\t#print \"qi = %d, qj = %d, pwr = %d\" % (qi, qj, pwr)\n\tfor i in range(0, len(amp)):\n\t\tif i & maski != 0 and i & maskj != 0: # check if bits qi and qj are both one\n\t\t\tcomp = cmath.polar(amp[i]) # store amp[i] in polar form in comp\n\t\t\tshift = pwr/N # store the fraction of a turn to rotate by in shift\n\t\t\t#print i, shift\n\t\t\tshift *= cmath.pi * 2\n\t\t\tamp[i] = cmath.rect(comp[0], comp[1] + shift) # store the shifted value back in amp[i]\n\treturn amp # output amp\n\t\n# Hadamard Gate over Z to the n\ndef HZn(amp, num):\n\t# amp -- array of data\n\t# num -- number of QBits to apply over\n\tfor i in range(num, 0, -1): # loop backwards from num to 0\n\t\tHadamard(amp, i-1) # apply a Hadamard Gate to the QBit i-1\n\t\tcheck = 1 # set check to 1\n\t\tfor j in range(num, num-(i-1), -1): # loop backwards from num to num-(i-1)\n\t\t\tamp = Omega(i-check-1, i-1, -(2**(j-2)), amp, num) # apply an Omega gate on QBit i with i-check-1 as the controller bit and -(2**(j-2)) as the exponent\n\t\t\tcheck = check + 1 # increase check by 1\n\treturn amp\n\t\n# Oracle gate (used in Shor's algorithm)\ndef OracleSA(amp, n):\n\tfor i in range(0, 2**n): # loop from 0 to 2 to the n\n\t\toutput = FSA(i) << n # set output to the result of FSA(i) shifted n bits to the left\n\t\ttmp = amp[i] # store amp[i] in tmp\n\t\tamp[i] = amp[output+i] # set amp[i] to amp[output+i]\n\t\tamp[output+i] = tmp # set amp[output+i] to tmp (the old amp[i])\n\treturn amp # return amp\n\t\n# Measurement gate (used to measure a single QBit)\ndef Measure(amp, qi, times):\n\t# amp -- array of data\n\t# qi -- QBit to be measured\n\t# maski -- binary mask used to check if the control bit is 1\n\tmaski = 1 << qi\n\tZero = 0\n\tOne = 0\n\tfor i in range(0, len(amp)): # loop through all amplitudes\n\t\tif i & maski != 0: # check if bit qi of i is 1\n\t\t\tOne = One + cmath.polar(amp[i])[0]**2 # add amp[i] squared to One\n\t\telse: # otherwise\n\t\t\tZero = Zero + cmath.polar(amp[i])[0]**2 # add amp[i] squared to Zero\n\ttest = random.uniform(0.0,1.0) # get a random 
number between 0 and 1\n\tif test < Zero: # check if test falls inside the Zero probability mass\n\t\tstate = 0 # set state to 0\n\telse: # otherwise\n\t\tstate = 1 # set state to 1\n\tnorm = 0 # set norm to 0\n\tif state == 1: # check if state is 1\n\t\tfor i in range(0, len(amp)): # loop through amp\n\t\t\tif i & maski != 0: # check if qi is 1 in i\n\t\t\t\tnorm = norm + (cmath.polar(amp[i])[0])**2 # add the length of amp[i] squared to norm\n\tif state == 0: # check if state is 0\n\t\tfor i in range(0, len(amp)): # loop through amp\n\t\t\tif i & maski != 0: # check if qi is 1 in i\n\t\t\t\tpass # do nothing\n\t\t\telse: # otherwise\n\t\t\t\tnorm = norm + (cmath.polar(amp[i])[0])**2 # add the length of amp[i] squared to norm\n\tnorm = 1/norm # invert norm\n\tnorm = math.sqrt(norm) # set norm to the square root of norm\n\tif state == 0: # if state is 0\n\t\tfor i in range(0, len(amp)): # loop through everything in amp\n\t\t\tif i & maski != 0: # check if qi is 1 in i\n\t\t\t\tamp[i] = complex(0.0, 0.0) # set amp[i] to complex 0\n\tif state == 1: # if state is 1\n\t\tfor i in range(0, len(amp)): # loop through everything in amp\n\t\t\tif i & maski != 0: # check if qi is 1 in i\n\t\t\t\tpass # do nothing\n\t\t\telse: # otherwise\n\t\t\t\tamp[i] = complex(0.0, 0.0) # set amp[i] to complex 0\n\tfor i in range(0, len(amp)): # loop through everything in amp\n\t\tamp[i] = amp[i] * norm # renormalize amp\n\treturn amp # return amp\n\t\n\t\ndef Rotate(amp, rot):\n\tr = rot[0]\n\tmatrix = np.array([[math.cos(r), math.sin(r)], [math.sin(r), -(math.cos(r))]])\n\tfor i in range(1, len(rot)):\n\t\tr = rot[i]\n\t\tm = np.array([[math.cos(r), math.sin(r)], [math.sin(r), -(math.cos(r))]])\n\t\tmatrix = np.kron(matrix, m) # tensor (Kronecker) product so the gate acts on all qubits; np.outer would flatten the operands\n\treturn np.inner(matrix, amp)","sub_path":"quantum-sim-flask/QuantumGates.py","file_name":"QuantumGates.py","file_ext":"py","file_size_in_byte":8928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"164892825","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 12 04:45:52 2020\n\n@author: nishantn\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\n\n\n# Only 1 dependent and 1 independent variable\ndef simple_linear_regression(X_train, X_test, y_train, y_test) :\n regressor = LinearRegression()\n regressor.fit( X_train, y_train )\n y_pred = regressor.predict( X_test )\n mse = mean_squared_error( y_test, y_pred )\n # plt.scatter(X_train, y_train, color = 'red')\n # plt.plot(X_train, regressor.predict(X_train), color = 'blue')\n # plt.title('Salary vs Experience (Training set)')\n # plt.xlabel('Years of Experience')\n # plt.ylabel('Salary')\n # plt.show()\n\n # plt.scatter(X_test, y_test, color = 'red')\n # plt.plot(X_train, regressor.predict(X_train), color = 'blue')\n # plt.title('Salary vs Experience (Test set)')\n # plt.xlabel('Years of Experience')\n # plt.ylabel('Salary')\n # plt.show()\n print(\"Inner\")\n print(y_pred)\n return pd.DataFrame(y_pred), pd.DataFrame(regressor.predict( X_train )), mse\n\n\ndef multiple_linear_regression(X_train, X_test, y_train, y_test) :\n regressor = LinearRegression()\n regressor.fit( X_train, y_train )\n y_pred = regressor.predict( X_test )\n mse = mean_squared_error( y_test, y_pred )\n return y_pred, mse\n\n\nfrom sklearn.preprocessing import PolynomialFeatures\n\n\n# Takes entire dataset (No train-test split)\n# Only 1 
dependent and 1 independent variable\ndef poly_regression(X, y) :\n    poly_reg = PolynomialFeatures( degree=5 )\n    X_poly = poly_reg.fit_transform( X )\n    lin_reg = LinearRegression()\n    lin_reg.fit( X_poly, y )\n\n    # y_pred = lin_reg.predict(poly_reg.fit_transform(X_test))\n    # mse = mean_squared_error(y_test,y_pred)\n    # print(\"MSE at i = \",i,\" is \",mse)\n\n    # plt.scatter(X, y, color = 'red')\n    # plt.plot(X, lin_reg.predict(poly_reg.fit_transform(X)), color = 'blue')\n    # plt.title('Truth or Bluff (Polynomial Regression)')\n    # plt.xlabel('Position level')\n    # plt.ylabel('Salary')\n    # plt.show()\n    return lin_reg.predict( poly_reg.fit_transform( X ) )","sub_path":"backend/nocodeML/algorithms/Regression.py","file_name":"Regression.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"126237833","text":"# packages\nfrom parflowio.pyParflowio import PFData\nimport rasterio as rio\nimport numpy as np\nimport xarray as xr\nfrom pfpostproc.attrs import *\nimport pandas as pd\nfrom datetime import datetime,timedelta\nfrom memory_profiler import profile\n\n# open pfb, return array\ndef pfb_arr(fpath):\n    pfb = PFData(fpath)\n    pfb.loadHeader()\n    pfb.loadData()\n    arr = pfb.copyDataArray()\n    arr = arr.squeeze()\n    pfb.close()\n    return arr\n\n# open raster, return array\ndef raster_arr(fpath):\n    raster = rio.open(fpath)\n    arr = raster.read(1).astype(float)\n    arr = np.flip(arr,axis=0)\n    return arr\n\ndef write_gage_csv(fpath,wy,outdir,outname):\n    # read tab delimited txt file\n    df1 = pd.read_csv(fpath,\n                      sep='\\t',\n                      names=['agency',\n                             'siteID',\n                             'datetime',\n                             'timezone',\n                             'cfs',\n                             'code'])\n    df1['m3h'] = df1.cfs * 101.9406477312\n    df1['datetime'] = df1['datetime'].apply(lambda x: datetime.strptime(x,'%Y-%m-%d %H:%M'))\n    gdict = dict(zip(df1.datetime, df1.m3h))\n    # account for missing timesteps\n    df2 = pd.DataFrame()\n    df2['datetime'] = np.arange(datetime.strptime(f'{wy-1}-10-01 00:00', '%Y-%m-%d %H:%M'),\n                                datetime.strptime(f'{wy}-10-01 00:00', '%Y-%m-%d %H:%M'),\n                                timedelta(hours=1)).astype(datetime)\n    df2['m3h'] = df2['datetime'].map(gdict)\n    # write to csv\n    arr = df2.m3h.values\n    np.savetxt(outdir + outname + '.csv', arr, delimiter=',', fmt='%f')\n\n# add johnson creek stream gage data\ndef add_johnsoncreek(fpath):\n    arr = np.loadtxt(fpath)\n    da = xr.DataArray(\n        arr,\n        dims = 't',\n        name = 'johnsoncreek')\n    return da\n\n# add krassel stream gage data\ndef add_krassel(fpath):\n    arr = np.loadtxt(fpath)\n    da = xr.DataArray(\n        arr,\n        dims = 't',\n        name = 'krassel')\n    return da\n    ","sub_path":"pfpostproc/pfpostproc/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"499217268","text":"# coding=utf-8\n# /usr/bin/python\n# coding=utf-8\n# created by 15025463191 2017/10/30\n# -*- coding: utf-8 -*\nfrom random import seed, randint\n\n\ndef strtoAscii(strindex):\n    \"\"\"Convert a string to hex (ASCII codes)\"\"\"\n    hexlist = []\n    for i in strindex:\n        number = ord(i)\n        hexnumber = hex(number)\n        # print hexnumber\n        hexlist.append(number)\n    return hexlist\n\n# def stringXOR(stringlist):\n#     \"\"\"XOR checksum\"\"\"\n#     number = 0x00\n#     for i in stringlist:\n#         number ^= i\n#     hexxor = hex(number)\n#     hexxorlist = [\"0x00\", hexxor]\n#     return hexxorlist\n#\n#\n# a=\"adsa\"\n#\n# print
stringXOR(a)\n","sub_path":"ChangeTo/change.py","file_name":"change.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"609591979","text":"#CLASS_dictionary.py\n\n# Dictionary class\n\n\"\"\"\n This version of the dictionary creates bag of words with both word ngrams and char ngrams\n\n\"\"\"\n\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nimport numpy as np\nimport random, re, time, os\nfrom conllu import parse\n\n\nclass Dictionary:\n def __init__(self, ngrams, mincount, bucket, run):\n self.run_number = run\n self.ngrams = ngrams\n self.mincount = mincount\n self.bucket = bucket\n \n #TETON = False\n TETON = True # WORKING ON TETON OR NOT\n if TETON == True:\n self.file_train = open('/project/lsrtwitter/mcooley3/data/twitter_race_1.train',encoding='utf8').readlines()\n self.file_test = open('/project/lsrtwitter/mcooley3/data/twitter_race_1.test',encoding='utf8').readlines() \n self.raw_file_aa = open('/project/lsrtwitter/mcooley3/bias_vs_labelefficiency/TwitterAAE-UD-v1/aa250_gold.conllu', \n encoding='utf8').read()\n self.raw_file_wh = open('/project/lsrtwitter/mcooley3/bias_vs_labelefficiency/TwitterAAE-UD-v1/wh250_gold.conllu', \n encoding='utf8').read()\n self.index_Rval = '/project/lsrtwitter/mcooley3/bias_vs_labelefficiency/indices_Rval_RACE/'\n self.index_Sval = '/project/lsrtwitter/mcooley3/bias_vs_labelefficiency/indices_Sval_RACE/'\n else:\n self.file_train = open('../../../simple-queries-master_RACE/data/twitter_race_1.train',\n encoding='utf8').readlines()\n self.file_test = open('../../../simple-queries-master_RACE/data/twitter_race_1.test', \n encoding='utf8').readlines() \n \n self.raw_file_aa = open('./TwitterAAE-UD-v1/aa250_gold.conllu', encoding='utf8').read()\n self.raw_file_wh = open('./TwitterAAE-UD-v1/wh250_gold.conllu', encoding='utf8').read()\n \n self.index_Rval = './indices_Rval_RACE/'\n self.index_Sval = './indices_Sval_RACE/'\n \n raw_aa = self.convert_format(self.raw_file_aa)\n raw_wh = self.convert_format(self.raw_file_wh)\n \n raw_aa_labels = [1.0] * len(raw_aa) # WARNING double check these values\n raw_wh_labels = [0.0] * len(raw_wh)\n \n \n print(\"--------- creating train instances ---------\")\n train_instances, train_labels = self.create_instances_and_labels(self.file_train)\n x_strain, x_sval = self.split_Strain_Sval(train_instances)\n y_strain, y_sval = self.split_Strain_Sval(train_labels)\n self.n_strain = len(x_strain)\n self.n_sval = len(x_sval)\n \n # lm = {'w': 0, 'b': 1, 'W': 0, 'B': 1}\n print(\"Num 0 instances (w): \", train_labels.count(0), \" Num 1 instances (aa): \", train_labels.count(1))\n print(\"x_strain: \", self.n_strain, \" x_sval: \", self.n_sval)\n print(\"y_strain: \", len(y_strain), \" y_sval: \", len(y_sval))\n print()\n \n print(\"---------- creating manual instances ---------\")\n manual_instances, y_manual = self.combine_manual_race(raw_aa, raw_wh, raw_aa_labels, raw_wh_labels)\n x_rtest, x_rval = self.split_Rtest_Rval(manual_instances)\n y_rtest, y_rval = self.split_Rtest_Rval(y_manual)\n self.n_rtest = len(x_rtest)\n self.n_rval = len(x_rval)\n print(\"x_rtest: \", self.n_rtest, \" x_rval: \", self.n_rval)\n print(\"y_rtest: \", len(y_rtest), \" y_rval: \", len(y_rval))\n print()\n \n print(\"--------- creating testing instances ---------\")\n x_stest, y_stest = self.create_instances_and_labels(self.file_test)\n self.n_stest = len(x_stest)\n print(\"Num 0 instances (w): \", y_stest.count(0), \" Num 1 
instances (aa): \", y_stest.count(1))\n        print(\"x_stest: \", self.n_stest)\n        print()\n        \n        \n        # -----------------------------------------------------\n        \n        self.nclasses = len(set(train_labels))\n        \n        print(\"Creating bag-of-n-grams\")\n        self.X_STRAIN = self.create_initial_bagngrams(x_strain)\n        self.X_SVAL = self.create_bagngrams(x_sval)\n        self.Y_STRAIN = self.create_label_vec(self.n_strain, self.nclasses, y_strain)\n        self.Y_SVAL = self.create_label_vec(self.n_sval, self.nclasses, y_sval)\n        \n        self.X_RTEST = self.create_bagngrams(x_rtest)\n        self.X_RVAL = self.create_bagngrams(x_rval)\n        self.Y_RTEST = self.create_label_vec(self.n_rtest, self.nclasses, y_rtest)\n        self.Y_RVAL = self.create_label_vec(self.n_rval, self.nclasses, y_rval)\n        \n        self.X_STEST = self.create_bagngrams(x_stest)\n        self.Y_STEST = self.create_label_vec(self.n_stest, self.nclasses, y_stest)\n        \n        self.nwords = self.X_STRAIN.shape[1]\n        \n        \n    def convert_format(self, raw_file):\n        documents = []\n        sentences = parse(raw_file)\n        for sent in sentences:\n            new_sent = ''\n            for word in sent:\n                new_sent = new_sent + ' ' + word['form']\n            \n            documents.append(new_sent)\n        return documents\n    \n    \n    def combine_manual_race(self, raw_aa, raw_wh, raw_aa_labels, raw_wh_labels):\n        full_man_race = raw_aa + raw_wh\n        full_man_race_labels = raw_aa_labels + raw_wh_labels\n        \n        return full_man_race, full_man_race_labels\n    \n    \n    def split_Strain_Sval(self, train_set):\n        for filename in os.listdir(self.index_Sval):\n            if '_'+str(self.run_number)+'.txt' in filename:\n                subset = np.loadtxt(self.index_Sval+filename, dtype=object) # the np.object alias is deprecated\n        \n        subset = subset.astype(int).tolist() \n        sval = [train_set[i] for i in subset]\n        strain = [element for i, element in enumerate(train_set) if i not in subset]\n        return strain, sval\n    \n    \n    def split_Rtest_Rval(self, _set):\n        for filename in os.listdir(self.index_Rval):\n            if '_'+str(self.run_number)+'.txt' in filename:\n                subset = np.loadtxt(self.index_Rval+filename, dtype=object)\n        \n        subset = subset.astype(int).tolist() \n        rval = [_set[i] for i in subset]\n        rtest = [element for i, element in enumerate(_set) if i not in subset]\n        return rtest, rval\n    \n    \n    # adds each instance as a separate element in the list\n    # each 'tweet' is separated by tab\n    def create_instances_and_labels(self, subset):\n        words = []\n        labels = []\n        documents = []\n        whitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 0123456789 \\t \\n')\n        \n        for x in subset[0:-1]:\n            inst = ''\n            label = x[0:10]\n            \n            if label[0:9] != '__label__':\n                print(\"ERROR in label creation. 
label: \", label)\n break\n else:\n labels.append(float(label[-1]))\n \n inst = x[10:]\n \n documents.append(inst)\n\n print(len(documents), \" total instances\")\n return documents, labels\n \n \n def words_and_char_ngrams(self, text):\n words = re.findall(r'\\w{6,}', text) # {3,} \"3 OR MORE\"\n for w in words:\n numgrams = 3\n yield w\n while numgrams > 1:\n for i in range(len(w) - numgrams):\n yield w[i:i+numgrams]\n numgrams -= 1\n\n\n def create_initial_bagngrams(self, x_train): \n #self.vectorizer = CountVectorizer(ngram_range=(1,self.ngrams), min_df=self.mincount, max_features=self.bucket)\n #data_features = self.vectorizer.fit_transform(x_train) \n \n #self.vectorizer = CountVectorizer(ngram_range=(1,1), min_df=self.mincount)\n #data_features = self.vectorizer.fit_transform(x_train) \n \n #********\n self.vectorizer = CountVectorizer(analyzer=self.words_and_char_ngrams, ngram_range=(1,self.ngrams), max_features=self.bucket)\n data_features = self.vectorizer.fit_transform(x_train)\n \n return data_features\n \n \n def create_bagngrams(self, instances):\n return self.vectorizer.transform(instances) \n\n\n def create_label_vec(self, ninstances, nclasses, y):\n labels = np.zeros((ninstances, nclasses))\n \n n_males = 0\n n_females = 0\n \n i = 0\n for label in labels:\n if y[i] == 0:\n label[0] = 1.0\n n_males += 1 #NOTE: need to double check \n elif y[i] == 1:\n label[1] = 1.0\n n_females += 1 #NOTE: need to double check \n \n i += 1\n \n return labels #, n_males, n_females\n \n \n \n \n \n \n \n \n \n \n\n \n","sub_path":"CLASS_dictionary_RACE.py","file_name":"CLASS_dictionary_RACE.py","file_ext":"py","file_size_in_byte":8839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"605522207","text":"from flask import Blueprint, render_template, redirect, url_for\nfrom zroan.deploy.models import *\nimport os\n\n\ndeploy = Blueprint('deploy', __name__)\n\n\n@deploy.route('/')\ndef dep_service():\n title = '服务部署列表'\n deploy_detail = DeployService.query.all()\n\n return render_template('deploy/deploy-service.html', title=title, deploy_detail=deploy_detail)\n\n\n@deploy.route('/deploy/add/')\ndef dep_service_add():\n title = '项目配置'\n return render_template('deploy/add-service.html', title=title)\n\n\n@deploy.route('/deploy/del//')\ndef dep_service_del(id):\n # host_del = db.session.query(HostAsstes).filter(HostAsstes.id == id).one()\n # db.session.delete(host_del)\n # db.session.commit()\n # return redirect(url_for('deploy.dep_service'))\n pass\n\n\n@deploy.route('/git///')\ndef dep_git_pull(names, branch):\n \"\"\"\n 获取 git 最新版本\n :param detail:\n :return:\n \"\"\"\n try:\n if names is not None:\n detail = db.session.query(DeployService).filter(DeployService.name == names, DeployService.current_version == branch).one()\n\n os.chdir('/Users/maql/deploy_data/webapp/%s' % detail.dep_name)\n os.system('sudo git reset --hard origin/master') # 回到未提交的状态\n os.system('sudo git clean -f') # 清除未在版本库中的文件\n os.system('sudo git pull')\n os.system('sudo git pull')\n\n '''Rsync data to clent server'''\n os.system('rsync -avh --delete -e \"ssh -p %s\" ./* root@%s:/data/wwwroot/%s/' % (detail.dep_ssh_port, detail.dep_ip, detail.dep_name))\n\n return redirect(url_for('deploy.dep_service'))\n else:\n print('==========>>>>> Error is fun dep_git_pull')\n\n except Exception as e:\n print(e)\n return 1\n\n#\n# def git_rsync_copy(names):\n# pass\n# #\n# # '''git_log_out'''\n# # git_log_out = os.popen('git log --pretty=oneline -1')\n# # git_log = 
git_log_out.readlines()\n# #     git_ver = git_log[0].split()[0]\n# #     git_comment = git_log[0].split()[1:]\n# #     # print(git_comment, git_ver)\n# #\n# #     '''commit the data to the database'''\n# #     deploy = DeployService()\n# #     deploy.dep_comment = git_comment\n# #     deploy.current_version = git_ver\n# #     deploy.last_time = datetime.now()\n# #     deploy.build_status = 'Deployed successfully'\n# #     deploy.save()\n# #     db.session.add_all([deploy])\n# #     db.session.commit()\n","sub_path":"zroan/deploy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"120748588","text":"# transformation to comp domain -- range of [k_bar, k_up]\ndef box_to_cube(knext=[], params=None):\n    n = len(knext)\n    knext_box = knext[0:n]\n    knext_dummy = knext[0:n]\n\n    scaling_dept = (params.range_cube / (params.k_up - params.k_bar)\n                    ) # scaling for kappa\n\n    #transformation onto cube [0,1]^d\n    for i in range(n):\n        #prevent values outside the box\n        if knext[i] > params.k_up:\n            knext_dummy[i] = params.k_up\n        elif knext[i] < params.k_bar:\n            knext_dummy[i] = params.k_bar\n        else:\n            knext_dummy[i] = knext[i]\n        #transformation to sparse grid domain\n        knext_box[i] = (knext_dummy[i] - params.k_bar) * scaling_dept\n\n    return knext_box\n","sub_path":"models/simon_growth/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"358860522","text":"# Server\nfrom socket import *\nimport time\nimport threading\nimport selectors\nimport binascii\nfrom scapy.arch import get_if_addr\nimport netifaces as ni\n\nunder10sec = True\nclientSocketList = []\ngroup1 = []\ngroup2 = []\ncounter_group1 = 0\ncounter_group2 = 0\ncounter = 0\nbroadcastPort = 13117\nserver_port = 2124\n\n\n\ndef UDPserver():\n    '''\n    purpose : create UDP socket for server enabling broadcast mode\n    :return: UDP socket\n    '''\n\n    try:\n        serverHost = ni.ifaddresses('eth1')[ni.AF_INET][0]['addr']\n    except:\n        try:\n            serverHost = ni.ifaddresses('eth2')[ni.AF_INET][0]['addr']\n        except:\n            try:\n                serverHost = ni.ifaddresses('wlp2s0')[ni.AF_INET][0]['addr']\n            except:\n                return\n\n    # create UDP socket\n    serverSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)\n\n    # Enable broadcasting mode\n    serverSocket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)\n\n    print(f'Server started, listening on IP address {serverHost}')\n    return serverSocket\n\n\ndef TCPserver():\n    '''\n    purpose : create TCP socket for server\n    :return: TCP socket\n    '''\n    global server_port\n    serverHost = get_if_addr('eth1')\n    serverPort = server_port # port number from work assignment\n\n    # create TCP welcoming socket\n    serverSocket = socket(AF_INET, SOCK_STREAM)\n    serverSocket.bind((serverHost, serverPort))\n\n    # server begins listening for incoming TCP requests\n    serverSocket.listen(1)\n    serverSocket.setblocking(True)\n\n    return serverSocket\n\n\ndef sendBroadcastOverUDP(start, serverSocket):\n    '''\n    purpose : send broadcast packets containing offers to connect to the server's TCP socket\n    :param start: start time for 10 seconds countdown\n    :param serverSocket: server's UDP socket\n    :return: None\n    '''\n    global under10sec\n    global broadcastPort\n    # count 10 seconds from the given start time\n    elapsed = 0\n    while elapsed < 10:\n        elapsed = time.time() - start\n        msg = hex(0xfeedbeef02084c) # 0xfeedbeef = magic cookie , 0x02 = offer msg , 0x084C = TCP server port (2124)\n        serverSocket.sendto(binascii.unhexlify(msg[2:]), ('<broadcast>', 
broadcastPort))\n        time.sleep(1)\n\n    under10sec = False\n\n\ndef connectClient(selector, serverSocketTCP):\n    '''\n    purpose : connect a specific client to the server , register him to the selector and add him to group1 or group2\n    :param selector: server's socket selector\n    :param serverSocketTCP: server's TCP welcoming socket\n    :return: None\n    '''\n    global group1\n    global group2\n    global clientSocketList\n    global counter\n\n    serverSocketTCP.settimeout(10)\n\n    # server waits on accept() for incoming requests, new socket created on return\n    connectionSocket, addr = serverSocketTCP.accept()\n    connectionSocket.setblocking(True)\n\n\n    try:\n        # get client name\n        clientName = connectionSocket.recv(1024).decode('utf-8') # read bytes from socket\n        connectionSocket.setblocking(False)\n\n        if counter % 2 == 0:\n            group1.append(clientName)\n            counter += 1\n            selector.register(connectionSocket, selectors.EVENT_READ | selectors.EVENT_WRITE, data=1)\n        else:\n            group2.append(clientName)\n            counter += 1\n            selector.register(connectionSocket, selectors.EVENT_READ | selectors.EVENT_WRITE, data=2)\n\n        clientSocketList.append(connectionSocket)\n\n    except Exception as e:\n        return\n\n\ndef game(conn, groupNumber):\n    '''\n    purpose : read client's buffer (get client's keyboard input for game)\n    :param conn: client's socket connection from selector\n    :param groupNumber: client's group number (1 or 2)\n    :return: None\n    '''\n    global counter_group1\n    global counter_group2\n\n    try:\n        msg = conn.recv(1024)\n        if groupNumber == 1:\n            counter_group1 += 1\n        if groupNumber == 2:\n            counter_group2 += 1\n\n\n    except Exception as e:\n        return\n\n\n\ndef sendStartGameMsg(conn):\n    '''\n    purpose : send client the message that the game begins including all clients who are participating in the game\n    :param conn: the client's connection socket\n    :return: None\n    '''\n    global group1\n    global group2\n\n    group1Name = \"\"\n    for name in group1:\n        group1Name+=name\n\n    group2Name = \"\"\n    for name in group2:\n        group2Name += name\n\n    msg = f'Welcome to Keyboard Spamming Battle Royale.\\n' \\\n          f'Group 1:\\n' \\\n          f'==\\n' \\\n          f'{group1Name}\\n' \\\n          f'Group 2:\\n' \\\n          f'==\\n' \\\n          f'{group2Name}\\n' \\\n          f'Start pressing keys on your keyboard as fast as you can!!'\n\n    try:\n        conn.send(msg.encode('utf-8'))\n    except Exception as e:\n        return\n\ndef displayWinner(conn):\n    '''\n    purpose : display the group who won the game\n    :param conn: client socket connection\n    :return: None\n    '''\n    global counter_group2\n    global counter_group1\n    global group1\n    global group2\n\n    group1Name = \"\"\n    for name in group1:\n        group1Name += name\n\n    group2Name = \"\"\n    for name in group2:\n        group2Name += name\n\n    msg = f'It\\'s a tie ! 
\\n' \\\n          f'Group 1 and Group 2 typed in {counter_group1} characters\\n' \\\n          f'Congratulations to you all !'\n\n    if counter_group1 > counter_group2:\n        msg = f'Game over!\\n' \\\n              f'Group 1 typed in {counter_group1} characters\\n' \\\n              f'Group 2 typed in {counter_group2} characters\\n' \\\n              f'Group 1 wins!\\n' \\\n              f'Congratulations to the winners:\\n' \\\n              f'==\\n' \\\n              f'{group1Name}'\n\n    elif counter_group2 > counter_group1:\n        msg = f'Game over!\\n' \\\n              f'Group 1 typed in {counter_group1} characters\\n' \\\n              f'Group 2 typed in {counter_group2} characters\\n' \\\n              f'Group 2 wins!\\n' \\\n              f'Congratulations to the winners:\\n' \\\n              f'==\\n' \\\n              f'{group2Name}'\n\n    print(msg)\n    try:\n        conn.send(msg.encode('utf-8'))\n    except Exception as e:\n        return\n\ndef main():\n\n    global group1\n    global group2\n    global counter_group1\n    global counter_group2\n    global under10sec\n    global clientSocketList\n    global counter\n\n    # connect TCP and UDP sockets\n    serverSocketUDP = UDPserver()\n    serverSocketTCP = TCPserver()\n\n    sel = selectors.DefaultSelector()\n    sel.register(serverSocketTCP, selectors.EVENT_READ, data=None)\n\n    while 1:\n\n        start = time.time()\n\n        # create thread for UDP\n        thread = threading.Thread(target=sendBroadcastOverUDP, args=(start, serverSocketUDP),\n                                  daemon=True)\n        # Send offers\n        thread.start()\n\n        # Register all clients to selector\n        while under10sec:\n            events = sel.select(timeout=10)\n            for key, mask in events:\n                if mask & selectors.EVENT_READ and key.data==None:\n                    connectClient(sel, serverSocketTCP)\n\n        # Send the message to start the game\n        if len(group1)!=0 or len(group2)!=0:\n            events = sel.select()\n            for key, mask in events:\n                if mask & selectors.EVENT_WRITE:\n                    sendStartGameMsg(key.fileobj)\n\n        # Play game for 10 sec\n        start = time.time()\n        elapsed = 0\n        while elapsed < 10:\n            elapsed = time.time() - start\n\n            events = sel.select()\n            for key, mask in events:\n                if mask & selectors.EVENT_READ:\n                    game(key.fileobj, key.data)\n\n        # Display winners\n        events = sel.select()\n        for key, mask in events:\n            if mask & selectors.EVENT_WRITE:\n                displayWinner(key.fileobj)\n\n        print('Game over, sending out offer requests...')\n        # Close all connections to TCP server socket\n        for conn in clientSocketList:\n            sel.unregister(conn)\n            conn.close()\n\n        # Reset group lists\n        group1 = []\n        group2 = []\n        clientSocketList = []\n\n        # Reset group counters\n        counter_group1 = 0\n        counter_group2 = 0\n        under10sec = True\n        counter = 0\n\n\n\n\nif __name__ == \"__main__\":\n    # execute only if run as a script\n    main()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":8324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"210757264","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 12 02:49:35 2017\n\n@author: C\n\"\"\"\nimport sys\nfrom PyQt5.QtGui import * \nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import * \nimport package.sqlTools\n\n\nclass Example(QDialog):\n\n    def __init__(self):\n        super().__init__()\n\n        self.initUI()\n\n\n    def initUI(self):\n\n\n        \n        name = QLabel('Select output library:')\n        ratio = QLabel('Set sub-library ratio (default):')\n        nameFile = QLabel('Set output file name (required):')\n\n        \n        \n        self.nameEdit = QComboBox()\n        for x in package.sqlTools.selcetNameAll('数据总表'):\n            self.nameEdit.addItem(x)\n        \n        self.ratioEdit = QLineEdit()\n        self.ratioEdit.setText('3221')\n        \n        self.nameFileEdit = QLineEdit()\n        \n\n\n        \n\n\n        \n        self.okButton = QPushButton('OK')\n        self.noButton = QPushButton('Cancel')\n        self.okButton.clicked.connect(self.save)\n        
self.noButton.clicked.connect(self.noSave)\n        \n        hbox = QHBoxLayout() \n        hbox.addStretch(1) \n        hbox.addWidget(self.okButton)\n        hbox.addStretch(1) \n        hbox.addWidget(self.noButton)\n        hbox.addStretch(1) \n\n        grid = QGridLayout()\n        grid.setSpacing(10)\n        grid.addWidget(name, 1, 0)\n        grid.addWidget(self.nameEdit, 1, 1)\n        grid.addWidget(ratio, 2, 0)\n        grid.addWidget(self.ratioEdit, 2, 1)\n        grid.addWidget(nameFile, 3, 0)\n        grid.addWidget(self.nameFileEdit, 3, 1)\n\n        \n        vbox = QVBoxLayout() \n        vbox.addLayout(grid)\n        vbox.addLayout(hbox)\n        \n        self.setLayout(vbox)\n        \n        self.resize(600,400)\n        self.setWindowTitle('Output permutations') \n\n        \n    \n    \n    def save(self):\n        #sqlTools.buildTable(self.nameEdit.text(), int(self.numEdit.text()))\n        #print(self.nameEdit.currentText(), self.ratioEdit.text(), self.nameFileEdit.text())\n        package.sqlTools.produceFinalData(self.nameEdit.currentText(), self.ratioEdit.text(), self.nameFileEdit.text())\n        #sqlTools.produceFinalData()\n        self.accept()\n    \n    \n    def noSave(self, event):\n        reply = QMessageBox.question(self, ' Info', ' Are you sure you want to exit without saving? ', QMessageBox.Yes, QMessageBox.No)\n        if reply == QMessageBox.Yes:\n            self.accept()\n        else:\n            event.ignore()\n    \n    def closeEvent(self, event):\n        # redefine closeEvent\n        reply = QMessageBox.question(self, ' Info', ' Are you sure you want to exit without saving? ', QMessageBox.Yes, QMessageBox.No)\n        if reply == QMessageBox.Yes:\n            event.accept()\n        else:\n            event.ignore()\n    \n\n    \n    \n    \n    \n    \nif __name__ == '__main__':\n\n    app = QApplication(sys.argv)\n    app.aboutToQuit.connect(app.deleteLater)\n    ex = Example()\n    ex.show()\n    sys.exit(app.exec_())\n","sub_path":"package/dialogOutput.py","file_name":"dialogOutput.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"491591306","text":"#!/usr/bin/env python\n\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\n\ncookies = {\n    'PHPSESSID': 'e271e9d6126ffb79bde9b68247e736d7',\n}\n\nheaders = {\n    'Host': 'imei24.com',\n    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:66.0) Gecko/20100101 Firefox/66.0',\n    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n    'Accept-Language': 'en-US,en;q=0.5',\n    'Accept-Encoding': 'gzip, deflate',\n    'Referer': 'https://imei24.com/',\n    'Content-Type': 'application/x-www-form-urlencoded',\n    'Content-Length': '17',\n    'Connection': 'close',\n    'Upgrade-Insecure-Requests': '1',\n}\n\ndata = 's='\n\nresponse = requests.post('https://imei24.com/checking/', headers=headers, cookies=cookies, data=data)\n#print(response.text)\n\nsoup = BeautifulSoup(response.text, 'html.parser')\n#print(soup)\n\noutput = soup.find('script', type='application/ld+json').text\nprint(output)\n","sub_path":"imei.py","file_name":"imei.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"6464813","text":"\ndef orderMin(a, b):\n    if a < b:\n        return a \n    else:\n        return b\n\ndef orderMax(a, b):\n    if a > b:\n        return a \n    else:\n        return b \n    \ndef addition(zanDni, minimum, maximum):\n    n = minimum\n    while n < (maximum + 1):\n        if not(n in zanDni):\n            zanDni.append(n)\n        n += 1\n    return zanDni\n    \ndef saveDay(dni):\n    ostatok = 90 - dni\n    return ostatok\n\ndef maximumDayVisit(i):\n    maxDay = 0 \n    for k in i:\n        n = orderMax(k[0], k[1])\n        if n > maxDay:\n            maxDay = n\n    return maxDay\n\nprint(\"\"\"\n    You have opened the 'Schengen calculator'.\n    \"\"\")\n\n# past trips \ni = [[2, 14], [16, 20], [40, 34], [51, 70], [50, 50], [30, 40], 
[200, 140]]\n# planned trips\n#j = [250, 263]\nstart = \"\"\n\nmaxVizit = maximumDayVisit(i)\n\nwhile start != 'e':\n    start = input(\"\"\"\n    To continue, choose one of the actions:\n    o - print all visit days\n    v - add a visit to the history \n    p - enter a planned visit\n    r - remove a visit from the history\n    e - exit\n    \"\"\")\n    \n    if start == 'e':\n        break\n\n    try:\n        len(zanDni)\n    except NameError: \n        zanDni = []\n    \n    for line in i:\n        maximum = orderMax(line[0], line[1])\n        minimum = orderMin(line[0], line[1])\n        \n        addition(zanDni, minimum, maximum)\n    \n    if start == 'o':\n        print(zanDni)\n    \n    if start == 'v':\n        delet = []\n        openNewVizit = int(input(\"Enter the start of the new visit:\"))\n        closeNewVizit = int(input(\"Enter the end of the new visit:\"))\n        delet.append(openNewVizit)\n        delet.append(closeNewVizit)\n        i.append(delet)\n        addition(zanDni, orderMin(openNewVizit, closeNewVizit), orderMax(openNewVizit, closeNewVizit))\n        #print(zanDni)\n    \n    if start == 'r':\n        removeOpenVizit = int(input(\"Enter the start of the visits to remove:\"))\n        removeCloseVizit = 0 \n        while removeCloseVizit < removeOpenVizit:\n            removeCloseVizit = int(input(\"Enter the end of the visits to remove:\"))\n        k = 0\n        while zanDni:\n            try: \n                if removeOpenVizit <= zanDni[k] and zanDni[k] <= removeCloseVizit:\n                    \n                    zanDni.remove(zanDni[k])\n                else:\n                    k += 1\n            except IndexError: \n                break\n        \n        \n    if start == 'p': \n        ostatok = -1\n        while ostatok < 0:\n            ostatok = 0\n            j = []\n            fstPlan = 0\n            scndPlan = 0\n            \n            while fstPlan < maxVizit:\n                print('The last day of your previous trip was:', maxVizit)\n                fstPlan = int(input('Enter the start of the planned visit:'))\n                \n            while scndPlan < fstPlan:\n                scndPlan = int(input('Enter the end of the planned visit:'))\n            j.append(fstPlan)\n            j.append(scndPlan)\n            \n            dni = 0\n\n            firstDay = orderMin(j[0], j[1]) - 180\n            for unit in zanDni:\n                if unit >= firstDay:\n                    dni += 1\n            ostatok = saveDay(dni)\n            \n            print(\"Number of days you have spent in the Schengen area by the start of the new visit:\", dni)\n            \n            print(\"Number of days remaining:\", ostatok) \n            \n            lastDay = orderMax(j[0], j[1]) - 180\n            \n            newDays = lastDay - firstDay + 1\n            print(\"Number of days you will be in the Schengen area:\", newDays)\n            \n            maximum = orderMax(j[0], j[1])\n            minimum = orderMin(j[0], j[1])\n            \n            addition(zanDni, minimum, maximum)\n            remainingDays = 0\n            usedDays = 0\n            for unit in zanDni:\n                if unit >= lastDay:\n                    usedDays += 1\n            \n            remainingDays = 90 - usedDays\n            print(\"Number of days you will have left after the specified departure date:\", remainingDays)\n            \n            pay = input('How many euros per day are you going to live on?')\n            \n            paymentPlan = int(pay) * newDays\n            \n            print(\"You will need\", paymentPlan, \"euros for the trip.\")\n            k = 0\n            while zanDni:\n                try: \n                    if fstPlan <= zanDni[k] and zanDni[k] <= scndPlan:\n                        \n                        zanDni.remove(zanDni[k])\n                    else:\n                        k += 1\n                except IndexError: \n                    break\n\n\n\n\n","sub_path":"SchengenСalculator.py","file_name":"SchengenСalculator.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"498486202","text":"import io\nimport re\nimport os\nfrom collections import Counter\nimport json\n\ndef get_tickers(tweet):\n    tick_pattern = re.compile(\"\\\\W\\\\$(\\\\w+)\\\\W\")\n    return [x for x in tick_pattern.findall(tweet)]\n\n\nticker_file = \"tickers_lm_data/tweets_tickers.txt\"\nticker_count_file = \"output/tweet_ticker_counts_norm.json\"\nlexicon_file = \"tickers_lm_data/lexicon.txt\"\nticker_count_file_full = 
\"output/tweet_ticker_counts_full.json\"\ntweet_ticker_dict_intersection = \"output/tweet_ticker_full_lexicon_intersection.txt\"\nlexicon_list_file = \"output/lexicon_list.txt\"\nextra_lexicon_file = \"data/extra_lexicon.txt\"\n\ntry:\n os.mkdir(\"output\")\nexcept:\n pass\n\nfull_ticker_count = Counter()\nticker_count = Counter()\nfor line in open(ticker_file):\n update_tickers = get_tickers(line)\n full_ticker_count.update(update_tickers)\n ticker_count.update([c.lower() for c in update_tickers])\n\nwith io.open(ticker_count_file, 'w') as tf, io.open(ticker_count_file_full, 'w') as ftf:\n json.dump(ticker_count, tf)\n json.dump(full_ticker_count, ftf)\n\nprint(\"# tickers : \", len(full_ticker_count))\nprint(\"# tickers (lc) : \", len(ticker_count))\n\nlexicon_list = []\nwith io.open(lexicon_file) as lf:\n for line in lf:\n lexical_pattern = re.compile(\"[\\w<>']+\")\n word_match = lexical_pattern.match(line)\n if word_match:\n lexicon_list.append(word_match.group().lower())\n\nwith io.open(extra_lexicon_file) as ef:\n lexicon_list += ef.read().split(\"\\n\")\n \nlexicon_list = list(set(lexicon_list))\nprint(\"lexicon count: \", len(lexicon_list))\n\nwith io.open(lexicon_list_file, 'w') as lf:\n lf.write(\"\\n\".join(sorted(lexicon_list)))\n\nticker_elements = ticker_count.keys()\nfull_ticker_elements = full_ticker_count.keys()\nambiguous_tickers = [x for x in ticker_elements if x in lexicon_list]\nambiguous_tickers_full = [x for x in full_ticker_elements if x in lexicon_list]\nprint(\"# ambiguous tickers: \", len(ambiguous_tickers_full))\nprint(\"# ambiguous tickers (lower): \", len(ambiguous_tickers))\nwith io.open(tweet_ticker_dict_intersection, 'w') as intf:\n intf.write(\"\\n\".join(sorted(ambiguous_tickers_full)))\n\n","sub_path":"src/extract_tweet_lexicon_info.py","file_name":"extract_tweet_lexicon_info.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"13188089","text":"from flask import Flask, stream_with_context, request, Response, flash\r\nimport time\r\nfrom time import sleep\r\nfrom flask import render_template, Response, make_response, redirect, url_for\r\n\r\nimport os\r\nimport shutil\r\nimport cv2 as cv\r\n\r\nfrom testing1 import maskdetect\r\nfrom cough2 import coughdetect\r\n\r\nfrom playsound import playsound\r\n\r\napp = Flask(__name__)\r\n\r\nvideo = cv.VideoCapture(0)\r\ncount=1\r\n\r\n@app.route(\"/\")\r\ndef home():\r\n return render_template(\"covid-html.html\")\r\n\r\ninput_dir = './VideoImage'+'/'+'Input'\r\nsaved_dir = './VideoImage'+'/'+'Saved'\r\nmask_dir = './VideoImage'+'/'+'Masked'\r\nunmask_dir = './VideoImage'+'/'+'Unmasked'\r\n\r\ndef gen(video):\r\n while True:\r\n tmpfile_list = os.listdir(input_dir)\r\n infile_list = [file for file in tmpfile_list if file.endswith(\".jpg\")]\r\n# print(infile_list)\r\n\r\n if(len(infile_list) == 0):\r\n sleep(2)\r\n\r\n for i in range(0,len(infile_list)): \r\n infilename = input_dir + '/' + infile_list[i]\r\n\r\n# print(infilename)\r\n image = cv.imread(infilename,1)\r\n\r\n ret, jpeg = cv.imencode('.jpg', image)\r\n frame = jpeg.tobytes()\r\n yield (b'--frame\\r\\n'b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\r\n sleep(1)\r\n shutil.move(infilename,saved_dir+'/'+infile_list[i])\r\n# shutil.copy(infilename,saved_dir+'/'+infile_list[i])\r\n sleep(1)\r\n tmpfile_list = \"\"\r\n# cv.waitKey(1000)\r\n cv.destroyAllwindows()\r\n\r\n@app.route('/video/feed')\r\ndef video_feed():\r\n global video\r\n return 
Response(gen(video),\r\n                    mimetype='multipart/x-mixed-replace; boundary=frame')\r\n\r\ndef detectmask(): \r\n    savefile_list = os.listdir(saved_dir)\r\n#    print(savefile_list)\r\n    if(len(savefile_list) == 0):\r\n        sleep(2)\r\n        savefile_list = os.listdir(saved_dir)\r\n#        print(savefile_list)\r\n\r\n    for i in range(0,len(savefile_list)): \r\n        filename = saved_dir + '/' + savefile_list[i]\r\n\r\n#        print('detectmask:'+filename)\r\n        maskdetect(savefile_list[i])\r\n        os.remove(filename)\r\n\r\ndef genmask(video): \r\n    while True:\r\n        mask_list = os.listdir(mask_dir)\r\n#        print(mask_list)\r\n        if(len(mask_list) == 0):\r\n            detectmask()\r\n            mask_list = os.listdir(mask_dir)\r\n        \r\n        for i in range(0,len(mask_list)): \r\n            maskname = mask_dir + '/'+mask_list[i]\r\n            image = cv.imread(maskname,1)\r\n\r\n            ret, jpeg = cv.imencode('.jpg', image)\r\n            frame = jpeg.tobytes()\r\n            yield (b'--frame\\r\\n'b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\r\n            sleep(1)\r\n            shutil.move(maskname,unmask_dir+'/'+mask_list[i])\r\n#            os.remove(filename)\r\n        cv.waitKey(1000)\r\n    cv.destroyAllWindows()\r\n\r\n@app.route('/mask/detect')\r\ndef mask_detect():\r\n    global video\r\n    return Response(genmask(video),\r\n                    mimetype='multipart/x-mixed-replace; boundary=frame') \r\n\r\n@app.route('/cough')\r\ndef download_file():\r\n    cough_dir ='./VideoImage'+'/'+'Input'\r\n\r\n    while True:\r\n        tfile_list = os.listdir(cough_dir)\r\n        coughfile_list = [file for file in tfile_list if file.endswith(\".wav\")]\r\n#        print(coughfile_list)\r\n\r\n        if(len(coughfile_list) == 0):\r\n            sleep(2)\r\n\r\n        for i in range(0,len(coughfile_list)): \r\n            audioname = cough_dir + '/' + coughfile_list[i]\r\n            coughdetect(audioname)\r\n            playsound('cough.wav')\r\n\r\n#            os.remove(audioname)\r\n        sleep(2)\r\n\r\n    return render_template(\"covid-html.html\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(host='0.0.0.0', port=5000, debug=True, threaded=True)\r\n","sub_path":"server/covid19.py","file_name":"covid19.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"262822199","text":"import requests\nimport json\nclass BaseRequest():\n    def request_get(self,session,url, data, header=None, cookie=None):\n        if header==None and cookie==None:\n            res = session.get(url=url,params=data)\n        else:\n            res = session.get(url=url, params=data, headers=header)\n        return res\n    def request_post(self,session,url, data, header=None, cookie=None):\n        if header==None and cookie==None:\n            res = session.post(url=url, data=data)\n        else:\n            res = session.post(url=url, data=data, headers=header, cookies=cookie)\n        return res\n    def run_main(self,session,method,url, data, header=None, cookie=None):\n        try:\n            res = ''\n            if method == 'get':\n                res = BaseRequest().request_get(session=session,url=url,data=data,header=header,cookie=cookie)\n            elif method == 'post':\n                res = BaseRequest().request_post(session=session,url=url, data=data, header=header, cookie=cookie)\n            return res\n        except Exception as e:\n            print('Call to the main request function failed: {}'.format(e))\n    def get_token(self,r):\n        a = json.loads(r.text)\n        token = a.get('data').get('token')\n        return token\nif __name__ == '__main__':\n    session = requests.session()\n    base_request = BaseRequest()\n    base_url = 'https://api-test-admin-zuul.qkduo.cn/'\n    login_path = 'auth-center/nologin/login'\n    getUserInfo_path = 'auth-center/noauth/getUserInfo'\n    login_data = {\n        'email': 'admin@qingclass.com',\n        'password': 
'e10adc3949ba59abbe56e057f20f883e'\n    }\n    getUserInfo_data = {}\n    login_send = base_request.run_main(session=session,method='post',url=base_url+login_path, data=login_data)\n    token = BaseRequest().get_token(login_send)\n    header = {'cookie': 'zuulToken=' + token}\n    getUserInfo_send = base_request.run_main(session=session,method='get',url=base_url + getUserInfo_path, data=getUserInfo_data,header=header)\n    print(login_send.text)\n    print(getUserInfo_send.text)\n","sub_path":"BaseRequest.py","file_name":"BaseRequest.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"181245003","text":"#\n# @lc app=leetcode id=505 lang=python\n#\n# [505] The Maze II\n#\nimport heapq\nDIRECTION = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n\nclass Solution(object):\n    def shortestDistance(self, maze, start, destination):\n        \"\"\"\n        :type maze: List[List[int]]\n        :type start: List[int]\n        :type destination: List[int]\n        :rtype: int\n        \"\"\"\n        if not maze: return -1\n        # basic dimensions\n        rows = len(maze)\n        cols = len(maze[0])\n\n        # min heap: the distance value has to be the first element\n        heap = [(0, start[0], start[1])]\n        visited = set()\n        \n        while heap:\n            dist, x, y = heapq.heappop(heap)\n            # check if visited\n            if (x, y) in visited: continue\n            visited.add((x, y))\n            # check if arrived\n            if x == destination[0] and y == destination[1]: return dist\n            for d_x, d_y in DIRECTION:\n                nxt_x, nxt_y = x + d_x, y + d_y\n                cur_dist = dist\n                # roll in one direction until hitting a wall\n                while 0 <= nxt_x < rows and 0 <= nxt_y < cols and \\\n                    maze[nxt_x][nxt_y] == 0:\n                    nxt_x += d_x\n                    nxt_y += d_y\n                    cur_dist += 1\n\n                # x and y locates @ a wall when exiting the above while loop, so we need to backtrack 1 position\n                nxt_x -= d_x\n                nxt_y -= d_y\n\n                # Check if the new starting position has been visited\n                if (nxt_x, nxt_y) not in visited: \n                    heapq.heappush(heap, (cur_dist, nxt_x, nxt_y))\n        \n        return -1\n\n","sub_path":"505.the-maze-ii.py","file_name":"505.the-maze-ii.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"317059370","text":"# -*- coding: utf-8 -*-\nfrom ikm.spiders.newtalk import Spider as myspider\n# from model.config import DBSession\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.settings import Settings\n\nsettings = Settings()\n\n# crawl settings\nsettings.set(\"USER_AGENT\", \"Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36\")\nsettings.set(\"ITEM_PIPELINES\" , {\n    # 'ikm.pipelines.DuplicatesPipeline': 200,\n    # 'ikm.pipelines.RedisPipeline': 300,\n    'ikm.pipelines.DataBasePipeline': 301,\n})\n\nsettings.set(\"DOWNLOADER_MIDDLEWARES\", {\n    'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware' : None,\n    'ikm.useragent.RandomUserAgentMiddleware' :400,\n    'ikm.middleware.SeleniumMiddleware': 543\n})\n\nprocess = CrawlerProcess(settings)\nprocess.crawl(myspider)\nprocess.start()\n","sub_path":"ikm/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"508733851","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 11 16:19:19 2022\n\n@author: tnm12\n\"\"\"\n\nimport numpy as np\nimport scipy.integrate as spi\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib import cm\nimport time\n\nimport 
sys\nsys.path.insert(1,'filepath to simulator location on local computer')\nimport ctRSD_simulator_200 as RSDs # import simulator version 2.0.0\n \n##############################################################################\n#Simulations\n##############################################################################\n\nIN_temp1 = [25,10] # input templates\nIN_temp2 = [10,25]\n\nREP_con = 500 # reporter concentration\n\nt_sim = np.linspace(0,6,1001)*3600 # seconds\n\n\nmodel1 = RSDs.RSD_sim(7) # define the model instance and # of domains\n\n# specify species involved in the reaction\nmodel1.molecular_species('CG{6,7}',DNA_con=45)\nmodel1.molecular_species('G{6,2}',DNA_con=15)\nmodel1.molecular_species('G{7,1}',DNA_con=15)\nmodel1.molecular_species('R{1}',ic=REP_con)\nmodel1.molecular_species('R{2}',ic=REP_con)\n\n\nfor n in range(len(IN_temp1)):\n\n    model1.molecular_species('I{6}',DNA_con=IN_temp1[n])\n    model1.molecular_species('I{7}',DNA_con=IN_temp2[n])\n    \n    \n    # simulate the model\n    model1.simulate(t_sim,smethod='BDF') # BDF method is used because of varying time scales\n    \n\n    # pull out the species from the model solution to plot\n    S1 = model1.output_concentration('S{1}')\n    S2 = model1.output_concentration('S{2}')\n    \n    \n    plt.subplot(2,4,1+n*2)\n    plt.plot(model1.t/60,(S1/REP_con)*100,color='red',linewidth=2,linestyle='--')\n    plt.plot(model1.t/60,(S2/REP_con)*100,color='blue',linewidth=2,linestyle='--')\n    \n    fs = 12\n    plt.xticks(fontsize=fs)\n    plt.yticks(fontsize=fs)\n    plt.ylim(-10,110)\n    plt.xlim(0,240)\n    ax1 = plt.gca()\n    ax1.xaxis.set_tick_params(which='both',size=3,width=1,direction='in',top='on')\n    #ax1.xaxis.set_major_locator(MaxNLocator(integer=True))\n    ax1.yaxis.set_tick_params(which='both',size=3,width=1,direction='in',right='on')\n    plt.xlabel('Time (min)',fontsize=fs)\n    plt.ylabel('Reacted reporter (%)',fontsize=fs)\n    if n == 0:\n        plt.title('I{6} > I{7}')\n    else:\n        plt.title('I{7} > I{6}')\n    plt.legend(['S{1}','S{2}'],frameon=False,fontsize=10)\n    \n\nplt.suptitle('CG Simulation')","sub_path":"ctRSD-simulator-2.0/Examples/Advanced Simulator Features/CG_simulations.py","file_name":"CG_simulations.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"560726577","text":"\"\"\"\nPerforms some CoreNLP operations as a proof of concept for the library.\n\"\"\"\n\nimport json\nimport re\n\nfrom stanfordcorenlp import StanfordCoreNLP as CoreNLP\nfrom tatsu import parse\n\n\nclass StanfordCoreNLP:\n\n    def __init__(self, analysis_type):\n        self.analysis_type = analysis_type\n\n        self.proc = CoreNLP('http://localhost', port=9000)\n        self.props = {\n            'annotators': 'tokenize, ssplit, pos, lemma, ner, parse, sentiment, dcoref, relation, natlog, openie',\n            'pipelineLanguage': 'en',\n            'outputFormat': 'json',\n            'timeout': 300_000 # 5 minutes in milliseconds\n        }\n\n    def run(self, data):\n        result = self.json_cleanup(data, self.analysis_type)\n        # print(result)\n        return result\n\n    def json_cleanup(self, data, analysis_type):\n        \"\"\"\n        When the JSON segments return from the CoreNLP library, they\n        separate the data acquired from each word into their own element.\n\n        For readability's sake, it would be nice to pair all of the information\n        for a given word with that word, making a list of words with their\n        part of speech tags\n        \"\"\"\n        for corpus in data:\n            res = self.proc.annotate(corpus.contents, properties=self.props)\n            res = json.loads(res)\n            # print(res)\n            sentences = []\n            for sentence_res in 
res['sentences']:\n words = []\n for token in sentence_res['tokens']:\n word = {'token': token['originalText']}\n\n if analysis_type == 'ner':\n word['ner'] = token['ner']\n\n words.append(word)\n\n sentence = {'tokens': words}\n\n if analysis_type == 'sentiment':\n # Add space after 'Very' for consistency and display\n sentence['sentiment'] = sentence_res['sentiment'].replace('Very', 'Very ')\n\n sentence['sentimentValue'] = int(sentence_res['sentimentValue'])\n sentence['tree_json'] = TreeStringToList.convert('sentiment', sentence_res['sentimentTree'])\n\n # Extract sentiments from tree on a per-token level\n value_to_name = {0: 'Very negative', 1: 'Negative', 2: 'Neutral', 3: 'Positive', 4: 'Very positive'}\n sentiment_values = map(int, re.findall(r'\\(\\S+\\|sentiment=(\\d)\\|prob=\\d+\\.\\d+ \\S+\\)',\n sentence_res['sentimentTree']))\n for token, value in zip(words, sentiment_values):\n token['sentiment'] = value_to_name[value]\n\n if analysis_type in ['sentiment', 'relation']:\n sentence['parse'] = re.sub(r'\\s+', ' ', sentence_res['parse'])\n\n if analysis_type == 'relation':\n relations = []\n predicates = dict()\n for relation in sentence_res['openie']:\n new_relation = {\n 'subject': {\n 'lemma': relation['subject'],\n 'start': relation['subjectSpan'][0],\n 'end': relation['subjectSpan'][1]\n },\n 'relation': {\n 'lemma': relation['relation'],\n 'start': relation['relationSpan'][0],\n 'end': relation['relationSpan'][1]\n },\n 'object': {\n 'lemma': relation['object'],\n 'start': relation['objectSpan'][0],\n 'end': relation['objectSpan'][1]\n },\n }\n # Find the relation with longest arguments on left and right hand side\n if relation['relation'] in predicates:\n curr_relation = predicates[relation['relation']]\n curr_length = (curr_relation['subject']['end'] - curr_relation['subject']['start']) + \\\n (curr_relation['object']['end'] - curr_relation['object']['start'])\n\n new_length = (new_relation['subject']['end'] - new_relation['subject']['start']) + \\\n (new_relation['object']['end'] - new_relation['object']['start'])\n\n if new_length > curr_length:\n predicates[relation['relation']] = new_relation\n else:\n predicates[relation['relation']] = new_relation\n\n for pred in predicates.values():\n relations.append(pred)\n\n sentence['relations'] = relations\n\n if analysis_type == 'pos':\n sentence['tree_json'] = TreeStringToList.convert('parse', sentence_res['parse'])\n\n sentences.append(sentence)\n\n if analysis_type == 'coref':\n entities = []\n for entity_id, entity in res['corefs'].items():\n mentions = []\n for instance in entity:\n mention = {\n 'mentionid': instance['id'],\n 'sentence': instance['sentNum'] - 1,\n 'tokspan_in_sentence': [\n instance['startIndex'] - 1,\n instance['endIndex'] - 1\n ],\n 'head': instance['headIndex'] - 1,\n 'mentiontype': instance['type'],\n 'animacy': instance['animacy'],\n 'gender': instance['gender'],\n 'number': instance['number'],\n 'representative': instance['isRepresentativeMention']\n }\n mentions.append(mention)\n\n entities.append({'mentions': mentions, 'entityid': int(entity_id)})\n\n return {'sentences': sentences, 'entities': entities}\n else:\n return {'sentences': sentences}\n\n\nclass TreeStringToList:\n PARSE_GRAMMAR = r'''\n start = node $ ;\n node = '(' pos:tag value:children ')' | '(' pos:tag value:token ')' ;\n children = { node }+ ;\n tag = /\\S+/ ;\n token = /\\S+/ ;\n '''\n\n SENTIMENT_GRAMMAR = r'''\n start = node $ ;\n node = '(' tag:tag value:token ')' | '(' tag:tag value:children ')' ;\n children = { node }+ 
;\n tag = pos:pos 'sentiment' sentiment:sentiment 'prob' prob ;\n pos = /\\S+/ ;\n sentiment = /\\d/ ;\n prob = /\\d+\\.\\d+/ ;\n token = /\\S+/ ;\n '''\n\n # Test with these\n # '(ROOT\\n (S\\n (NP (DT The) (JJ quick) (JJ brown) (NN fox))\\n (VP (VBD jumped)\\n (PP (IN over)\\n (NP (DT the) (JJ lazy) (NN dog))))\\n (. .)))'\n # '(ROOT|sentiment=1|prob=0.550\\n (NP|sentiment=2|prob=0.946 (DT|sentiment=2|prob=0.993 The)\\n (@NP|sentiment=2|prob=0.871 (JJ|sentiment=2|prob=0.993 quick)\\n (@NP|sentiment=2|prob=0.697 (JJ|sentiment=2|prob=0.929 brown) (NN|sentiment=2|prob=0.631 fox))))\\n (@S|sentiment=1|prob=0.510\\n (VP|sentiment=2|prob=0.469 (VBD|sentiment=2|prob=0.631 jumped)\\n (PP|sentiment=2|prob=0.620 (IN|sentiment=2|prob=0.991 over)\\n (NP|sentiment=2|prob=0.480 (DT|sentiment=2|prob=0.994 the)\\n (@NP|sentiment=1|prob=0.489 (JJ|sentiment=1|prob=0.716 lazy) (NN|sentiment=3|prob=0.852 dog)))))\\n (.|sentiment=2|prob=0.997 .)))'\n\n @staticmethod\n def convert(result_type, text):\n text = text.replace('(', ' ( ').replace(')', ' ) ')\n if result_type == 'parse':\n text = text[7:-2]\n ast = parse(TreeStringToList.PARSE_GRAMMAR, text)\n tree = TreeStringToList.flatten_parse_ast(ast)\n elif result_type == 'sentiment':\n text = text.replace('|', ' ').replace('=', ' ')\n ast = parse(TreeStringToList.SENTIMENT_GRAMMAR, text)\n tree = TreeStringToList.flatten_sentiment_ast(ast)\n else:\n raise NotImplementedError(f'Analysis type \"{result_type}\" not implemented')\n\n tree.sort(key=lambda x: x['id'])\n return tree\n\n @staticmethod\n def flatten_parse_ast(ast, nodes=None, head=None):\n if nodes is None:\n nodes = []\n if head is None:\n head = 0\n\n prev_ids = [node['id'] for node in nodes]\n node_id = max(prev_ids) + 1 if prev_ids else head + 1\n\n nodes.append({'id': node_id, 'tag': '', 'head': head, 'value': ast['pos']})\n\n if isinstance(ast['value'], str):\n nodes.append({'id': node_id + 1, 'tag': '', 'head': node_id, 'value': ast['value']})\n elif isinstance(ast['value'], list):\n for child in ast['value']:\n TreeStringToList.flatten_parse_ast(child, nodes, node_id)\n else:\n raise RuntimeError(f'Unexpected type \"{ast[\"value\"]}\"')\n\n return nodes\n\n @staticmethod\n def flatten_sentiment_ast(ast, nodes=None, head=None):\n if nodes is None:\n nodes = []\n if head is None:\n head = 0\n\n prev_ids = [node['id'] for node in nodes]\n node_id = max(prev_ids) + 1 if prev_ids else head + 1\n\n nodes.append({'id': node_id, 'tag': int(ast['tag']['sentiment']), 'head': head, 'value': ast['tag']['pos']})\n\n if isinstance(ast['value'], str):\n nodes.append({'id': node_id + 1, 'tag': '', 'head': node_id, 'value': ast['value']})\n elif isinstance(ast['value'], list):\n for child in ast['value']:\n TreeStringToList.flatten_sentiment_ast(child, nodes, node_id)\n else:\n raise RuntimeError(f'Unexpected type \"{ast[\"value\"]}\"')\n\n return nodes\n","sub_path":"linguine/ops/stanford_core_nlp.py","file_name":"stanford_core_nlp.py","file_ext":"py","file_size_in_byte":10007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"411965958","text":"import os\nclass Main:\n\tdef createExpect(self,quality,spawn,**kwargs):\n\t\tif kwargs is not None:\n\t\t\tlista1=list()\n\t\t\tlista2=list()\n\t\t\tfor i in range(1,quality+1):\n\t\t\t\tfor key,value in kwargs.iteritems():\n\t\t\t\t\tif key==\"expect\"+str(i):\n\t\t\t\t\t\tlista1.append(value)\n\t\t\tfor i in range(1,quality+1):\n for key,value in kwargs.iteritems():\n if key==\"send\"+str(i):\n 
lista2.append(value)\n\t\t\ta=open(\"ext\",\"w\")\n\t\t\ta.close()\n\t\t\tos.system(\"sudo chmod +x ext\")\n\t\t\tarchivo=open(\"ext\",\"a\")\n\t\t\tarchivo.write(\"#!/bin/bash\\n#!/usr/bin/expect -f\\n\")\n\t\t\tnum=0\n\t\t\tfor i in range(0,len(lista1)):\n\t\t\t\tarchivo.write(\"set exp\"+str(i+1)+\" [lindex $argv \"+str(num)+\"]\\n\")\n\t\t\t\tarchivo.write(\"set send\"+str(i+1)+\" [lindex $argv \"+str(num+1)+\"]\\n\")\n\t\t\t\tnum+=2\n\t\t\tarchivo.write(\"spawn \"+spawn+\"\\n\")\n\t\t\tarchivo.write(\"expect -exact \\\"$exp1\\\"\\n\")\n\t\t\tarchivo.write(\"send -- \\\"$send1\\\\r\\\"\\n\")\n\t\t\tfor h in range(0,quality-1):\n\t\t\t\tarchivo.write(\"expect \\\"$exp\"+str(int(h+2))+\"\\\"\\n\")\n\t\t\t\tarchivo.write(\"send -- \\\"$send\"+str(int(h+2))+\"\\\\r\\\"\\n\")\n\t\t\tarchivo.write(\"expect eof\")\n\t\t\tarchivo.close()\n\t\t\tvar=\"\"\n\t\t\tfor i in range(0,quality):\n\t\t\t\tvar+=\"\\\"\"+lista1[i]+\"\\\" \"\n\t\t\t\tvar+=\"\\\"\"+lista2[i]+\"\\\" \"\n\t\t\tvar=\"expect ext \"+var\n\t\t\tos.system(var)\n\t\t\tos.system(\"sudo rm ext\")\n","sub_path":"Expect.py","file_name":"Expect.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"364117359","text":"# Copyright (c) 2019-2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom dask.distributed import wait, default_client\nfrom cugraph.dask.common.input_utils import (get_distributed_data,\n get_vertex_partition_offsets)\nfrom cugraph.dask.traversal import mg_sssp_wrapper as mg_sssp\nimport cugraph.comms.comms as Comms\nimport cudf\nimport dask_cudf\n\n\ndef call_sssp(sID,\n data,\n num_verts,\n num_edges,\n vertex_partition_offsets,\n start):\n wid = Comms.get_worker_id(sID)\n handle = Comms.get_handle(sID)\n return mg_sssp.mg_sssp(data[0],\n num_verts,\n num_edges,\n vertex_partition_offsets,\n wid,\n handle,\n start)\n\n\ndef sssp(graph,\n source):\n\n \"\"\"\n Compute the distance and predecessors for shortest paths from the specified\n source to all the vertices in the graph. The distances column will store\n the distance from the source to each vertex. The predecessors column will\n store each vertex's predecessor in the shortest path. Vertices that are\n unreachable will have a distance of infinity denoted by the maximum value\n of the data type and the predecessor set as -1. 
The source vertex's\n predecessor is also set to -1.\n The input graph must contain edge list as dask-cudf dataframe with\n one partition per GPU.\n\n Parameters\n ----------\n graph : cugraph.DiGraph\n cuGraph graph descriptor, should contain the connectivity information\n as dask cudf edge list dataframe.\n Undirected Graph not currently supported.\n source : Integer\n Specify source vertex\n\n Returns\n -------\n df : dask_cudf.DataFrame\n df['vertex'] gives the vertex id\n\n df['distance'] gives the path distance from the\n starting vertex\n\n df['predecessor'] gives the vertex id it was\n reached from in the traversal\n\n Examples\n --------\n >>> import cugraph.dask as dcg\n >>> ... Init a DASK Cluster\n >> see https://docs.rapids.ai/api/cugraph/stable/dask-cugraph.html\n >>> chunksize = dcg.get_chunksize(input_data_path)\n >>> ddf = dask_cudf.read_csv(input_data_path, chunksize=chunksize,\n delimiter=' ',\n names=['src', 'dst', 'value'],\n dtype=['int32', 'int32', 'float32'])\n >>> dg = cugraph.DiGraph()\n >>> dg.from_dask_cudf_edgelist(ddf, 'src', 'dst')\n >>> df = dcg.sssp(dg, 0)\n \"\"\"\n\n client = default_client()\n\n graph.compute_renumber_edge_list(transposed=False)\n ddf = graph.edgelist.edgelist_df\n vertex_partition_offsets = get_vertex_partition_offsets(graph)\n num_verts = vertex_partition_offsets.iloc[-1]\n num_edges = len(ddf)\n data = get_distributed_data(ddf)\n\n if graph.renumbered:\n source = graph.lookup_internal_vertex_id(cudf.Series([source],\n dtype='int32')).compute()\n source = source.iloc[0]\n\n result = [client.submit(\n call_sssp,\n Comms.get_session_id(),\n wf[1],\n num_verts,\n num_edges,\n vertex_partition_offsets,\n source,\n workers=[wf[0]])\n for idx, wf in enumerate(data.worker_to_parts.items())]\n wait(result)\n ddf = dask_cudf.from_delayed(result)\n\n if graph.renumbered:\n ddf = graph.unrenumber(ddf, 'vertex')\n ddf = graph.unrenumber(ddf, 'predecessor')\n ddf[\"predecessor\"] = ddf[\"predecessor\"].fillna(-1)\n\n return ddf\n","sub_path":"python/cugraph/dask/traversal/sssp.py","file_name":"sssp.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"436580016","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('classifier', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='classifier',\n options={'ordering': ['-sort', 'translit'], 'verbose_name_plural': '\\u041a\\u043b\\u0430\\u0441\\u0441\\u0438\\u0444\\u0438\\u043a\\u0430\\u0442\\u043e\\u0440'},\n ),\n migrations.AddField(\n model_name='classifier',\n name='classifiers',\n field=models.ManyToManyField(related_name='classifiers_rel_+', to='classifier.Classifier', blank=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"classifier/migrations/0002_auto_20150112_1243.py","file_name":"0002_auto_20150112_1243.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"284065830","text":"# 面���题26:树的子结构\n# 输入两棵二叉树A和B,判断B是不是A的子结构。约定空树不是任何一个树的子结构\n\n# 思路分析:首先在A中找到值与B的根节点的值相等的节点,\n# 然后判断找到的节点是不是含有和B一样的结构;\n# 如果是则返回True, 不是则需要继续在A中遍历找到与B的根节点的值相等的节点,依次重复判断\n\n\n# 树节点定义\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def HasSubtree(self, pRoot1, pRoot2):\n\n result = False\n\n if 
pRoot1 and pRoot2:\n            # A subtle point here is how a computer decides that two floating-point\n            # numbers are equal: floats carry representation error, so two floats are\n            # judged equal when the absolute value of their difference is below a\n            # very small threshold.\n            if pRoot1.val == pRoot2.val:\n                result = self.is_equal(pRoot1, pRoot2)  # reaching this step means at least one node in A has the same value as B's root\n\n            # Check whether the left subtree contains B's structure\n            if not result:\n                result = self.HasSubtree(pRoot1.left, pRoot2)\n            # Check whether the right subtree contains B's structure\n            if not result:\n                result = self.HasSubtree(pRoot1.right, pRoot2)\n\n        return result\n\n    def is_equal(self, root1, root2):\n        # If B's subtree is empty, B counts as a substructure of A\n        if not root2:\n            return True\n        # If A's subtree is empty while B's subtree is not, B is not a substructure of A\n        if not root1:\n            return False\n        if root1.val != root2.val:\n            return False\n        \"\"\"The three ifs above mainly take effect during the substructure comparison\"\"\"\n\n        # The root values match, so check whether both left and right subtrees match\n        return self.is_equal(root1.left, root2.left) and self.is_equal(root1.right, root2.right)\n","sub_path":"code_with_name/test25_prob26_树的子结构.py","file_name":"test25_prob26_树的子结构.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"95517980","text":"\"\"\"\nGym Environment for the Yamor robot of the\nWebots simulator (www.cyberbotics)\n\nAuthor: Florin Dzeladini\n\"\"\"\nimport numpy as np\nfrom gym import utils\nfrom gym.envs.webots import webots_env\n\nYAMOR_CFG = {\n    'obs_dim' : 7,\n    'action_space' : np.array([\n        [ -1, 1],\n        [ -1, 1],\n        [ -1, 1],\n        [ -1, 1],\n        [ -1, 1],\n        [ -1, 1],\n        [ -1, 1],\n        [ -1, 1]\n        ])\n}\n\nclass Yamor(webots_env.WebotsEnv):\n\n    time = 0\n    doneTimes = 0\n    hardStopLongitudinalConstrain = -0.05\n\n\n\n    def __init__(self):\n        webots_env.WebotsEnv.__init__(self, YAMOR_CFG, 2)\n\n\n    def _step(self,action):\n        self.time += self.frame_skip*self.model.opt['timestep']\n        reward_forward = 0\n        reward_leg = 0\n        reward_survive = 0\n\n        # to use velocities for reward : maximizes acceleration direction\n        optimizeFor = 'qvel'\n        # to use positions for reward : maximizes speed direction\n        #optimizeFor = 'qpos'\n\n        if(len(self.model.data[optimizeFor]) == 0): # if first step\n            self.do_simulation(action, self.frame_skip)\n        else:\n            # 1) Save state before doing action\n            head_vec_before = self.model.data[optimizeFor][0,0:3]\n            mid_vec_before = self.model.data[optimizeFor][3,0:3]\n            leg_vec_before = 0.5*(self.model.data[optimizeFor][0,0:3]+self.model.data[optimizeFor][-1,0:3])\n            target_vec_longitudinal_motion = [0,0,-1]\n            target_vec_armlike_motion = np.array([1,0,0])\n            target_vec_armlike_motion = target_vec_armlike_motion/np.linalg.norm(target_vec_armlike_motion)\n            # 2) Do action\n            self.do_simulation(action, self.frame_skip)\n            head_vec_after = self.model.data[optimizeFor][0,0:3]\n            leg_vec_after = 0.5*(self.model.data[optimizeFor][0,0:3]+self.model.data[optimizeFor][-1,0:3])\n            mid_vec_after = self.model.data[optimizeFor][3,0:3]\n            # 3) Compute reward\n            #reward = np.dot(target_vec,(head_vec_after-head_vec_before))\n            ndiffHead = mid_vec_after-mid_vec_before\n            norm = np.linalg.norm(ndiffHead)\n            if norm != 0:\n                ndiffHead = ndiffHead/norm\n            ndiffLeg = leg_vec_after-leg_vec_before\n            norm = np.linalg.norm(ndiffLeg)\n            if norm != 0:\n                ndiffLeg = ndiffLeg/norm\n            reward_forward = np.dot(target_vec_armlike_motion,ndiffHead)\n            reward_leg = np.dot(target_vec_armlike_motion,ndiffLeg)\n\n\n        # 4) Return everything\n        ob = self.state_vector()\n\n        done = False\n\n        # hardStopLongitudinalConstrain IS\n        # = self.model.data['qpos'][4][0]-0.05\n        # = -0.05 otherwise\n        if(self.model.data['qpos'][4][0] > -0.05 and self.model.data['qpos'][4][0] - 0.05 > self.hardStopLongitudinalConstrain):\n
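            # Ratchet: the hard-stop constraint only ever moves forward, tracking the\n            # robot's best longitudinal position so far (minus a 0.05 margin).\n            self.hardStopLongitudinalConstrain = self.model.data['qpos'][4][0] - 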
0.05\n\n\n\n\n if(self.time > 10+self.doneTimes/30.0):\n self.time = 0;\n done = True\n #if((self.time > 1.0 and self.model.data['qpos'][0][2] > 0) ):\n if(self.model.data['qpos'][4][0] < 0.0):\n reward_survive = -10\n if((self.time > 1.0 and self.model.data['qpos'][4][0] < -0.05) ):\n reward_survive = -10\n self.time = 0;\n done = True\n\n if(done):\n self.hardStopLongitudinalConstrain = -0.05\n print(\"done, next will last {} sec.\".format(10+self.doneTimes/30.0))\n self.doneTimes += 1\n\n\n reward = max(0,reward_forward)+max(0,reward_leg)+reward_survive\n return ob, reward, done, dict(\n reward_forward=max(0,reward_forward),\n reward_ctrl=max(0,reward_leg),\n reward_contact=0,\n reward_survive=reward_survive)\n def state_vector(self):\n # State vector not in absolute coordinate but relative to the\n # average position\n #ref = sum(self.model.data['qpos'][:,:])/len(self.model.data['qpos'])\n ref = self.model.data['qpos'][3,:] # only a subpart used for ref\n return np.concatenate([\n np.array([self.model.data['phase']]).flat,\n (self.model.data['qpos'][:,:]-ref).flat\n #https://www.cyberbotics.com/doc/reference/supervisor#wb_supervisor_node_get_velocity\n #TODO : not clear how to transform angular velocities in local coordinate\n #(self.model.data['qvel'][4:6,3:]-ref).flat # velocities is a 6 DOF in webots\n ])\n","sub_path":"gym/envs/webots/yamor.py","file_name":"yamor.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"408834951","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 10 22:33:33 2017\n\n@author: yxl\n\"\"\"\nfrom imagepy.core.engine import Simple, Filter, Free\nfrom scipy.ndimage.filters import gaussian_filter\nfrom sciapp.object import Surface, MarkText\nfrom sciapp.util import surfutil\n\nclass Show(Free):\n title = 'Show Viewer 3D'\n def run(self, para):\n self.app.show_mesh()\n\nclass Surface2D(Simple):\n title = '2D Surface'\n note = ['8-bit', '16-bit', 'float']\n para = {'name':'undifine', 'scale':2, 'sigma':2,'h':1}\n view = [(str, 'name', 'Name', ''),\n (int, 'scale', (1,5), 0, 'down scale', 'pix'),\n (int, 'sigma', (0,30), 0, 'sigma', ''),\n (float, 'h', (0.1,10), 1, 'scale z', '')]\n\n def run(self, ips, imgs, para = None):\n ds, sigma = para['scale'], para['sigma']\n vts, fs, ns, cs = surfutil.build_surf2d(ips.img, ds=ds, sigma=para['sigma'], k=para['h'])\n self.app.show_mesh(Surface(vts, fs, ns, cs), para['name'])\n\nclass Surface3D(Simple):\n modal = False\n title = '3D Surface'\n note = ['8-bit', 'stack3d', 'preview']\n para = {'name':'undifine', 'ds':2, 'thr':128, 'step':1, 'color':(0,255,0)}\n view = [(str, 'name', 'Name', ''),\n ('slide', 'thr', (0,255), 0, 'threshold'),\n (int, 'ds', (1,20), 0, 'down scale', 'pix'),\n (int, 'step', (1,20), 0, 'march step', 'pix'),\n ('color', 'color', 'color', 'rgb')]\n\n def load(self, ips):\n self.buflut = ips.lut\n ips.lut = ips.lut.copy()\n return True\n \n def preview(self, ips, para):\n ips.lut[:] = self.buflut\n ips.lut[:para['thr']] = [255,0,0]\n\n def cancel(self, ips):\n ips.lut = self.buflut\n\n def run(self, ips, imgs, para = None):\n ips.lut = self.buflut\n cs = tuple([int(i/255.0) for i in para['color']])\n vts, fs, ns, cs = surfutil.build_surf3d(ips.imgs, para['ds'], para['thr'], para['step'], cs)\n self.app.show_mesh(Surface(vts, fs, ns, cs), para['name'])\n\nclass ImageCube(Simple):\n modal = False\n title = '3D Image Cube'\n note = ['8-bit', 'rgb', 'stack3d']\n para = {'name':'undifine', 'ds':1, 
'color':(0,255,0), 'surface':True, 'box':False}\n    view = [(str, 'name', 'Name', 'xxx-surface'),\n            (bool, 'surface', 'show surface'),\n            (int, 'ds', (1,20), 0, 'down scale', 'pix'),\n            (bool, 'box', 'show box'),\n            ('color', 'color', 'box color', 'rgb')]\n\n    def run(self, ips, imgs, para = None):\n        if para['surface']:\n            vts, fs, ns, cs = surfutil.build_img_cube(imgs, para['ds'])\n            self.app.show_mesh(Surface(vts, fs, ns, cs), para['name']+'-surface')\n        if para['box']:\n            vts, fs, ns, cs = surfutil.build_img_box(imgs, para['color'])\n            self.app.show_mesh(Surface(vts, fs, ns, cs, mode='grid'), para['name']+'-box')\n\nplgs = [Show, Surface2D, Surface3D, ImageCube]\n","sub_path":"imagepy/menus/Kit3D/Viewer 3D/surface_plgs.py","file_name":"surface_plgs.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"300560352","text":"#!/usr/bin/env python3\r\n# -*- coding:UTF8 -*-\r\n\r\nimport cv2 as cv\r\nimport numpy as np\r\n\r\n# Import Python's built-in UI library (tkinter)\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\n# Import helper tools (PIL, for converting images for tkinter)\r\nimport PIL.Image, PIL.ImageTk\r\n\r\n# Create the window\r\nwindow = tk.Tk()\r\nwindow.title('template')\r\n\r\ntarget = cv.imread(\"./image/wyf1.jpg\")\r\ntpl = cv.imread(\"./image/head.jpg\")\r\n\r\ntarget_gray = cv.cvtColor(target, cv.COLOR_BGR2GRAY)\r\ntpl_gray = cv.cvtColor(tpl, cv.COLOR_BGR2GRAY)\r\n\r\n# if target is None:\r\n#     raise\r\n\r\nphoto = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(target.copy()[:,:,::-1]))\r\n\r\nh, w = target.shape[:2]\r\nh1, w1 = tpl.shape[:2]\r\n\r\n\r\nvar = tk.IntVar()\r\nlabel = tk.Label(window)\r\nlabel.config(text = \"选择匹配方法\")  # \"Choose a matching method\"\r\nlabel.pack()\r\n\r\ncanvas = tk.Canvas(window,width = w * 2, height = h)\r\n\r\ndef sel():\r\n    global photo\r\n    global result\r\n    methods = [cv.TM_SQDIFF_NORMED, cv.TM_CCORR_NORMED, cv.TM_CCOEFF_NORMED, cv.TM_CCOEFF, cv.TM_CCORR, cv.TM_SQDIFF]\r\n    methodsName = ('cv.TM_SQDIFF_NORMED', 'cv.TM_CCORR_NORMED', 'cv.TM_CCOEFF_NORMED', 'cv.TM_CCOEFF', 'cv.TM_CCORR', 'cv.TM_SQDIFF')\r\n    selection = \"你选择的是\" + str(methodsName[var.get()])  # \"You selected ...\"\r\n    label.config(text = selection)\r\n    method = methods[var.get()]\r\n    res = cv.matchTemplate(target_gray, tpl_gray, method)\r\n    min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)\r\n    if method in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:\r\n        top_left = min_loc\r\n    else:\r\n        top_left = max_loc\r\n\r\n    hh, ww = tpl_gray.shape\r\n    bottom_right = (top_left[0] + ww, top_left[1] + hh)\r\n    img_show = target.copy()\r\n    cv.rectangle(img_show, top_left, bottom_right, (0, 255, 0), 2)\r\n    img_show = img_show[:,:,::-1]\r\n    photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(img_show))\r\n    canvas.create_image(w//2, 0, image=photo, anchor=tk.NW)\r\n    cv.imshow('result', res)\r\n\r\n    # Add the image to the canvas\r\n    canvas.pack()\r\n\r\n\r\nR1 = tk.Radiobutton(window, \r\n                  text=\"cv.TM_SQDIFF_NORMED\", \r\n                  variable=var, \r\n                  value=0,\r\n                  command=sel)\r\nR1.pack( anchor = tk.W )\r\n\r\nR2 = tk.Radiobutton(window, \r\n                  text=\"cv.TM_CCORR_NORMED\", \r\n                  variable=var, \r\n                  value=1,\r\n                  command=sel)\r\nR2.pack( anchor = tk.W )\r\n\r\nR3 = tk.Radiobutton(window, \r\n                  text=\"cv.TM_CCOEFF_NORMED\", \r\n                  variable=var, \r\n                  value=2,\r\n                  command=sel)\r\nR3.pack( anchor = tk.W)\r\n\r\nR4 = tk.Radiobutton(window, \r\n                  text=\"cv.TM_CCOEFF\", \r\n                  variable=var, \r\n                  value=3,\r\n                  command=sel)\r\nR4.pack( anchor = tk.W)\r\n\r\n
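# Note: sel() above treats the TM_SQDIFF family specially -- for those methods\r\n# the best match is the minimum of the response map, while the remaining\r\n# methods peak at the maximum.\r\nR5 = tk.Radiobutton(window, \r\n                  text=\"cv.TM_CCORR\", \r\n                  variable=var, \r\n                  value=4,\r\n                  command=sel)\r\nR5.pack( anchor = 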
tk.W)\r\n\r\nR6 = tk.Radiobutton(window, \r\n                  text=\"cv.TM_SQDIFF\", \r\n                  variable=var, \r\n                  value=5,\r\n                  command=sel)\r\nR6.pack( anchor = tk.W)\r\n\r\ncanvas.create_image(w//2, 0, image = photo, anchor=tk.NW)\r\ncanvas.pack()\r\n\r\nwindow.mainloop()\r\n\r\n\r\n","sub_path":"openCV/chapter07/3 template_showime.py","file_name":"3 template_showime.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"328250704","text":"# Program for sorting an array using only the reverse(arr, i, j) function.\n# This is called pancake sorting.\n\n# [3, 2, 4, 1] => [1, 2, 3, 4]\n#\n# * reverse(arr, i, j)\n#\n# reverse(0, 2) => [4, 2, 3, 1]\n# reverse(0, 3) => [1, 3, 2, 4]\n# reverse(0, 1) => [3, 1, 2, 4]\n# reverse(0, 2) => [2, 1, 3, 4]\n# reverse(0, 0) => [2, 1, 3, 4]\n# reverse(0, 1) => [1, 2, 3, 4]\n#\n# The idea is to place the elements at their correct positions one by one, working\n# through the maximum elements in decreasing order.\n# Say we move the max element \"4\" to the beginning by reversing up to its position;\n# reversing the entire array then moves it to the end.\n# [1, 3, 2, 4]\n# Now one element, the max element, is at its correct position.\n# Then we move to the next max element \"3\": bring it to the first index by reversing\n# up to its found position, then reverse the array up to size - 1, since one element\n# is already sorted.\n# [2, 1, 3, 4]\n# Now two elements are correctly placed.\n# Similarly, for \"2\" we reverse to the front and then reverse up to size - 2, giving [1, 2, 3, 4]\n# Finally, the array is sorted.\n\n# ------------------------------------------------------------------------------------------------------------------------\n# TIME  : O(N ^ 2)\n# SPACE : O(N), only if the output list of flips is required\n\nimport sys\n\ndef reverse(arr, left, right):\n\n    while left < right:\n        arr[left], arr[right] = arr[right], arr[left]\n        left += 1\n        right -= 1\n\n\ndef find_max(arr, n):\n    max_idx, max_ele = None, -sys.maxsize-1\n    for i in range(n):\n        if arr[i] > max_ele:\n            max_idx = i\n            max_ele = arr[i]\n    return max_idx\n\ndef pancake_sort(arr, n):\n\n    if not arr:\n        return\n\n    curr_size = n\n    res = []   # contains all the flips (reverses) we do\n    while curr_size > 1:\n        max_idx = find_max(arr, curr_size)\n\n        if max_idx != curr_size - 1:\n            reverse(arr, 0, max_idx)\n            reverse(arr, 0, curr_size-1)\n\n        res.append(max_idx + 1)   # actually max_idx, but LeetCode assumes 1-based indexing\n        res.append(curr_size)     # similarly, curr_size - 1 in 0-based indexing\n        curr_size -= 1\n\n    return res\n\nif __name__ == '__main__':\n    arr = [3, 2, 4, 1]\n    print(arr)\n    print(pancake_sort(arr, len(arr)))\n    print(arr)\n","sub_path":"sorting algo/pancake_sorting.py","file_name":"pancake_sorting.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"417705900","text":"#!/usr/bin/python\n\nimport numpy as np\nimport random\n\n#############################################################################\n# #\n# DATA ASSOCIATED WITH SOLVING THE EQUATIONS OF A NETWORK #\n# #\n# Probleme_S : parameterizable network #\n# #\n#############################################################################\n\n# Problem variables\n#\n# nom : name of the network\n#\n# n : total number of arcs\n# m : total number of nodes\n# mr : number of reservoir-type nodes\n# md : number of demand-type nodes (= m-mr)\n#\n# orig : vector of the start-node numbers of the 
arcs : M(1,n)\n# dest : vector of the end-node numbers of the arcs : M(1,n)\n# absn : vector of the node abscissas : M(1,m)\n# ordn : vector of the node ordinates : M(1,m)\n#\n# r : vector of the arc resistances : M(n,1)\n# pr : vector of the reservoir-node pressures : M(mr,1)\n# fd : vector of the demand-node flows : M(md,1)\n\n\n##### Network name\n\nnom = \"Parametrable\"\n\n##### Number of levels\n\nT = 7\n\n##### Initialization of the random generator\n\ngral = 123 # random seed\nrandom.seed(gral)\n\n##### Network dimensions\n\nm = (2**(T+1)) - 1\nmr = 1\nmd = m - mr\nn = ((2**(T+1))-1) + ((2**(T+1))-1) - (T+1) - 1\n\n##### Node and arc characteristics\n\norig = []\ndest = []\n\n# Arcs of the tree\nnum = 1\nfor t in range (0,T):\n    ni = (2**t);\n    nf = (2**(t+1)) - 1;\n    nz = 2 * (nf-ni+1);\n    aorg = np.array([range(ni,nf+1),range(ni,nf+1)])\n    orig = np.concatenate((orig,aorg.flatten('F')), axis=0)\n    dest = np.concatenate((dest,np.array(range(num+1,num+nz+1))), axis=0)\n    num = num + nz;\n    \n# Arcs of the co-tree\nfor t in range(1, T+1):\n    ni = (2**t);\n    nf = (2**(t+1)) - 1;\n    orig = np.concatenate((orig,np.array(range(ni,nf))), axis=0)\n    dest = np.concatenate((dest,np.array(range(ni+1,nf+1))), axis=0)\n    \n# Node coordinates\nabsn = []\nfor t in range(0,T+1):\n    ni = (2**t);\n    nf = (2**(t+1)) - 1;\n    na = 2**(T-t+1);\n    nb = 2**(T-t);\n    num = na*np.array(range(0,nf-ni+1)) + nb\n    absn = np.concatenate((absn,num),axis=0)\n    \nordn = []\nfor t in range(0,T+1):\n    ni = (2**t);\n    nf = (2**(t+1)) - 1;\n    ordn = np.concatenate((ordn,(T-t+1)*np.ones(nf-ni+1)),axis=0)\n\n# Arc resistances\nr = 1000 * np.random.rand(n,1)\n\n# Pressure at the foot of the reservoir (in m)\npr = np.array([200])\n\n# Flows at the demand nodes (in m3/s)\nfd = 0.1 * (np.random.rand(md,1)-0.5)\n","sub_path":"Probleme_S.py","file_name":"Probleme_S.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"80153519","text":"import copy\nimport heapq\nimport random\n\nfrom heap import heappushes, heapifies\n\nclass TestHeap():\n    def test_heappush_random(self):\n        for heappush in heappushes:\n            random.seed(0)\n\n            for i in (i ** 2 for i in range(10)):\n                data0, data1 = [], []\n\n                for j in range(i):\n                    item = random.randint(-1024, 1024)\n                    heapq.heappush(data0, item)\n                    heappush(data1, item)\n\n                msg = \"{} failed\".format(heappush.__name__)\n                assert data0 == data1, msg\n\n    def test_heapify_random(self):\n        for heapify in heapifies:\n            random.seed(0)\n\n            for i in (i ** 2 for i in range(20)):\n                data0 = [random.randint(-1024, 1024) for j in range(i)]\n                data1 = copy.copy(data0)\n\n                heapq.heapify(data0)\n                heapify(data1)\n\n                msg = \"{} failed\".format(heapify.__name__)\n                assert data0 == data1, msg\n","sub_path":"tests/test_heap.py","file_name":"test_heap.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"550141195","text":"# -*- coding: utf-8 -*-\nfrom API.base_api import check_response_data\nfrom API.wbs.base_wbs_api import WBSAPIBaseTestMixin\nimport unittest\nimport pytest\nimport os\nfrom nose.plugins.attrib import attr\nfrom fake_data import FakeData\n\n\n@attr('internal')\n@attr('external')\nclass DepartmentAddTest(WBSAPIBaseTestMixin, unittest.TestCase):\n    view_url = 'department/add.json' if os.getenv('api_type') == 'ex' else 'add.json'\n    fake = FakeData()\n    expected_response_format = {\n        \"success\": bool,\n        
\"msg\": unicode\n }\n\n @check_response_data\n def tests_response_with_successful_workflow(self):\n self.expected_response = {\n \"success\": True,\n \"msg\": u\"操作成功\"\n }\n\n @classmethod\n def setUpClass(cls):\n data_dict = {\n \"token\": cls.generate_token(),\n \"param\": {\n \"name\": cls.fake.department_name(),\n \"departmentTypeCode\": \"1\"\n }\n }\n cls.data = 'data={0}'.format(cls.dict_to_json(data_dict))\n super(DepartmentAddTest, cls).setUpClass()\n","sub_path":"automation-testing/API/wbs/saas/department/tests_add.py","file_name":"tests_add.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"586827558","text":"# find the two entries that sum to 2020 and then multiply those two numbers together\r\n\r\n# input\r\nwith open('1.txt', 'r') as file:\r\n input = file.read()\r\n\r\n# turn the input into a list\r\nlist_str = list(input.split('\\n'))\r\n\r\n# get a list of ints\r\nlist_int = []\r\nfor k in list_str:\r\n list_int.append(int(k))\r\n\r\n# find the pair and multiply them\r\nfor i in list_int:\r\n for j in list_int:\r\n if i + j == 2020:\r\n print(i*j)","sub_path":"1a.py","file_name":"1a.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"104463728","text":"import pandas as pd\ndef pick_rules(row, user_interest_list):\n \n consider_antecedents = True\n consider_consequents = True\n\n # Loop through all antecedents and find is this antecedents are\n # related to user intersts or not?.\n for item in row.antecedents:\n if item not in user_interest_list:\n consider_antecedents = False\n\n if consider_antecedents:\n # Antecedents are matched\n\n # Now check if consequents of this rule is not in users Interests\n # then this rule is for this user.\n for item in row.consequents:\n if item in user_interest_list:\n consider_consequents = False\n \n if(consider_consequents):\n # This rule is matched with the user.\n return True\n else:\n return False\n\n else:\n return False\n \ndef genrate_profile():\n \n rul_df=pd.read_csv('brands_recommendation.csv',converters={'antecedents': eval, 'consequents': eval}) # get user name and interests of new user.\n userinterests = 'BATA,NIKE,STYLO'\n username = 'muzammmil'\n # convert user interest into list.\n u_i_list = userinterests.split(',')\n\n applied_rul_df = pd.DataFrame(columns=rul_df.columns)\n \n for index, row in rul_df.iterrows():\n if(pick_rules(row,u_i_list)):\n applied_rul_df = applied_rul_df.append(row)\n \n # get a rule which have higest lift \n if(applied_rul_df.shape[0] >= 1):\n \n\n columns_names = ['antecedents', 'consequents', 'antecedent support', 'consequent support', 'support', 'confidence', 'lift', 'leverage', 'conviction'] \n selected_rule= applied_rul_df.iloc[applied_rul_df.lift.argmax()]\n userSeleted_Interest = selected_rule.consequents\n new_user = {\n \"Name\": username,\n \"interests\": userSeleted_Interest\n }\n \n else:\n # In case no rule is matched with the user Interests.\n new_user = {\n \"Name\": username,\n \"interests\": \"PAKISTAN\"\n }\n print(new_user)\n \ngenrate_profile()\n","sub_path":"BrandsSale/Brandsale/users_api/Recommendation.py","file_name":"Recommendation.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"377087564","text":"#!/usr/bin/env python\n#\n# Copyright 2010 \n\nfrom __future__ import 
with_statement\n\n\"\"\"Tools for parsing logs output by Crux and Tide.\n\nReferences:\n[1] http://noble.gs.washington.edu/proj/crux/\n\n\"\"\"\n\n__authors__ = [ 'Ajit Singh ' ]\n\nimport csv\nimport os\nimport sys\n\nfrom operator import itemgetter\nfrom protein.peptide import Peptide\nfrom itertools import islice\n\nfrom progress import ProgressBar, Percentage, Bar, ETA\nfrom buzhug import Base\n\nclass ParseError(Exception):\n def __init__(self, prefix, filename, lineno, line):\n self.prefix = prefix\n self.filename = filename\n self.lineno = lineno\n self.line = line\n def __str__(self):\n return repr('%s%s(%d): %s' %\n (self.prefix, self.filename, self.lineno, self.line))\n\ndef parse_crux_search_txt(filename):\n \"\"\"Iterate over records in a search.{target,decoy}.txt.\n\n Crux txt format files are tab-delimited with 30 fields*, described\n in the online documentation [1]. This function returns an iterator\n which yields a dictionary with the fields and their values.\n\n * 'decoy q-value (p-value)' is not output by Crux, at least as of v1.33.\n\n [1] http://noble.gs.washington.edu/proj/crux/txt-format.html\n\n Arguments:\n filename: Name of the crux search-for-matches output.\n\n Returns:\n Dictionary that maps field names to values. Only fields that\n are non-empty in the input exist in the returned dictionary.\n Many of the fields are not usually set in the output of crux\n search-for-matches, and will not be available.\n\n \"\"\"\n fields = ['scan', # int\n 'charge', # int\n 'spectrum precursor m/z', # float\n 'spectrum neutral mass', # float\n 'peptide mass', # float\n 'delta_cn', # float\n 'sp score', # float\n 'sp rank', # float\n 'xcorr score', # float\n 'xcorr rank', # int\n 'p-value', # float\n 'Weibull est. q-value', # float\n 'decoy q-value (xcorr)', # float\n 'percolator score', # float\n 'percolator rank', # int\n 'percolator q-value', # float\n 'q-ranker score', # float\n 'q-ranker q-value', # float\n 'b/y ions matched', # int\n 'b/y ions total', # int\n 'matches/spectrum', # int\n 'sequence', # string\n 'cleavage type', # string\n 'protein id', # string\n 'flanking aa', # string\n 'unshuffled sequence', # string\n 'eta', # float\n 'beta', # float\n 'shift', # float\n 'corr'] # float\n casts = [ int, int, float, float, float, float, float, float, float, int,\n float, float, float, float, int, float, float, float, int, int,\n int, str, str, str, str, str, float, float, float, float ]\n assert(len(fields) == len(casts))\n\n _mandatories = [ 'scan', 'charge', 'spectrum precursor m/z',\n 'spectrum neutral mass', 'xcorr score',\n 'xcorr rank', 'sequence' ]\n\n def conv(f, value):\n value = value.strip()\n if len(value):\n return f(value)\n\n def validate(record):\n return all(record.has_key(m) for m in _mandatories)\n\n widgets = [ Percentage(), Bar(), ETA() ]\n progress = ProgressBar(widgets = widgets,\n maxval = os.path.getsize(filename)).start()\n\n with open(filename) as f:\n reader = csv.reader(f, delimiter='\\t')\n # Header\n row = reader.next()\n if row != fields:\n raise ParseError('Header: ', filename, 1, ' '.join(row))\n # Body\n for row in reader:\n progress.update(f.tell())\n if len(row) != len(fields):\n raise ParseError('Line: ', filename, reader.line_num,\n ' '.join(row))\n\n r = dict((k, conv(f,x)) for k, f, x in zip(fields, casts, row))\n if r:\n if not validate(r):\n raise ParseError('Missing: ', filename, reader.line_num,\n ' '.join(row))\n yield r\n\n progress.finish()\n sys.stdout.write('\\n')\n\ndef parse_crux_search_txt_dict(filename, scorer = 
'xcorr', sortlists = True,\n nrecords = None):\n \"\"\"Extract scored peptide-spectrum matches from crux search-for-matches.\n\n It is the caller's responsibility to make sure that the crux log file\n has the required type of score. XCorr is always returned, but other scores\n may or may not be defined in 'filename'.\n\n Arguments:\n filename: Text output generated by Crux, usu. search.{target,decoy}.txt.\n scorer: Score type to use, one of {'xcorr', 'percolator', 'q-ranker',\n 'delta_cn', 'sp'}\n sortlists: If true, the list of candidates is sorted in order of\n decreasing score.\n nrecords: Number of records to read in. If None, read all the records.\n\n Returns:\n A dictionary which maps scan id to a list of (peptide, score) tuples,\n which are the peptides tested against the spectrum.\n\n \"\"\"\n mapper = { 'xcorr' : 'xcorr score',\n 'percolator' : 'percolator score',\n 'q-ranker' : 'q-ranker score',\n 'delta_cn' : 'delta_cn',\n 'sp' : 'sp score' }\n if scorer not in mapper:\n raise ValueError('\"%s\" is not a valid scorer' % scorer)\n\n dic = { }\n for record in islice(parse_crux_search_txt(filename), nrecords):\n sid = record['scan']\n if sid not in dic:\n dic[sid] = [ ]\n\n score = record[mapper[scorer]]\n peptide = Peptide(record['sequence'])\n dic[sid].append((peptide, score))\n\n if sortlists:\n for sid, lst in dic.iteritems():\n dic[sid] = sorted(lst, key = itemgetter(1), reverse = True)\n\n return dic\n\n","sub_path":"sourceCodes/python/crux/logparser.py","file_name":"logparser.py","file_ext":"py","file_size_in_byte":6043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"440819928","text":"import os\n\nimport pandas as pd\n\n\nif __name__ == '__main__':\n base_dir = '../data/derived/'\n columbia_df = pd.read_csv(os.path.join(base_dir, 'columbia_prototype_contexts.csv'))\n acronyms = pd.read_csv(os.path.join(base_dir, 'prototype_acronym_expansions.csv'))\n\n lfs = acronyms['lf'].unique().tolist()\n sfs = acronyms['sf'].unique().tolist()\n\n form_to_sf = {}\n for lf in lfs:\n form_to_sf[lf] = acronyms[acronyms['lf'] == lf]['sf'].tolist()[0]\n for sf in sfs:\n form_to_sf[sf] = sf\n\n mimic_chunk_dfs = []\n mimic_dir = os.path.join(base_dir, 'mimic')\n mimic_chunks = os.listdir(mimic_dir)\n mimic_chunks = [m for m in mimic_chunks if '.csv' in m]\n print('Collecting chunks of processed MIMIC contexts...')\n n_mimic = 0\n for chunk_idx, fn in enumerate(mimic_chunks):\n chunk_df = pd.read_csv(os.path.join(mimic_dir, fn))\n n_mimic += chunk_df.shape[0]\n if chunk_df.shape[0] > 0:\n mimic_chunk_dfs.append(chunk_df)\n if (chunk_idx + 1) % 25 == 0 or (chunk_idx + 1) == len(mimic_chunks):\n print('\\tProcessed {} out of {} MIMIC batches'.format(chunk_idx + 1, len(mimic_chunks)))\n\n mimic_df = pd.concat(mimic_chunk_dfs, sort=False, axis=0)\n mimic_df['source'] = ['mimic'] * mimic_df.shape[0]\n columbia_df['source'] = ['columbia'] * columbia_df.shape[0]\n full_df = pd.concat([mimic_df, columbia_df], sort=False, axis=0)\n full_df.dropna(inplace=True)\n out_fn = os.path.join(base_dir, 'all_prototype_contexts.csv')\n print('Saving a whopping {} contexts to {}'.format(full_df.shape[0], out_fn))\n\n # Append requisite short forms to the dataframe\n full_df['sf'] = full_df['form'].apply(lambda k: form_to_sf[k])\n full_df.to_csv(out_fn, 
index=False)\n","sub_path":"expansion_etl/context_extraction/collect_contexts.py","file_name":"collect_contexts.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"334635826","text":"# Copyright 2016 Akanda, Inc.\n#\n# Author: Akanda, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport __builtin__\n\nfrom unittest2 import TestCase\nimport mock\n\nfrom test.unit import fakes\n\nfrom astara_router.drivers import conntrackd\n\n\nclass ConntrackddManagerTestCase(TestCase):\n def setUp(self):\n super(ConntrackddManagerTestCase, self).setUp()\n self.mgr = conntrackd.ConntrackdManager()\n self.mgr._config_templ = mock.Mock(\n render=mock.Mock()\n )\n\n @mock.patch('astara_router.utils.execute')\n @mock.patch('astara_router.utils.replace_file')\n @mock.patch('astara_router.utils.hash_file')\n def test_save_config(self, fake_hash, fake_replace, fake_execute):\n fake_generic_to_host = mock.Mock(return_value='eth0')\n fake_interface = fakes.fake_interface()\n fake_mgt_interface = fakes.fake_mgt_interface()\n ha_config = {\n 'peers': ['10.0.0.2'],\n }\n fake_config = mock.Mock(\n interfaces=[fake_interface, fake_mgt_interface],\n ha_config=ha_config,\n )\n\n fake_hash.side_effect = ['hash1', 'hash2']\n self.mgr._config_templ.render.return_value = 'new_config'\n self.mgr.save_config(fake_config, fake_generic_to_host)\n self.mgr._config_templ.render.assert_called_with(dict(\n source_address=str(fake_mgt_interface.addresses[0].ip),\n management_ip_version=4,\n destination_address='10.0.0.2',\n interface='eth0',\n ))\n self.assertTrue(self.mgr._should_restart)\n fake_replace.assert_called_with('/tmp/conntrackd.conf', 'new_config')\n fake_execute.assert_called_with(\n ['mv', '/tmp/conntrackd.conf', '/etc/conntrackd/conntrackd.conf'],\n self.mgr.root_helper)\n\n @mock.patch.object(conntrackd.ConntrackdManager, 'sudo')\n def test_restart(self, fake_sudo):\n self.mgr._should_restart = True\n self.mgr.restart()\n fake_sudo.assert_called_with('conntrackd', 'restart')\n\n @mock.patch.object(conntrackd.ConntrackdManager, 'sudo')\n def test_restart_skip(self, fake_sudo):\n self.mgr._should_restart = False\n self.mgr.restart()\n self.assertFalse(fake_sudo.called)\n","sub_path":"test/unit/drivers/test_conntrackd.py","file_name":"test_conntrackd.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"210555778","text":"# -*- coding: utf-8 -*-\n# Get comtypes from:\n# sourceforge -- http://sourceforge.net/projects/comtypes/files/comtypes/\n# or\n# PyPI -- https://pypi.python.org/pypi/comtypes\nfrom __future__ import print_function\nimport comtypes\nimport comtypes.client as client\nimport numpy as np\nfrom tqdm import tqdm\nGamryCOM = client.GetModule(['{BD962F0D-A990-4823-9CF5-284D1CDD9C6D}', 1, 0])\n\n\n# Alternatively:\n# GamryCOM=client.GetModule(r'C:\\Program Files\\Gamry Instruments\\Framework 
6\GamryCOM.exe')\n\n# utilities: #####################\nclass GamryCOMError(Exception):\n    pass\n\n\ndef gamry_error_decoder(e):\n    if isinstance(e, comtypes.COMError):\n        hresult = 2 ** 32 + e.args[0]\n        if hresult & 0x20000000:\n            return GamryCOMError('0x{0:08x}: {1}'.format(2 ** 32 + e.args[0], e.args[1]))\n    return e\n\n\nclass GamryDtaqEvents(object):\n    def __init__(self, dtaq):\n        self.dtaq = dtaq\n        self.acquired_points = []\n\n    def cook(self):\n        count = 1\n        while count > 0:\n            count, points = self.dtaq.Cook(10)\n            # The columns exposed by GamryDtaq.Cook vary by dtaq and are\n            # documented in the Toolkit Reference Manual.\n            self.acquired_points.extend(zip(*points))\n\n    def _IGamryDtaqEvents_OnDataAvailable(self, this):\n        self.cook()\n\n    def _IGamryDtaqEvents_OnDataDone(self, this):\n        self.cook() # a final cook\n        # TODO: indicate completion to enclosing code?\n\n\n###############################\n\ndevices = client.CreateObject('GamryCOM.GamryDeviceList')\nprint(devices.EnumSections())\n\npstat = client.CreateObject('GamryCOM.GamryPC6Pstat')\npstat.Init(devices.EnumSections()[0])  # grab first pstat\n\n################################ OCV\n\ndtaqcpiv = client.CreateObject('GamryCOM.GamryDtaqOcv')\npstat.Open()\ndtaqcpiv.Init(pstat)\npstat.SetCell(GamryCOM.CellOn)\ndtaqsink = GamryDtaqEvents(dtaqcpiv)\nconnection = client.GetEvents(dtaqcpiv, dtaqsink)\ntry:\n    dtaqcpiv.Run(True)\nexcept Exception as e:\n    raise gamry_error_decoder(e)\nclient.PumpEvents(3)\npstat.SetCell(GamryCOM.CellOff)\npstat.Close()\n\n\n################################ EIS\nZreal,Zimag,Zsig,Zphz,Zfreq = [],[],[],[],[]\nis_on = False\npstat.Open()\nfor f in tqdm(np.logspace(0,5,60)):\n\n    dtaqcpiv = client.CreateObject('GamryCOM.GamryDtaqEis')\n    dtaqcpiv.Init(pstat,f,0.1,0.5,2)\n    dtaqcpiv.SetCycleMin(10)\n    dtaqcpiv.SetCycleMax(5000)\n\n    if not is_on:\n        pstat.SetCell(GamryCOM.CellOn)\n        is_on = True\n    dtaqsink = GamryDtaqEvents(dtaqcpiv)\n\n    connection = client.GetEvents(dtaqcpiv, dtaqsink)\n\n    try:\n        dtaqcpiv.Run(True)\n    except Exception as e:\n        raise gamry_error_decoder(e)\n    if f<10:\n        client.PumpEvents(10)\n    else:\n        client.PumpEvents(1)\n\n    Zreal.append(dtaqsink.dtaq.Zreal())\n    Zimag.append(dtaqsink.dtaq.Zimag())\n    Zsig.append(dtaqsink.dtaq.Zsig())\n    Zphz.append(dtaqsink.dtaq.Zphz())\n    Zfreq.append(dtaqsink.dtaq.Zfreq())\n    print(dtaqsink.dtaq.Zfreq())\n    del connection\npstat.SetCell(GamryCOM.CellOff)\npstat.Close()\n\nimport matplotlib.pyplot as plt\nplt.scatter(np.array(Zreal),-np.array(Zimag),c=np.log(Zfreq))\nplt.colorbar()\nplt.axis('equal')\nplt.show()\n\n\nfrom impedance.circuits import CustomCircuit\nfrequencies = np.array(Zfreq)\nZ = np.array(Zreal)+1j*np.array(Zimag)\ncircuit = 'R0-p(R1,C1)'\nguess = [np.min(Z.real),np.max(Z.real),10**-6]\ncircuit_mod = CustomCircuit(circuit,initial_guess=guess)\ncircuit_mod.fit(frequencies, Z)\n\nZ_fit = circuit_mod.predict(frequencies)\n\nfrom impedance.plotting import plot_nyquist\n\nfig, ax = plt.subplots()\nplot_nyquist(ax, frequencies, Z, fmt='o')\nplot_nyquist(ax, frequencies, Z_fit, fmt='-')\n\nplt.legend(['Data', 'Fit'])\nplt.show()\n","sub_path":"eis_test_aq.py","file_name":"eis_test_aq.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"488400097","text":"import os\nimport requests\nimport json\n\ngithub_api = 'https://api.github.com/users/{0:s}/repos?access_token={1:s}&per_page={2:d}'\n
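# Fill in your GitHub username and a personal access token below. Note that\n# passing access_token as a query parameter is deprecated by GitHub; newer API\n# versions may require an 'Authorization: token ...' header instead.\nyour_username = 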
''\nyour_personal_access_token = ''\npageSize = 100\n\nres = requests.get(github_api.format(your_username, your_personal_access_token, pageSize))\nprojects = json.loads(res.text)\nmax_len = 0\n\nfor p in projects:\n    ssh_url = p['ssh_url']\n    print(ssh_url)\n    os.system(\"git clone \" + ssh_url)\n","sub_path":"clone_your_all_repsitories_in_github.py","file_name":"clone_your_all_repsitories_in_github.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"153314494","text":"from flask import request\nfrom dotenv import load_dotenv\nimport os\n\nclass Settings:\n    \n    load_dotenv() # Initialize the environment variables\n\n    # Server settings\n\n    PORT = os.getenv('PORT')\n\n    DEBUG = False\n\n    JWT_SECRET_KEY = os.getenv('JWT_SECRET_KEY') # Value stored in an environment variable\n\n    JWT_BLACKLIST_ENABLED = True\n\n    JWT_BLACKLIST_TOKEN_CHECKS = ['access', 'refresh']\n\n    CORS_HEADERS = ['Content-Type','Authorization']\n\n    ALLOWED_EXTENSIONS = [\"JPEG\", \"JPG\", \"PNG\", \"GIF\"]\n\n\n    # JSON Web Token settings\n\n    def __init__(self,jwt):\n        \n        blacklist = set()\n        \n        # Add the username to the tokens\n        @jwt.user_claims_loader\n        def add_claims_to_access_token(identity):\n            username = request.json['username']\n            return {\n                'user': username.strip(),\n            }\n\n        # Check whether the token is in the blacklist\n        @jwt.token_in_blacklist_loader\n        def check_if_token_in_blacklist(decrypted_token):\n            jti = decrypted_token['jti']\n            return jti in blacklist\n\n    ","sub_path":"app/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"539439460","text":"import threading\nimport json\n\nfrom Logging.Logging import Logging\n\n\n\nclass DigitalOutContract:\n\n    __lock = threading.RLock()\n\n    def __init__(self):\n        self.c1_b0 = 0x00  # C1: B0: Do0 - Do7\n        self.c1_b1 = 0x00  # C1: B1: Do8 - Do15\n        self.c1_b2 = 0x00  # C1: B2: Do16 - Do23\n        self.c1_b3 = 0x00  # C1: B3: Do24 - Do31\n        self.c2_b0 = 0x00  # C2: B0: Do0 - Do7\n        self.c2_b1 = 0x00  # C2: B1: Do8 - Do15\n        self.c2_b2 = 0x00  # C2: B2: Do16 - Do23\n        self.c2_b3 = 0x00  # C2: B3: Do24 - Do31\n        self.LN2_P_EN = False  # C 1: Do 0 -\n        self.LN2_S_EN = False  # C 1: Do 1 -\n        self.LN2_Sol_EN = False  # C 1: Do 2 -\n        self.notUsed1 = False  # C 1: Do 3 -\n        self.notUsed2 = False  # C 1: Do 4 -\n        self.notUsed3 = False  # C 1: Do 5 -\n        self.notUsed4 = False  # C 1: Do 6 -\n        self.notUsed5 = False  # C 1: Do 7 -\n        self.notUsed6 = False  # C 1: Do 8 -\n        self.notUsed7 = False  # C 1: Do 9 -\n        self.notUsed8 = False  # C 1: Do 10-\n        self.notUsed9 = False  # C 1: Do 11-\n        self.notUsed10 = False  # C 1: Do 12-\n        self.notUsed11 = False  # C 1: Do 13-\n        self.notUsed12 = False  # C 1: Do 14-\n        self.notUsed13 = False  # C 1: Do 15-\n        self.IR_Lamp_1 = False  # C 1: Do 16- Zone 1a\n        self.IR_Lamp_2 = False  # C 1: Do 17- Zone 1b\n        self.IR_Lamp_3 = False  # C 1: Do 18- Zone 2a\n        self.IR_Lamp_4 = False  # C 1: Do 19- Zone 2b\n        self.IR_Lamp_5 = False  # C 1: Do 20- Zone 3a\n        self.IR_Lamp_6 = False  # C 1: Do 21- Zone 3b\n        self.IR_Lamp_7 = False  # C 1: Do 22- Zone 4a\n        self.IR_Lamp_8 = False  # C 1: Do 23- Zone 4b\n        self.IR_Lamp_9 = False  # C 1: Do 24- Zone 5a\n        self.IR_Lamp_10 = False  # C 1: Do 25- Zone 5b\n        self.IR_Lamp_11 = False  # C 1: Do 26- Zone 6a\n        self.IR_Lamp_12 = False  # C 1: Do 27- Zone 6b\n        self.IR_Lamp_13 = False  # C 1: Do 28- Zone 7a\n        self.IR_Lamp_14 = False  # C 1: Do 29- 
Zone 7b\n self.IR_Lamp_15 = False # C 1: Do 30- Zone 8a\n self.IR_Lamp_16 = False # C 1: Do 31- Zone 8b\n self.Heater_1 = False # C 2: Do 0 - Platen Heaters 1 & 2 - Zone 9\n self.Heater_2 = False # C 2: Do 1 - Platen Heaters 3 & 4 - Zone 9\n self.Heater_3 = False # C 2: Do 2 - Platen Heaters 5 & 6 - Zone 9\n self.Heater_4 = False # C 2: Do 3 - Platen Heaters 7 & 8 - Zone 9\n self.Heater_5 = False # C 2: Do 4 - Platen Heaters 9 & 10 - Zone 9\n self.Heater_6 = False # C 2: Do 5 - Platen Heaters 11 & 12 - Zone 9\n self.Heater_7 = False # C 2: Do 6 -\n self.Heater_8 = False # C 2: Do 7 -\n self.Heater_9 = False # C 2: Do 8 -\n self.Heater_10 = False # C 2: Do 9 -\n self.Heater_11 = False # C 2: Do 10-\n self.Heater_12 = False # C 2: Do 11-\n self.Heater_13 = False # C 2: Do 12-\n self.Heater_14 = False # C 2: Do 13-\n self.Heater_15 = False # C 2: Do 14-\n self.Heater_16 = False # C 2: Do 15-\n self.Heater_17 = False # C 2: Do 16-\n self.Heater_18 = False # C 2: Do 17-\n self.Heater_19 = False # C 2: Do 18-\n self.Heater_20 = False # C 2: Do 19-\n self.Heater_21 = False # C 2: Do 20-\n self.MCC_Power = False # C 2: Do 21-\n self.MCC2_Power = False # C 2: Do 22-\n self.RoughP_GateValve = False # C 2: Do 23-\n self.RoughP_Start = False # C 2: Do 24-\n self.CryoP_GateValve = False # C 2: Do 25-\n self.RoughP_PurgeGass = False # C 2: Do 26-\n self.LN2_S_Sol = False # C 2: Do 27-\n self.LN2_P_Sol = False # C 2: Do 28-\n self.CryoP1_PwrRelay = False # C 2: Do 29-\n self.CryoP2_PwrRelay = False # C 2: Do 30-\n self.RoughP_PwrRelay = False # C 2: Do 31-\n # Lamps PWM duty cycle range: 0 - 1\n self.IR_Lamps_pwm_dc = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n # 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16\n\n def update(self, d):\n self.__lock.acquire()\n if 'C1 B0' in d:\n self.c1_b0 = d['C1 B0']\n self.LN2_P_EN = ((self.c1_b0 & 0x01) > 0) # C 1: Do 0\n self.LN2_S_EN = ((self.c1_b0 & 0x02) > 0) # C 1: Do 1\n self.LN2_Sol_EN = ((self.c1_b0 & 0x04) > 0) # C 1: Do 2\n self.notUsed1 = ((self.c1_b0 & 0x08) > 0) # C 1: Do 3\n self.notUsed2 = ((self.c1_b0 & 0x10) > 0) # C 1: Do 4\n self.notUsed3 = ((self.c1_b0 & 0x20) > 0) # C 1: Do 5\n self.notUsed4 = ((self.c1_b0 & 0x40) > 0) # C 1: Do 6\n self.notUsed5 = ((self.c1_b0 & 0x80) > 0) # C 1: Do 7\n if 'LN2-P EN' in d:\n self.LN2_P_EN = d['LN2-P EN']\n if self.LN2_P_EN:\n self.c1_b0 |= 0x01 # C 1: Do 0\n else:\n self.c1_b0 &= ~0x01\n if 'LN2-S EN' in d:\n self.LN2_S_EN = d['LN2-S EN']\n if self.LN2_S_EN:\n self.c1_b0 |= 0x02 # C 1: Do 1\n else:\n self.c1_b0 &= ~0x02\n if 'LN2-Sol EN' in d:\n self.LN2_Sol_EN = d['LN2-Sol EN']\n if self.LN2_Sol_EN:\n self.c1_b0 |= 0x04 # C 1: Do 2\n else:\n self.c1_b0 &= ~0x04\n if 'notUsed1' in d:\n self.notUsed1 = d['notUsed1']\n if self.notUsed1:\n self.c1_b0 |= 0x08 # C 1: Do 3\n else:\n self.c1_b0 &= ~0x08\n if 'notUsed2' in d:\n self.notUsed2 = d['notUsed2']\n if self.notUsed2:\n self.c1_b0 |= 0x10 # C 1: Do 4\n else:\n self.c1_b0 &= ~0x10\n if 'notUsed3' in d:\n self.notUsed3 = d['notUsed3']\n if self.notUsed3:\n self.c1_b0 |= 0x20 # C 1: Do 5\n else:\n self.c1_b0 &= ~0x20\n if 'notUsed4' in d:\n self.notUsed4 = d['notUsed4']\n if self.notUsed4:\n self.c1_b0 |= 0x40 # C 1: Do 6\n else:\n self.c1_b0 &= ~0x40\n if 'notUsed5' in d:\n self.notUsed5 = d['notUsed5']\n if self.notUsed5:\n self.c1_b0 |= 0x80 # C 1: Do 7\n else:\n self.c1_b0 &= ~0x80\n\n if 'C1 B1' in d:\n self.c1_b1 = d['C1 B1']\n self.notUsed6 = ((self.c1_b1 & 0x01) > 0) # C 1: Do 8\n self.notUsed7 = ((self.c1_b1 & 0x02) > 0) # C 
1: Do 9\n self.notUsed8 = ((self.c1_b1 & 0x04) > 0) # C 1: Do 10\n self.notUsed9 = ((self.c1_b1 & 0x08) > 0) # C 1: Do 11\n self.notUsed10 = ((self.c1_b1 & 0x10) > 0) # C 1: Do 12\n self.notUsed11 = ((self.c1_b1 & 0x20) > 0) # C 1: Do 13\n self.notUsed12 = ((self.c1_b1 & 0x40) > 0) # C 1: Do 14\n self.notUsed13 = ((self.c1_b1 & 0x80) > 0) # C 1: Do 15\n if 'notUsed6' in d:\n self.notUsed6 = d['notUsed6']\n if self.notUsed6:\n self.c1_b1 |= 0x01 # C 1: Do 8\n else:\n self.c1_b1 &= ~0x01\n if 'notUsed7' in d:\n self.notUsed7 = d['notUsed7']\n if self.notUsed7:\n self.c1_b1 |= 0x02 # C 1: Do 9\n else:\n self.c1_b1 &= ~0x02\n if 'notUsed8' in d:\n self.notUsed8 = d['notUsed8']\n if self.notUsed8:\n self.c1_b1 |= 0x04 # C 1: Do 10\n else:\n self.c1_b1 &= ~0x04\n if 'notUsed9' in d:\n self.notUsed9 = d['notUsed9']\n if self.notUsed9:\n self.c1_b1 |= 0x08 # C 1: Do 11\n else:\n self.c1_b1 &= ~0x08\n if 'notUsed10' in d:\n self.notUsed10 = d['notUsed10']\n if self.notUsed10:\n self.c1_b1 |= 0x10 # C 1: Do 12\n else:\n self.c1_b1 &= ~0x10\n if 'notUsed11' in d:\n self.notUsed11 = d['notUsed11']\n if self.notUsed11:\n self.c1_b1 |= 0x20 # C 1: Do 13\n else:\n self.c1_b1 &= ~0x20\n if 'notUsed12' in d:\n self.notUsed12 = d['notUsed12']\n if self.notUsed12:\n self.c1_b1 |= 0x40 # C 1: Do 14\n else:\n self.c1_b1 &= ~0x40\n if 'notUsed13' in d:\n self.notUsed13 = d['notUsed13']\n if self.notUsed13:\n self.c1_b1 |= 0x80 # C 1: Do 15\n else:\n self.c1_b1 &= ~0x80\n\n if 'C1 B2' in d:\n self.c1_b2 = d['C1 B2']\n self.IR_Lamp_1 = ((self.c1_b2 & 0x01) > 0) # C 1: Do 16\n self.IR_Lamp_2 = ((self.c1_b2 & 0x02) > 0) # C 1: Do 17\n self.IR_Lamp_3 = ((self.c1_b2 & 0x04) > 0) # C 1: Do 18\n self.IR_Lamp_4 = ((self.c1_b2 & 0x08) > 0) # C 1: Do 19\n self.IR_Lamp_5 = ((self.c1_b2 & 0x10) > 0) # C 1: Do 20\n self.IR_Lamp_6 = ((self.c1_b2 & 0x20) > 0) # C 1: Do 21\n self.IR_Lamp_7 = ((self.c1_b2 & 0x40) > 0) # C 1: Do 22\n self.IR_Lamp_8 = ((self.c1_b2 & 0x80) > 0) # C 1: Do 23\n if 'IR Lamp 1' in d:\n self.IR_Lamp_1 = d['IR Lamp 1']\n if self.IR_Lamp_1:\n self.c1_b2 |= 0x01 # C 1: Do 16\n else:\n self.c1_b2 &= ~0x01\n if 'IR Lamp 2' in d:\n self.IR_Lamp_2 = d['IR Lamp 2']\n if self.IR_Lamp_2:\n self.c1_b2 |= 0x02 # C 1: Do 17\n else:\n self.c1_b2 &= ~0x02\n if 'IR Lamp 3' in d:\n self.IR_Lamp_3 = d['IR Lamp 3']\n if self.IR_Lamp_3:\n self.c1_b2 |= 0x04 # C 1: Do 18\n else:\n self.c1_b2 &= ~0x04\n if 'IR Lamp 4' in d:\n self.IR_Lamp_4 = d['IR Lamp 4']\n if self.IR_Lamp_4:\n self.c1_b2 |= 0x08 # C 1: Do 19\n else:\n self.c1_b2 &= ~0x08\n if 'IR Lamp 5' in d:\n self.IR_Lamp_5 = d['IR Lamp 5']\n if self.IR_Lamp_5:\n self.c1_b2 |= 0x10 # C 1: Do 20\n else:\n self.c1_b2 &= ~0x10\n if 'IR Lamp 6' in d:\n self.IR_Lamp_6 = d['IR Lamp 6']\n if self.IR_Lamp_6:\n self.c1_b2 |= 0x20 # C 1: Do 21\n else:\n self.c1_b2 &= ~0x20\n if 'IR Lamp 7' in d:\n self.IR_Lamp_7 = d['IR Lamp 7']\n if self.IR_Lamp_7:\n self.c1_b2 |= 0x40 # C 1: Do 22\n else:\n self.c1_b2 &= ~0x40\n if 'IR Lamp 8' in d:\n self.IR_Lamp_8 = d['IR Lamp 8']\n if self.IR_Lamp_8:\n self.c1_b2 |= 0x80 # C 1: Do 23\n else:\n self.c1_b2 &= ~0x80\n\n if 'C1 B3' in d:\n self.c1_b3 = d['C1 B3']\n self.IR_Lamp_9 = ((self.c1_b3 & 0x01) > 0) # C 1: Do 24\n self.IR_Lamp_10 = ((self.c1_b3 & 0x02) > 0) # C 1: Do 25\n self.IR_Lamp_11 = ((self.c1_b3 & 0x04) > 0) # C 1: Do 26\n self.IR_Lamp_12 = ((self.c1_b3 & 0x08) > 0) # C 1: Do 27\n self.IR_Lamp_13 = ((self.c1_b3 & 0x10) > 0) # C 1: Do 28\n self.IR_Lamp_14 = ((self.c1_b3 & 0x20) > 0) # C 1: Do 29\n self.IR_Lamp_15 = ((self.c1_b3 & 
0x40) > 0) # C 1: Do 30\n self.IR_Lamp_16 = ((self.c1_b3 & 0x80) > 0) # C 1: Do 31\n if 'IR Lamp 9' in d:\n self.IR_Lamp_9 = d['IR Lamp 9']\n if self.IR_Lamp_9:\n self.c1_b3 |= 0x01 # C 1: Do 24\n else:\n self.c1_b3 &= ~0x01\n if 'IR Lamp 10' in d:\n self.IR_Lamp_10 = d['IR Lamp 10']\n if self.IR_Lamp_10:\n self.c1_b3 |= 0x02 # C 1: Do 25\n else:\n self.c1_b3 &= ~0x02\n if 'IR Lamp 11' in d:\n self.IR_Lamp_11 = d['IR Lamp 11']\n if self.IR_Lamp_11:\n self.c1_b3 |= 0x04 # C 1: Do 26\n else:\n self.c1_b3 &= ~0x04\n if 'IR Lamp 12' in d:\n self.IR_Lamp_12 = d['IR Lamp 12']\n if self.IR_Lamp_12:\n self.c1_b3 |= 0x08 # C 1: Do 27\n else:\n self.c1_b3 &= ~0x08\n if 'IR Lamp 13' in d:\n self.IR_Lamp_13 = d['IR Lamp 13']\n if self.IR_Lamp_13:\n self.c1_b3 |= 0x10 # C 1: Do 28\n else:\n self.c1_b3 &= ~0x10\n if 'IR Lamp 14' in d:\n self.IR_Lamp_14 = d['IR Lamp 14']\n if self.IR_Lamp_14:\n self.c1_b3 |= 0x20 # C 1: Do 29\n else:\n self.c1_b3 &= ~0x20\n if 'IR Lamp 15' in d:\n self.IR_Lamp_15 = d['IR Lamp 15']\n if self.IR_Lamp_15:\n self.c1_b3 |= 0x40 # C 1: Do 30\n else:\n self.c1_b3 &= ~0x40\n if 'IR Lamp 16' in d:\n self.IR_Lamp_16 = d['IR Lamp 16']\n if self.IR_Lamp_16:\n self.c1_b3 |= 0x80 # C 1: Do 31\n else:\n self.c1_b3 &= ~0x80\n\n if 'C2 B0' in d:\n self.c2_b0 = d['C2 B0']\n self.Heater_1 = ((self.c2_b0 & 0x01) > 0) # C 2: Do 0\n self.Heater_2 = ((self.c2_b0 & 0x02) > 0) # C 2: Do 1\n self.Heater_3 = ((self.c2_b0 & 0x04) > 0) # C 2: Do 2\n self.Heater_4 = ((self.c2_b0 & 0x08) > 0) # C 2: Do 3\n self.Heater_5 = ((self.c2_b0 & 0x10) > 0) # C 2: Do 4\n self.Heater_6 = ((self.c2_b0 & 0x20) > 0) # C 2: Do 5\n self.Heater_7 = ((self.c2_b0 & 0x40) > 0) # C 2: Do 6\n self.Heater_8 = ((self.c2_b0 & 0x80) > 0) # C 2: Do 7\n if 'Heater SSR 1' in d:\n self.Heater_1 = d['Heater SSR 1']\n if self.Heater_1:\n self.c2_b0 |= 0x01 # C 2: Do 0\n else:\n self.c2_b0 &= ~0x01\n if 'Heater SSR 2' in d:\n self.Heater_2 = d['Heater SSR 2']\n if self.Heater_2:\n self.c2_b0 |= 0x02 # C 2: Do 1\n else:\n self.c2_b0 &= ~0x02\n if 'Heater SSR 3' in d:\n self.Heater_3 = d['Heater SSR 3']\n if self.Heater_3:\n self.c2_b0 |= 0x04 # C 2: Do 2\n else:\n self.c2_b0 &= ~0x04\n if 'Heater SSR 4' in d:\n self.Heater_4 = d['Heater SSR 4']\n if self.Heater_4:\n self.c2_b0 |= 0x08 # C 2: Do 3\n else:\n self.c2_b0 &= ~0x08\n if 'Heater SSR 5' in d:\n self.Heater_5 = d['Heater SSR 5']\n if self.Heater_5:\n self.c2_b0 |= 0x10 # C 2: Do 4\n else:\n self.c2_b0 &= ~0x10\n if 'Heater SSR 6' in d:\n self.Heater_6 = d['Heater SSR 6']\n if self.Heater_6:\n self.c2_b0 |= 0x20 # C 2: Do 5\n else:\n self.c2_b0 &= ~0x20\n if 'Heater SSR 7' in d:\n self.Heater_7 = d['Heater SSR 7']\n if self.Heater_7:\n self.c2_b0 |= 0x40 # C 2: Do 6\n else:\n self.c2_b0 &= ~0x40\n if 'Heater SSR 8' in d:\n self.Heater_8 = d['Heater SSR 8']\n if self.Heater_8:\n self.c2_b0 |= 0x80 # C 2: Do 7\n else:\n self.c2_b0 &= ~0x80\n\n if 'C2 B1' in d:\n self.c2_b1 = d['C2 B1']\n self.Heater_9 = ((self.c2_b1 & 0x01) > 0) # C 2: Do 8\n self.Heater_10 = ((self.c2_b1 & 0x02) > 0) # C 2: Do 9\n self.Heater_11 = ((self.c2_b1 & 0x04) > 0) # C 2: Do 10\n self.Heater_12 = ((self.c2_b1 & 0x08) > 0) # C 2: Do 11\n self.Heater_13 = ((self.c2_b1 & 0x10) > 0) # C 2: Do 12\n self.Heater_14 = ((self.c2_b1 & 0x20) > 0) # C 2: Do 13\n self.Heater_15 = ((self.c2_b1 & 0x40) > 0) # C 2: Do 14\n self.Heater_16 = ((self.c2_b1 & 0x80) > 0) # C 2: Do 15\n if 'Heater SSR 9' in d:\n self.Heater_9 = d['Heater SSR 9']\n if self.Heater_9:\n self.c2_b1 |= 0x01 # C 2: Do 8\n else:\n self.c2_b1 &= 
~0x01\n if 'Heater SSR 10' in d:\n self.Heater_10 = d['Heater SSR 10']\n if self.Heater_10:\n self.c2_b1 |= 0x02 # C 2: Do 9\n else:\n self.c2_b1 &= ~0x02\n if 'Heater SSR 11' in d:\n self.Heater_11 = d['Heater SSR 11']\n if self.Heater_11:\n self.c2_b1 |= 0x04 # C 2: Do 10\n else:\n self.c2_b1 &= ~0x04\n if 'Heater SSR 12' in d:\n self.Heater_12 = d['Heater SSR 12']\n if self.Heater_12:\n self.c2_b1 |= 0x08 # C 2: Do 11\n else:\n self.c2_b1 &= ~0x08\n if 'Heater SSR 13' in d:\n self.Heater_13 = d['Heater SSR 13']\n if self.Heater_13:\n self.c2_b1 |= 0x10 # C 2: Do 12\n else:\n self.c2_b1 &= ~0x10\n if 'Heater SSR 14' in d:\n self.Heater_14 = d['Heater SSR 14']\n if self.Heater_14:\n self.c2_b1 |= 0x20 # C 2: Do 13\n else:\n self.c2_b1 &= ~0x20\n if 'Heater SSR 15' in d:\n self.Heater_15 = d['Heater SSR 15']\n if self.Heater_15:\n self.c2_b1 |= 0x40 # C 2: Do 14\n else:\n self.c2_b1 &= ~0x40\n if 'Heater SSR 16' in d:\n self.Heater_16 = d['Heater SSR 16']\n if self.Heater_16:\n self.c2_b1 |= 0x80 # C 2: Do 15\n else:\n self.c2_b1 &= ~0x80\n\n if 'C2 B2' in d:\n self.c2_b2 = d['C2 B2']\n self.Heater_17 = ((self.c2_b2 & 0x01) > 0) # C 2: Do 16\n self.Heater_18 = ((self.c2_b2 & 0x02) > 0) # C 2: Do 17\n self.Heater_19 = ((self.c2_b2 & 0x04) > 0) # C 2: Do 18\n self.Heater_20 = ((self.c2_b2 & 0x08) > 0) # C 2: Do 19\n self.Heater_21 = ((self.c2_b2 & 0x10) > 0) # C 2: Do 20\n self.MCC_Power = ((self.c2_b2 & 0x20) > 0) # C 2: Do 21\n self.MCC2_Power = ((self.c2_b2 & 0x40) > 0) # C 2: Do 22\n self.RoughP_GateValve = ((self.c2_b2 & 0x80) > 0) # C 2: Do 23\n if 'Heater SSR 17' in d:\n self.Heater_17 = d['Heater SSR 17']\n if self.Heater_17:\n self.c2_b2 |= 0x01 # C 2: Do 16\n else:\n self.c2_b2 &= ~0x01\n if 'Heater SSR 18' in d:\n self.Heater_18 = d['Heater SSR 18']\n if self.Heater_18:\n self.c2_b2 |= 0x02 # C 2: Do 17\n else:\n self.c2_b2 &= ~0x02\n if 'Heater SSR 19' in d:\n self.Heater_19 = d['Heater SSR 19']\n if self.Heater_19:\n self.c2_b2 |= 0x04 # C 2: Do 18\n else:\n self.c2_b2 &= ~0x04\n if 'Heater SSR 20' in d:\n self.Heater_20 = d['Heater SSR 20']\n if self.Heater_20:\n self.c2_b2 |= 0x08 # C 2: Do 19\n else:\n self.c2_b2 &= ~0x08\n if 'Heater SSR 21' in d:\n self.Heater_21 = d['Heater SSR 21']\n if self.Heater_21:\n self.c2_b2 |= 0x10 # C 2: Do 20\n else:\n self.c2_b2 &= ~0x10\n if 'MCC Power' in d:\n self.MCC_Power = d['MCC Power']\n if self.MCC_Power:\n self.c2_b2 |= 0x20 # C 2: Do 21\n else:\n self.c2_b2 &= ~0x20\n if 'MCC2 Power' in d:\n self.MCC2_Power = d['MCC2 Power']\n if self.MCC2_Power:\n self.c2_b2 |= 0x40 # C 2: Do 22\n else:\n self.c2_b2 &= ~0x40\n if 'RoughP GateValve' in d:\n self.RoughP_GateValve = d['RoughP GateValve']\n if self.RoughP_GateValve:\n self.c2_b2 |= 0x80 # C 2: Do 23\n else:\n self.c2_b2 &= ~0x80\n\n if 'C2 B3' in d:\n self.c2_b3 = d['C2 B3']\n self.RoughP_Start = ((self.c2_b3 & 0x01) > 0) # C 2: Do 24\n self.CryoP_GateValve = ((self.c2_b3 & 0x02) > 0) # C 2: Do 25\n self.RoughP_PurgeGass = ((self.c2_b3 & 0x04) > 0) # C 2: Do 26\n self.LN2_S_Sol = ((self.c2_b3 & 0x08) > 0) # C 2: Do 27\n self.LN2_P_Sol = ((self.c2_b3 & 0x10) > 0) # C 2: Do 28\n self.CryoP1_PwrRelay = ((self.c2_b3 & 0x20) > 0) # C 2: Do 29\n self.CryoP2_PwrRelay = ((self.c2_b3 & 0x40) > 0) # C 2: Do 30\n self.RoughP_PwrRelay = ((self.c2_b3 & 0x80) > 0) # C 2: Do 31\n if 'RoughP Start' in d:\n self.RoughP_Start = d['RoughP Start']\n if self.RoughP_Start:\n self.c2_b3 |= 0x01 # C 2: Do 24\n else:\n self.c2_b3 &= ~0x01\n if 'CryoP GateValve' in d:\n self.CryoP_GateValve = d['CryoP 
GateValve']\n if self.CryoP_GateValve:\n self.c2_b3 |= 0x02 # C 2: Do 25\n else:\n self.c2_b3 &= ~0x02\n if 'RoughP PurgeGass' in d:\n self.RoughP_PurgeGass = d['RoughP PurgeGass']\n if self.RoughP_PurgeGass:\n self.c2_b3 |= 0x04 # C 2: Do 26\n else:\n self.c2_b3 &= ~0x04\n if 'LN2-S Sol' in d:\n self.LN2_S_Sol = d['LN2-S Sol']\n if self.LN2_S_Sol:\n self.c2_b3 |= 0x08 # C 2: Do 27\n else:\n self.c2_b3 &= ~0x08\n if 'LN2-P Sol' in d:\n self.LN2_P_Sol = d['LN2-P Sol']\n if self.LN2_P_Sol:\n self.c2_b3 |= 0x10 # C 2: Do 28\n else:\n self.c2_b3 &= ~0x10\n if 'CryoP Pwr Relay 1' in d:\n self.CryoP1_PwrRelay = d['CryoP Pwr Relay 1']\n if self.CryoP1_PwrRelay:\n self.c2_b3 |= 0x20 # C 2: Do 29\n else:\n self.c2_b3 &= ~0x20\n if 'CryoP Pwr Relay 2' in d:\n self.CryoP2_PwrRelay = d['CryoP Pwr Relay 2']\n if self.CryoP2_PwrRelay:\n self.c2_b3 |= 0x40 # C 2: Do 30\n else:\n self.c2_b3 &= ~0x40\n if 'RoughP Pwr Relay' in d:\n self.RoughP_PwrRelay = d['RoughP Pwr Relay']\n if self.RoughP_PwrRelay:\n self.c2_b3 |= 0x80 # C 2: Do 31\n else:\n self.c2_b3 &= ~0x80\n if 'IR Lamp 1 PWM DC' in d:\n self.IR_Lamps_pwm_dc[0] = d['IR Lamp 1 PWM DC']\n if 'IR Lamp 2 PWM DC' in d:\n self.IR_Lamps_pwm_dc[1] = d['IR Lamp 2 PWM DC']\n if 'IR Lamp 3 PWM DC' in d:\n self.IR_Lamps_pwm_dc[2] = d['IR Lamp 3 PWM DC']\n if 'IR Lamp 4 PWM DC' in d:\n self.IR_Lamps_pwm_dc[3] = d['IR Lamp 4 PWM DC']\n if 'IR Lamp 5 PWM DC' in d:\n self.IR_Lamps_pwm_dc[4] = d['IR Lamp 5 PWM DC']\n if 'IR Lamp 6 PWM DC' in d:\n self.IR_Lamps_pwm_dc[5] = d['IR Lamp 6 PWM DC']\n if 'IR Lamp 7 PWM DC' in d:\n self.IR_Lamps_pwm_dc[6] = d['IR Lamp 7 PWM DC']\n if 'IR Lamp 8 PWM DC' in d:\n self.IR_Lamps_pwm_dc[7] = d['IR Lamp 8 PWM DC']\n if 'IR Lamp 9 PWM DC' in d:\n self.IR_Lamps_pwm_dc[8] = d['IR Lamp 9 PWM DC']\n if 'IR Lamp 10 PWM DC' in d:\n self.IR_Lamps_pwm_dc[9] = d['IR Lamp 10 PWM DC']\n if 'IR Lamp 11 PWM DC' in d:\n self.IR_Lamps_pwm_dc[10] = d['IR Lamp 11 PWM DC']\n if 'IR Lamp 12 PWM DC' in d:\n self.IR_Lamps_pwm_dc[11] = d['IR Lamp 12 PWM DC']\n if 'IR Lamp 13 PWM DC' in d:\n self.IR_Lamps_pwm_dc[12] = d['IR Lamp 13 PWM DC']\n if 'IR Lamp 14 PWM DC' in d:\n self.IR_Lamps_pwm_dc[13] = d['IR Lamp 14 PWM DC']\n if 'IR Lamp 15 PWM DC' in d:\n self.IR_Lamps_pwm_dc[14] = d['IR Lamp 15 PWM DC']\n if 'IR Lamp 16 PWM DC' in d:\n self.IR_Lamps_pwm_dc[15] = d['IR Lamp 16 PWM DC']\n self.__lock.release()\n\n # Get the IR lamps PWM Duty Cycle: lamp_num range = (1 to 16)\n def get_IR_Lamps_pwm_dc(self, lamp_num):\n self.__lock.acquire()\n val = self.IR_Lamps_pwm_dc[lamp_num-1]\n self.__lock.release()\n return val\n\n def get_c1_b0(self):\n self.__lock.acquire()\n val = self.c1_b0\n self.__lock.release()\n return val\n\n def get_c1_b1(self):\n self.__lock.acquire()\n val = self.c1_b1\n self.__lock.release()\n return val\n\n def get_c1_b2(self):\n self.__lock.acquire()\n val = self.c1_b2\n self.__lock.release()\n return val\n\n def get_c1_b3(self):\n self.__lock.acquire()\n val = self.c1_b3\n self.__lock.release()\n return val\n\n def get_c2_b0(self):\n self.__lock.acquire()\n val = self.c2_b0\n self.__lock.release()\n return val\n\n def get_c2_b1(self):\n self.__lock.acquire()\n val = self.c2_b1\n self.__lock.release()\n return val\n\n def get_c2_b2(self):\n self.__lock.acquire()\n val = self.c2_b2\n self.__lock.release()\n return val\n\n def get_c2_b3(self):\n self.__lock.acquire()\n val = self.c2_b3\n self.__lock.release()\n return val\n\n def getVal(self, name):\n self.__lock.acquire()\n if name == 'LN2-P EN':\n val = self.LN2_P_EN\n elif name == 
'LN2-S EN':\n val = self.LN2_S_EN\n elif name == 'LN2-Sol EN':\n val = self.LN2_Sol_EN\n elif name == 'notUsed1':\n val = self.notUsed1\n elif name == 'IR Lamp 1':\n val = self.IR_Lamp_1\n elif name == 'IR Lamp 2':\n val = self.IR_Lamp_2\n elif name == 'IR Lamp 3':\n val = self.IR_Lamp_3\n elif name == 'IR Lamp 4':\n val = self.IR_Lamp_4\n elif name == 'IR Lamp 5':\n val = self.IR_Lamp_5\n elif name == 'IR Lamp 6':\n val = self.IR_Lamp_6\n elif name == 'IR Lamp 7':\n val = self.IR_Lamp_7\n elif name == 'IR Lamp 8':\n val = self.IR_Lamp_8\n elif name == 'IR Lamp 9':\n val = self.IR_Lamp_9\n elif name == 'IR Lamp 10':\n val = self.IR_Lamp_10\n elif name == 'IR Lamp 11':\n val = self.IR_Lamp_11\n elif name == 'IR Lamp 12':\n val = self.IR_Lamp_12\n elif name == 'IR Lamp 13':\n val = self.IR_Lamp_13\n elif name == 'IR Lamp 14':\n val = self.IR_Lamp_14\n elif name == 'IR Lamp 15':\n val = self.IR_Lamp_15\n elif name == 'IR Lamp 16':\n val = self.IR_Lamp_16\n elif name == 'Heater SSR 1':\n val = self.Heater_1\n elif name == 'Heater SSR 2':\n val = self.Heater_2\n elif name == 'Heater SSR 3':\n val = self.Heater_3\n elif name == 'Heater SSR 4':\n val = self.Heater_4\n elif name == 'Heater SSR 5':\n val = self.Heater_5\n elif name == 'Heater SSR 6':\n val = self.Heater_6\n elif name == 'Heater SSR 7':\n val = self.Heater_7\n elif name == 'Heater SSR 8':\n val = self.Heater_8\n elif name == 'Heater SSR 9':\n val = self.Heater_9\n elif name == 'Heater SSR 10':\n val = self.Heater_10\n elif name == 'Heater SSR 11':\n val = self.Heater_11\n elif name == 'Heater SSR 12':\n val = self.Heater_12\n elif name == 'Heater SSR 13':\n val = self.Heater_13\n elif name == 'Heater SSR 14':\n val = self.Heater_14\n elif name == 'Heater SSR 15':\n val = self.Heater_15\n elif name == 'Heater SSR 16':\n val = self.Heater_16\n elif name == 'Heater SSR 17':\n val = self.Heater_17\n elif name == 'Heater SSR 18':\n val = self.Heater_18\n elif name == 'Heater SSR 19':\n val = self.Heater_19\n elif name == 'Heater SSR 20':\n val = self.Heater_20\n elif name == 'Heater SSR 21':\n val = self.Heater_21\n elif name == 'MCC Power':\n val = self.MCC_Power\n elif name == 'MCC2 Power':\n val = self.MCC2_Power\n elif name == 'RoughP GateValve':\n val = self.RoughP_GateValve\n elif name == 'RoughP Start':\n val = self.RoughP_Start\n elif name == 'CryoP GateValve':\n val = self.CryoP_GateValve\n elif name == 'RoughP PurgeGass':\n val = self.RoughP_PurgeGass\n elif name == 'LN2-S Sol':\n val = self.LN2_S_Sol\n elif name == 'LN2-P Sol':\n val = self.LN2_P_Sol\n elif name == 'CryoP Pwr Relay 1':\n val = self.CryoP1_PwrRelay\n elif name == 'CryoP Pwr Relay 2':\n val = self.CryoP2_PwrRelay\n elif name == 'RoughP Pwr Relay':\n val = self.RoughP_PwrRelay\n else: # Unknown Value!\n val = None\n self.__lock.release()\n return val\n\n def getJson(self):\n self.__lock.acquire()\n message = ['{\"LN2-P EN\":%s,' % json.dumps(self.LN2_P_EN),\n '\"LN2-S EN\":%s,' % json.dumps(self.LN2_S_EN),\n '\"LN2-Sol EN\":%s,' % json.dumps(self.LN2_Sol_EN),\n '\"notUsed1\":%s,' % json.dumps(self.notUsed1),\n '\"IR Lamp 1\":%s,' % json.dumps(self.IR_Lamp_1),\n '\"IR Lamp 2\":%s,' % json.dumps(self.IR_Lamp_2),\n '\"IR Lamp 3\":%s,' % json.dumps(self.IR_Lamp_3),\n '\"IR Lamp 4\":%s,' % json.dumps(self.IR_Lamp_4),\n '\"IR Lamp 5\":%s,' % json.dumps(self.IR_Lamp_5),\n '\"IR Lamp 6\":%s,' % json.dumps(self.IR_Lamp_6),\n '\"IR Lamp 7\":%s,' % json.dumps(self.IR_Lamp_7),\n '\"IR Lamp 8\":%s,' % json.dumps(self.IR_Lamp_8),\n '\"IR Lamp 9\":%s,' % 
json.dumps(self.IR_Lamp_9),\n '\"IR Lamp 10\":%s,' % json.dumps(self.IR_Lamp_10),\n '\"IR Lamp 11\":%s,' % json.dumps(self.IR_Lamp_11),\n '\"IR Lamp 12\":%s,' % json.dumps(self.IR_Lamp_12),\n '\"IR Lamp 13\":%s,' % json.dumps(self.IR_Lamp_13),\n '\"IR Lamp 14\":%s,' % json.dumps(self.IR_Lamp_14),\n '\"IR Lamp 15\":%s,' % json.dumps(self.IR_Lamp_15),\n '\"IR Lamp 16\":%s,' % json.dumps(self.IR_Lamp_16),\n '\"Heater SSR 1\":%s,' % json.dumps(self.Heater_1),\n '\"Heater SSR 2\":%s,' % json.dumps(self.Heater_2),\n '\"Heater SSR 3\":%s,' % json.dumps(self.Heater_3),\n '\"Heater SSR 4\":%s,' % json.dumps(self.Heater_4),\n '\"Heater SSR 5\":%s,' % json.dumps(self.Heater_5),\n '\"Heater SSR 6\":%s,' % json.dumps(self.Heater_6),\n '\"Heater SSR 7\":%s,' % json.dumps(self.Heater_7),\n '\"Heater SSR 8\":%s,' % json.dumps(self.Heater_8),\n '\"Heater SSR 9\":%s,' % json.dumps(self.Heater_9),\n '\"Heater SSR 10\":%s,' % json.dumps(self.Heater_10),\n '\"Heater SSR 11\":%s,' % json.dumps(self.Heater_11),\n '\"Heater SSR 12\":%s,' % json.dumps(self.Heater_12),\n '\"Heater SSR 13\":%s,' % json.dumps(self.Heater_13),\n '\"Heater SSR 14\":%s,' % json.dumps(self.Heater_14),\n '\"Heater SSR 15\":%s,' % json.dumps(self.Heater_15),\n '\"Heater SSR 16\":%s,' % json.dumps(self.Heater_16),\n '\"Heater SSR 17\":%s,' % json.dumps(self.Heater_17),\n '\"Heater SSR 18\":%s,' % json.dumps(self.Heater_18),\n '\"Heater SSR 19\":%s,' % json.dumps(self.Heater_19),\n '\"Heater SSR 20\":%s,' % json.dumps(self.Heater_20),\n '\"Heater SSR 21\":%s,' % json.dumps(self.Heater_21),\n '\"MCC Power\":%s,' % json.dumps(self.MCC_Power),\n '\"MCC2 Power\":%s,' % json.dumps(self.MCC2_Power),\n '\"RoughP GateValve\":%s,' % json.dumps(self.RoughP_GateValve),\n '\"RoughP Start\":%s,' % json.dumps(self.RoughP_Start),\n '\"CryoP GateValve\":%s,' % json.dumps(self.CryoP_GateValve),\n '\"RoughP PurgeGass\":%s,' % json.dumps(self.RoughP_PurgeGass),\n '\"LN2-S Sol\":%s,' % json.dumps(self.LN2_S_Sol),\n '\"LN2-P Sol\":%s,' % json.dumps(self.LN2_P_Sol),\n '\"CryoP Pwr Relay 1\":%s,' % json.dumps(self.CryoP1_PwrRelay),\n '\"CryoP Pwr Relay 2\":%s,' % json.dumps(self.CryoP2_PwrRelay),\n '\"RoughP Pwr Relay\":%s,' % json.dumps(self.RoughP_PwrRelay),\n '\"IR_Lamps_pwm_dc\":%s}' % json.dumps(self.IR_Lamps_pwm_dc)]\n self.__lock.release()\n return ''.join(message)\n","sub_path":"Sites/DataContracts/DigitalOutContract.py","file_name":"DigitalOutContract.py","file_ext":"py","file_size_in_byte":34082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"13716819","text":"# input() reads a string with a line of input, stripping the '\\n' (newline) at the end.\r\n# This is all you need for most Google Code Jam problems.\r\nimport copy\r\nimport math\r\nfrom pandas import *\r\nimport numpy\r\nsolutions = []\r\ndata = []\r\ncases = int(input()) # read a line with a single integer\r\nfor i in range(1, cases + 1):\r\n\tn, m = [int(s) for s in input().split(\" \")] \r\n\ttmp = [n,m,[]]\r\n\tfor j in range(1, m + 1):\r\n\t\ttmp[2].append([s for s in input().split(\" \")])\r\n\tdata.append(tmp)\t\r\n\t\r\n\t\r\n\t#print(DataFrame(tmp_copy[0]))\r\n\t\r\ndef check_rows(matrix,x,y):\r\n\ttmp = 0\r\n\tfor j in range(0,len(matrix[0])):\r\n\t\tif(matrix[x][j] == \"o\" or matrix[x][j] == \"x\" ):\r\n\t\t\t#print(str(x)+\",\"+str(j)+\"=\"+str(matrix[x][j]))\r\n\t\t\ttmp += 1\r\n\t\tif(matrix[j][y] == \"o\" or matrix[j][y] == \"x\" ):\r\n\t\t\t#print(str(j)+\",\"+str(y)+\"=\"+str(matrix[x][j]))\r\n\t\t\ttmp += 1\r\n\treturn 
tmp\r\n\r\ndef check_diagonals(matrix,x,y):\r\n\ttmp = 0\r\n\tfor k in range(1,len(matrix[0])):\r\n\t\tif(x-k>-1 and y-k>-1):\r\n\t\t\tif(matrix[x-k][y-k] == \"o\" or matrix[x-k][y-k] == \"+\" ):\r\n\t\t\t\ttmp += 1\r\n\t\tif(x+k<len(matrix[0]) and y-k>-1):\r\n\t\t\tif(matrix[x+k][y-k] == \"o\" or matrix[x+k][y-k] == \"+\" ):\r\n\t\t\t\ttmp += 1\r\n\t\tif(x-k>-1 and y+k<len(matrix[0])):\r\n\t\t\tif(matrix[x-k][y+k] == \"o\" or matrix[x-k][y+k] == \"+\" ):\r\n\t\t\t\ttmp += 1\r\n\t\tif(x+k<len(matrix[0]) and y+k<len(matrix[0])):\r\n\t\t\tif(matrix[x+k][y+k] == \"o\" or matrix[x+k][y+k] == \"+\" ):\r\n\t\t\t\ttmp += 1\r\n\treturn tmp\r\n\r\ndef can_place_o(M,x,y):\r\n\tif(check_rows(field,x,y)>0):\r\n\t\treturn False\r\n\tif(check_diagonals(field,x,y)>0):\r\n\t\treturn False\r\n\treturn True\r\n\r\ndef can_place_x(M,x,y):\r\n\tif(check_rows(field,x,y) >0):\r\n\t\treturn False\r\n\treturn True\r\n\r\ndef can_place_plus(M,x,y):\r\n\tif(check_diagonals(field,x,y) >0):\r\n\t\treturn False\r\n\treturn True\r\n\r\nfor item in data:\r\n\tcandidates = []\r\n\tpotential_moves = []\r\n\tknown = []\r\n\tfield = [[0]*item[0] for i in range(item[0])]\r\n\tfor model in item[2]:\r\n\t\tfield[int(model[1])-1][int(model[2])-1] = model[0]\r\n\tcnt= True\r\n\ttimer =0\r\n\tmoves = []\r\n\twhile(timer < 20):\r\n\t\ttimer +=1\r\n\t\tfor x in range(len(field)):\r\n\t\t\tfor y in range(len(field)):\r\n\t\t\t\tif(field[x][y] != 0):\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif(can_place_o(field,x,y)):\r\n\t\t\t\t\t\tfield[x][y] = \"o\"\r\n\t\t\t\t\t\tmoves.append([\"o\",x,y])\r\n\t\t\t\telif(can_place_x(field,x,y)):\r\n\t\t\t\t\t\tfield[x][y] = \"x\"\r\n\t\t\t\t\t\tmoves.append([\"x\",x,y])\r\n\t\t\t\telif(can_place_plus(field,x,y)):\r\n\t\t\t\t\t\tfield[x][y] = \"+\"\r\n\t\t\t\t\t\tmoves.append([\"+\",x,y])\r\n\twhile(timer < 20):\r\n\t\ttimer +=1\r\n\t\tfor x in range(len(field)):\r\n\t\t\tfor y in range(len(field)):\r\n\t\t\t\tif(field[x][y] == 0):\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif(can_place_o(field,x,y)):\r\n\t\t\t\t\t\tfield[x][y] = \"o\"\r\n\t\t\t\t\t\tmoves.append([\"o\",x,y])\r\n\r\n\tprint(1)\r\n\ttmp_score = 0\r\n\tprint(DataFrame(field))\r\n\tfor x in range(len(field)):\r\n\t\tfor y in range(len(field[x])):\r\n\t\t\tif(field[x][y] == \"o\"):\r\n\t\t\t\ttmp_score+= 2\r\n\t\t\tif(field[x][y] == \"x\" or field[x][y] == \"+\"):\r\n\t\t\t\ttmp_score+= 1\r\n\t\tscore = tmp_score\r\n\tsolutions.append([score,moves])\r\n\r\nx = 1\r\nfor item in solutions:\r\n\tprint(\"Case #\" +str(x)+ \": \" +str(item[0]) + \" \" + str(len(item[1])))\r\n\tfor line in item[1]:\r\n\t\tprint(line[0] + \" \" + str(line[1]+1) + \" \" + str(line[2]+1))\r\n\tx += 1\r\n","sub_path":"code jam/code jam 2/old/d/solve3.py","file_name":"solve3.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"270056915","text":"# A number with 4 digits is called an \"Armstrong\" number if the sum of the 4th 
\n# powers of each of its digits (the 3rd power for 3-digit numbers) is equal\n# to the number itself.\n# For example: 1634 = 1^4 + 6^4 + 3^4 + 4^4\n\na = input(\"Enter a number: \")\n\nbasamak = len(a)\ntoplam = 0\nprint(\"Number of digits: \", basamak)\nfor i in a:\n\n    x = int(i) ** basamak\n    print((int(i)), \" ** \" , basamak, \" = \", x)\n    toplam += x\nprint(\"Sum: \", toplam)\nif toplam == int(a):\n    print (\"The number you entered is an Armstrong number\")\n\nelse:\n    print(\"It is not an Armstrong number\")\n\n","sub_path":"armstrong.py","file_name":"armstrong.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"559333042","text":"# narrowbundle2.py - bundle2 extensions for narrow repository support\n#\n# Copyright 2017 Google, Inc.\n#\n# This software may be used and distributed according to the terms of the\n# GNU General Public License version 2 or any later version.\n\nfrom __future__ import absolute_import\n\nimport collections\nimport errno\nimport struct\n\nfrom mercurial.i18n import _\nfrom mercurial.node import (\n    bin,\n    nullid,\n    nullrev,\n)\nfrom mercurial import (\n    bundle2,\n    changegroup,\n    dagutil,\n    error,\n    exchange,\n    extensions,\n    narrowspec,\n    repair,\n    util,\n    wireprototypes,\n)\nfrom mercurial.utils import (\n    stringutil,\n)\n\nNARROWCAP = 'narrow'\n_NARROWACL_SECTION = 'narrowhgacl'\n_CHANGESPECPART = NARROWCAP + ':changespec'\n_SPECPART = NARROWCAP + ':spec'\n_SPECPART_INCLUDE = 'include'\n_SPECPART_EXCLUDE = 'exclude'\n_KILLNODESIGNAL = 'KILL'\n_DONESIGNAL = 'DONE'\n_ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)\n_ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)\n_CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)\n_MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)\n\n# When advertising capabilities, always include narrow clone support.\ndef getrepocaps_narrow(orig, repo, **kwargs):\n    caps = orig(repo, **kwargs)\n    caps[NARROWCAP] = ['v0']\n    return caps\n\ndef _computeellipsis(repo, common, heads, known, match, depth=None):\n    \"\"\"Compute the shape of a narrowed DAG.\n\n    Args:\n      repo: The repository we're transferring.\n      common: The roots of the DAG range we're transferring.\n          May be just [nullid], which means all ancestors of heads.\n      heads: The heads of the DAG range we're transferring.\n      match: The narrowmatcher that allows us to identify relevant changes.\n      depth: If not None, only consider nodes to be full nodes if they are at\n          most depth changesets away from one of heads.\n\n    Returns:\n      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:\n\n      visitnodes: The list of nodes (either full or ellipsis) which\n          need to be sent to the client.\n      relevant_nodes: The set of changelog nodes which change a file inside\n          the narrowspec. 
The client needs these as non-ellipsis nodes.\n ellipsisroots: A dict of {rev: parents} that is used in\n narrowchangegroup to produce ellipsis nodes with the\n correct parents.\n \"\"\"\n cl = repo.changelog\n mfl = repo.manifestlog\n\n cldag = dagutil.revlogdag(cl)\n # dagutil does not like nullid/nullrev\n commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])\n headsrevs = cldag.internalizeall(heads)\n if depth:\n revdepth = {h: 0 for h in headsrevs}\n\n ellipsisheads = collections.defaultdict(set)\n ellipsisroots = collections.defaultdict(set)\n\n def addroot(head, curchange):\n \"\"\"Add a root to an ellipsis head, splitting heads with 3 roots.\"\"\"\n ellipsisroots[head].add(curchange)\n # Recursively split ellipsis heads with 3 roots by finding the\n # roots' youngest common descendant which is an elided merge commit.\n # That descendant takes 2 of the 3 roots as its own, and becomes a\n # root of the head.\n while len(ellipsisroots[head]) > 2:\n child, roots = splithead(head)\n splitroots(head, child, roots)\n head = child # Recurse in case we just added a 3rd root\n\n def splitroots(head, child, roots):\n ellipsisroots[head].difference_update(roots)\n ellipsisroots[head].add(child)\n ellipsisroots[child].update(roots)\n ellipsisroots[child].discard(child)\n\n def splithead(head):\n r1, r2, r3 = sorted(ellipsisroots[head])\n for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):\n mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',\n nr1, head, nr2, head)\n for j in mid:\n if j == nr2:\n return nr2, (nr1, nr2)\n if j not in ellipsisroots or len(ellipsisroots[j]) < 2:\n return j, (nr1, nr2)\n raise error.Abort('Failed to split up ellipsis node! head: %d, '\n 'roots: %d %d %d' % (head, r1, r2, r3))\n\n missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))\n visit = reversed(missing)\n relevant_nodes = set()\n visitnodes = [cl.node(m) for m in missing]\n required = set(headsrevs) | known\n for rev in visit:\n clrev = cl.changelogrevision(rev)\n ps = cldag.parents(rev)\n if depth is not None:\n curdepth = revdepth[rev]\n for p in ps:\n revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))\n needed = False\n shallow_enough = depth is None or revdepth[rev] <= depth\n if shallow_enough:\n curmf = mfl[clrev.manifest].read()\n if ps:\n # We choose to not trust the changed files list in\n # changesets because it's not always correct. TODO: could\n # we trust it for the non-merge case?\n p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()\n needed = bool(curmf.diff(p1mf, match))\n if not needed and len(ps) > 1:\n # For merge changes, the list of changed files is not\n # helpful, since we need to emit the merge if a file\n # in the narrow spec has changed on either side of the\n # merge. 
As a result, we do a manifest diff to check.\n p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()\n needed = bool(curmf.diff(p2mf, match))\n else:\n # For a root node, we need to include the node if any\n # files in the node match the narrowspec.\n needed = any(curmf.walk(match))\n\n if needed:\n for head in ellipsisheads[rev]:\n addroot(head, rev)\n for p in ps:\n required.add(p)\n relevant_nodes.add(cl.node(rev))\n else:\n if not ps:\n ps = [nullrev]\n if rev in required:\n for head in ellipsisheads[rev]:\n addroot(head, rev)\n for p in ps:\n ellipsisheads[p].add(rev)\n else:\n for p in ps:\n ellipsisheads[p] |= ellipsisheads[rev]\n\n # add common changesets as roots of their reachable ellipsis heads\n for c in commonrevs:\n for head in ellipsisheads[c]:\n addroot(head, c)\n return visitnodes, relevant_nodes, ellipsisroots\n\ndef _packellipsischangegroup(repo, common, match, relevant_nodes,\n ellipsisroots, visitnodes, depth, source, version):\n if version in ('01', '02'):\n raise error.Abort(\n 'ellipsis nodes require at least cg3 on client and server, '\n 'but negotiated version %s' % version)\n # We wrap cg1packer.revchunk, using a side channel to pass\n # relevant_nodes into that area. Then if linknode isn't in the\n # set, we know we have an ellipsis node and we should defer\n # sending that node's data. We override close() to detect\n # pending ellipsis nodes and flush them.\n packer = changegroup.getbundler(version, repo)\n # Let the packer have access to the narrow matcher so it can\n # omit filelogs and dirlogs as needed\n packer._narrow_matcher = lambda : match\n # Give the packer the list of nodes which should not be\n # ellipsis nodes. We store this rather than the set of nodes\n # that should be an ellipsis because for very large histories\n # we expect this to be significantly smaller.\n packer.full_nodes = relevant_nodes\n # Maps ellipsis revs to their roots at the changelog level.\n packer.precomputed_ellipsis = ellipsisroots\n # Maps CL revs to per-revlog revisions. Cleared in close() at\n # the end of each group.\n packer.clrev_to_localrev = {}\n packer.next_clrev_to_localrev = {}\n # Maps changelog nodes to changelog revs. 
Filled in once\n # during changelog stage and then left unmodified.\n packer.clnode_to_rev = {}\n packer.changelog_done = False\n # If true, informs the packer that it is serving shallow content and might\n # need to pack file contents not introduced by the changes being packed.\n packer.is_shallow = depth is not None\n\n return packer.generate(common, visitnodes, False, source)\n\n# Serve a changegroup for a client with a narrow clone.\ndef getbundlechangegrouppart_narrow(bundler, repo, source,\n bundlecaps=None, b2caps=None, heads=None,\n common=None, **kwargs):\n cgversions = b2caps.get('changegroup')\n if cgversions: # 3.1 and 3.2 ship with an empty value\n cgversions = [v for v in cgversions\n if v in changegroup.supportedoutgoingversions(repo)]\n if not cgversions:\n raise ValueError(_('no common changegroup version'))\n version = max(cgversions)\n else:\n raise ValueError(_(\"server does not advertise changegroup version,\"\n \" can't negotiate support for ellipsis nodes\"))\n\n include = sorted(filter(bool, kwargs.get(r'includepats', [])))\n exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))\n newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)\n if not repo.ui.configbool(\"experimental\", \"narrowservebrokenellipses\"):\n outgoing = exchange._computeoutgoing(repo, heads, common)\n if not outgoing.missing:\n return\n def wrappedgetbundler(orig, *args, **kwargs):\n bundler = orig(*args, **kwargs)\n bundler._narrow_matcher = lambda : newmatch\n return bundler\n with extensions.wrappedfunction(changegroup, 'getbundler',\n wrappedgetbundler):\n cg = changegroup.makestream(repo, outgoing, version, source)\n part = bundler.newpart('changegroup', data=cg)\n part.addparam('version', version)\n if 'treemanifest' in repo.requirements:\n part.addparam('treemanifest', '1')\n\n if include or exclude:\n narrowspecpart = bundler.newpart(_SPECPART)\n if include:\n narrowspecpart.addparam(\n _SPECPART_INCLUDE, '\\n'.join(include), mandatory=True)\n if exclude:\n narrowspecpart.addparam(\n _SPECPART_EXCLUDE, '\\n'.join(exclude), mandatory=True)\n\n return\n\n depth = kwargs.get(r'depth', None)\n if depth is not None:\n depth = int(depth)\n if depth < 1:\n raise error.Abort(_('depth must be positive, got %d') % depth)\n\n heads = set(heads or repo.heads())\n common = set(common or [nullid])\n oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))\n oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))\n known = {bin(n) for n in kwargs.get(r'known', [])}\n if known and (oldinclude != include or oldexclude != exclude):\n # Steps:\n # 1. Send kill for \"$known & ::common\"\n #\n # 2. Send changegroup for ::common\n #\n # 3. Proceed.\n #\n # In the future, we can send kills for only the specific\n # nodes we know should go away or change shape, and then\n # send a data stream that tells the client something like this:\n #\n # a) apply this changegroup\n # b) apply nodes XXX, YYY, ZZZ that you already have\n # c) goto a\n #\n # until they've built up the full new state.\n # Convert to revnums and intersect with \"common\". 
The client should\n # have made it a subset of \"common\" already, but let's be safe.\n known = set(repo.revs(\"%ln & ::%ln\", known, common))\n # TODO: we could send only roots() of this set, and the\n # list of nodes in common, and the client could work out\n # what to strip, instead of us explicitly sending every\n # single node.\n deadrevs = known\n def genkills():\n for r in deadrevs:\n yield _KILLNODESIGNAL\n yield repo.changelog.node(r)\n yield _DONESIGNAL\n bundler.newpart(_CHANGESPECPART, data=genkills())\n newvisit, newfull, newellipsis = _computeellipsis(\n repo, set(), common, known, newmatch)\n if newvisit:\n cg = _packellipsischangegroup(\n repo, common, newmatch, newfull, newellipsis,\n newvisit, depth, source, version)\n part = bundler.newpart('changegroup', data=cg)\n part.addparam('version', version)\n if 'treemanifest' in repo.requirements:\n part.addparam('treemanifest', '1')\n\n visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(\n repo, common, heads, set(), newmatch, depth=depth)\n\n repo.ui.debug('Found %d relevant revs\\n' % len(relevant_nodes))\n if visitnodes:\n cg = _packellipsischangegroup(\n repo, common, newmatch, relevant_nodes, ellipsisroots,\n visitnodes, depth, source, version)\n part = bundler.newpart('changegroup', data=cg)\n part.addparam('version', version)\n if 'treemanifest' in repo.requirements:\n part.addparam('treemanifest', '1')\n\ndef applyacl_narrow(repo, kwargs):\n ui = repo.ui\n username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())\n user_includes = ui.configlist(\n _NARROWACL_SECTION, username + '.includes',\n ui.configlist(_NARROWACL_SECTION, 'default.includes'))\n user_excludes = ui.configlist(\n _NARROWACL_SECTION, username + '.excludes',\n ui.configlist(_NARROWACL_SECTION, 'default.excludes'))\n if not user_includes:\n raise error.Abort(_(\"{} configuration for user {} is empty\")\n .format(_NARROWACL_SECTION, username))\n\n user_includes = [\n 'path:.' if p == '*' else 'path:' + p for p in user_includes]\n user_excludes = [\n 'path:.' if p == '*' else 'path:' + p for p in user_excludes]\n\n req_includes = set(kwargs.get(r'includepats', []))\n req_excludes = set(kwargs.get(r'excludepats', []))\n\n req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(\n req_includes, req_excludes, user_includes, user_excludes)\n\n if invalid_includes:\n raise error.Abort(\n _(\"The following includes are not accessible for {}: {}\")\n .format(username, invalid_includes))\n\n new_args = {}\n new_args.update(kwargs)\n new_args['includepats'] = req_includes\n if req_excludes:\n new_args['excludepats'] = req_excludes\n return new_args\n\n@bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))\ndef _handlechangespec_2(op, inpart):\n includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())\n excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())\n if not changegroup.NARROW_REQUIREMENT in op.repo.requirements:\n op.repo.requirements.add(changegroup.NARROW_REQUIREMENT)\n op.repo._writerequirements()\n op.repo.setnarrowpats(includepats, excludepats)\n\n@bundle2.parthandler(_CHANGESPECPART)\ndef _handlechangespec(op, inpart):\n repo = op.repo\n cl = repo.changelog\n\n # changesets which need to be stripped entirely. 
either they're no longer\n # needed in the new narrow spec, or the server is sending a replacement\n # in the changegroup part.\n clkills = set()\n\n # A changespec part contains all the updates to ellipsis nodes\n # that will happen as a result of widening or narrowing a\n # repo. All the changes that this block encounters are ellipsis\n # nodes or flags to kill an existing ellipsis.\n chunksignal = changegroup.readexactly(inpart, 4)\n while chunksignal != _DONESIGNAL:\n if chunksignal == _KILLNODESIGNAL:\n # a node used to be an ellipsis but isn't anymore\n ck = changegroup.readexactly(inpart, 20)\n if cl.hasnode(ck):\n clkills.add(ck)\n else:\n raise error.Abort(\n _('unexpected changespec node chunk type: %s') % chunksignal)\n chunksignal = changegroup.readexactly(inpart, 4)\n\n if clkills:\n # preserve bookmarks that repair.strip() would otherwise strip\n bmstore = repo._bookmarks\n class dummybmstore(dict):\n def applychanges(self, repo, tr, changes):\n pass\n def recordchange(self, tr): # legacy version\n pass\n repo._bookmarks = dummybmstore()\n chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,\n topic='widen')\n repo._bookmarks = bmstore\n if chgrpfile:\n # presence of _widen_bundle attribute activates widen handler later\n op._widen_bundle = chgrpfile\n # Set the new narrowspec if we're widening. The setnewnarrowpats() method\n # will currently always be there when using the core+narrowhg server, but\n # other servers may include a changespec part even when not widening (e.g.\n # because we're deepening a shallow repo).\n if util.safehasattr(repo, 'setnewnarrowpats'):\n repo.setnewnarrowpats()\n\ndef handlechangegroup_widen(op, inpart):\n \"\"\"Changegroup exchange handler which restores temporarily-stripped nodes\"\"\"\n # We saved a bundle with stripped node data we must now restore.\n # This approach is based on mercurial/repair.py@6ee26a53c111.\n repo = op.repo\n ui = op.ui\n\n chgrpfile = op._widen_bundle\n del op._widen_bundle\n vfs = repo.vfs\n\n ui.note(_(\"adding branch\\n\"))\n f = vfs.open(chgrpfile, \"rb\")\n try:\n gen = exchange.readbundle(ui, f, chgrpfile, vfs)\n if not ui.verbose:\n # silence internal shuffling chatter\n ui.pushbuffer()\n if isinstance(gen, bundle2.unbundle20):\n with repo.transaction('strip') as tr:\n bundle2.processbundle(repo, gen, lambda: tr)\n else:\n gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)\n if not ui.verbose:\n ui.popbuffer()\n finally:\n f.close()\n\n # remove undo files\n for undovfs, undofile in repo.undofiles():\n try:\n undovfs.unlink(undofile)\n except OSError as e:\n if e.errno != errno.ENOENT:\n ui.warn(_('error removing %s: %s\\n') %\n (undovfs.join(undofile), stringutil.forcebytestr(e)))\n\n # Remove partial backup only if there were no exceptions\n vfs.unlink(chgrpfile)\n\ndef setup():\n \"\"\"Enable narrow repo support in bundle2-related extension points.\"\"\"\n extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)\n\n getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS\n\n getbundleargs['narrow'] = 'boolean'\n getbundleargs['depth'] = 'plain'\n getbundleargs['oldincludepats'] = 'csv'\n getbundleargs['oldexcludepats'] = 'csv'\n getbundleargs['includepats'] = 'csv'\n getbundleargs['excludepats'] = 'csv'\n getbundleargs['known'] = 'csv'\n\n # Extend changegroup serving to handle requests from narrow clients.\n origcgfn = exchange.getbundle2partsmapping['changegroup']\n def wrappedcgfn(*args, **kwargs):\n repo = args[1]\n if repo.ui.has_section(_NARROWACL_SECTION):\n 
getbundlechangegrouppart_narrow(\n *args, **applyacl_narrow(repo, kwargs))\n elif kwargs.get(r'narrow', False):\n getbundlechangegrouppart_narrow(*args, **kwargs)\n else:\n origcgfn(*args, **kwargs)\n exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn\n\n # disable rev branch cache exchange when serving a narrow bundle\n # (currently incompatible with that part)\n origrbcfn = exchange.getbundle2partsmapping['cache:rev-branch-cache']\n def wrappedcgfn(*args, **kwargs):\n repo = args[1]\n if repo.ui.has_section(_NARROWACL_SECTION):\n return\n elif kwargs.get(r'narrow', False):\n return\n else:\n origrbcfn(*args, **kwargs)\n exchange.getbundle2partsmapping['cache:rev-branch-cache'] = wrappedcgfn\n\n # Extend changegroup receiver so client can fixup after widen requests.\n origcghandler = bundle2.parthandlermapping['changegroup']\n def wrappedcghandler(op, inpart):\n origcghandler(op, inpart)\n if util.safehasattr(op, '_widen_bundle'):\n handlechangegroup_widen(op, inpart)\n wrappedcghandler.params = origcghandler.params\n bundle2.parthandlermapping['changegroup'] = wrappedcghandler\n","sub_path":"src/Contents/Resources/mercurial_local/hgext/narrow/narrowbundle2.py","file_name":"narrowbundle2.py","file_ext":"py","file_size_in_byte":20794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"184531818","text":"\"\"\"\nURLify: Write a method to replace all spaces in a string with '%20'. You may assume that the string\nhas sufficient space at the end to hold the additional characters, and that you are given the \"true\"\nlength of the string. (Note: If implementing in Java, please use a character array so that you can\nperform this operation in place.)\n\nExample:\ninput: \"Mr John Smith \", 13\noutput: \"Mr%20John%20Smith\"\n\"\"\"\n\ndef urlify01(string):\n\n chars = list(string.strip())\n n = len(chars)\n\n for i in range(n):\n if chars[i] == ' ':\n chars[i] = '%20'\n\n return ''.join(chars)\n\ndef urlify02(string):\n res = ''\n l, r = 0, len(string) - 1\n\n while string[l] == ' ' or string[r] == ' ':\n if string[l] == ' ':\n l += 1\n if string[r] == ' ':\n r -= 1\n\n for i in range(l, r + 1):\n if string[i] == ' ':\n res += '%20'\n else:\n res += string[i]\n\n return res\n\n\nstring = \"Mr John Smith \"\nprint(urlify02(string))\n","sub_path":"problems-CTCI/CTCI-01-3/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"181589253","text":"import requests\n\nfrom webapp.db import db # подключение бд\nfrom webapp.news.models import News\n\ndef get_html(url):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.143 YaBrowser/19.7.1.114 Yowser/2.5 Safari/537.36'\n }\n try:\n result = requests.get(url, headers=headers)\n result.raise_for_status()\n return result.text\n except(requests.RequestException, ValueError):\n return False\n\n# сохранение в бд\ndef save_news(title, url, published):\n # делаем проверку на то есть ли новость в бд\n news_exists = News.query.filter(News.url == url).count()\n if not news_exists:\n new_news = News(title=title, url=url, published=published)\n db.session.add(new_news)\n db.session.commit()","sub_path":"webapp/news/parsers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"8230360","text":"#!/bin/python2\n\nfrom math import cos\nfrom math import log\nimport pygame\nfrom pygame import gfxdraw\nfrom random import randint\nfrom sys import exit\n\nwhite = (255,255,255)\nblack = (0,0,0)\n\nres = (1280,800)\nfps = 60\n\npygame.init()\nscreen = pygame.display.set_mode(res)\nclock = pygame.time.Clock()\n\nparticleMap = []\nxlim = [res[0]*0/4.0, res[0]*4/4.0]\nylim = [res[1]*0/4.0, res[1]*4/4.0]\narea = res[0]/1.0 * res[1]/1.0\nDENSITYCONSTANT = 1/50000.0 # 1 particle per 10 square pixels\ndensity = 1.0 * DENSITYCONSTANT\ndepth = 100.0\nnewParticles = int(density * area)\n\ndef particleGen(d=1.0, collection=particleMap, x=xlim, y=ylim, a=area):\n ''' Populate a collection of particles with new particles '''\n # Variable number of particles?\n n = d * a\n for i in range(n):\n collection.append(randint(x[0],x[1]), randint(y[0], y[1]))\n\nwhile True:\n clock.tick(fps)\n pygame.display.set_caption(\"Starfield | FPS: {:4.2f}\".format(clock.get_fps()))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n pygame.Surface.fill(screen, black)\n\n # particleGen\n for i in range(newParticles):\n # Distance from center of screen\n # posX, posY, dist, depth, direction\n posX = randint(xlim[0], xlim[1])\n deltaX = posX - res[0]/2.0\n posY = randint(ylim[0], ylim[1])\n deltaY = posY - res[1]/2.0\n dist = ( (deltaX)**2 + (deltaY)**2 ) ** 0.5\n if dist == 0:\n dist = 0.01\n if deltaX == 0:\n direction = (0, float(deltaY)/dist)\n elif deltaY == 0:\n direction = (float(deltaX)/dist, 0)\n else:\n direction = (float(deltaX)/dist, float(deltaY)/dist)\n particleMap.append([posX, posY, dist, depth, direction, white])\n\n # updateParticles\n for p in particleMap:\n v = 250 * p[3]**-1.1\n dvec = p[4]\n p[0] += v * dvec[0]\n p[1] += v * dvec[1]\n p[3] -= 1\n sz = (100-p[3])/50\n if p[0] < 0 or p[1] < 0 or p[0] > res[0] or p[1] > res[1]:\n del particleMap[particleMap.index(p)]\n elif p[0] == res[0]/2.0 and p[1] == res[1]/2.0:\n del particleMap[particleMap.index(p)]\n else:\n col = (170*sz/1.9,170*sz/1.9,170*sz/1.9)\n colMult = 255*sz\n colVal = colMult if colMult <= 255 else 255\n pygame.gfxdraw.filled_circle(screen, int(p[0]), int(p[1])\\\n ,int(log(sz+1)), (colVal, colVal, colVal) )\n\n pygame.display.update()\n","sub_path":"starfield.py","file_name":"starfield.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"143541220","text":"#! 
/usr/bin/env python\n\n# Kikyo is a programming language made for fun.\n\nfrom __future__ import print_function\nimport sys\n\ntape = [0] * 30000\nregister = {}\npointer = 0\n\ndef chop(string, beginning):\n    if string.startswith(beginning):\n        return string[len(beginning):]\n    return string\n\ndef rchop(string, ending):\n    if string.endswith(ending):\n        return string[:-len(ending)]\n    return string\n\ndef main(program=None):\n    global tape\n    global register\n    global pointer\n    if program == None:\n        f = open(sys.argv[len(sys.argv) - 1])\n        program = f.read()\n        f.close()\n    command = -1\n    program = rchop(program, \"\\n\").split(\" \")\n    while 1:\n        command += 1\n        if command >= len(program):\n            break\n        elif program[command].lower() == \"right\":\n            if pointer < len(tape) - 1:\n                pointer += 1\n        elif program[command].lower() == \"left\":\n            if pointer > 0:\n                pointer -= 1\n        elif program[command] == \"=\":\n            if program[command + 1].startswith(\"\\\"\") and not program[command + 1].endswith(\"\\\"\"):\n                val = program[command + 1]\n                for c in range(command + 2, len(program)):\n                    val += \" \" + program[c]\n                    if program[c].endswith(\"\\\"\"):\n                        break\n                val = eval(val)\n            else:\n                val = eval(program[command + 1])\n            try: register[program[command - 1]] = val\n            except: print(\"Error!\")\n        elif program[command] == \"+\":\n            tape[pointer] += 1\n        elif program[command] == \"-\":\n            tape[pointer] -= 1\n        elif program[command] == \"print\":\n            if program[command + 1].startswith(\"\\\"\") and not program[command + 1].endswith(\"\\\"\"):\n                printout = program[command + 1]\n                for c in range(command + 2, len(program)):\n                    printout += \" \" + program[c]\n                    if program[c].endswith(\"\\\"\"):\n                        break\n                printout = eval(printout)\n            else:\n                printout = eval(program[command + 1])\n            print(printout, end=\"\")\n        elif program[command].lower() == \"fromregister\":\n            print(register[program[command + 1]])\n        elif program[command].lower() == \"toregister\":\n            a = raw_input()\n            register[a] = raw_input()\n        elif program[command].lower() == \"fromtape\":\n            print(unichr(tape[pointer]), end=\"\")\n        elif program[command].lower() == \"totape\":\n            tape[pointer] = ord(raw_input()[0])\n        elif program[command].lower() == \"while\":\n            p = \"\"\n            count = 0\n            for command2 in range(command + 1, len(program)):\n                if program[command2] == \"while\":\n                    count += 1\n                elif program[command2] == \"end\":\n                    count -= 1\n                if count > -1:\n                    p = p + program[command2] + \" \"  # keep tokens separated so the sub-program can be re-split\n                elif count == -1:\n                    break\n            while tape[pointer] != 0:\n                main(p)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"random/kikyo.py","file_name":"kikyo.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"118488035","text":"from django.conf.urls import url\n\nfrom room_reservations import views\n\nurlpatterns = [\n    url(r'ta$', views.TARoomReservationList.as_view(), name='ta-room-reservation-list'),\n    url(r'update/(?P<pk>\d+)$', views.RoomReservationUpdate.as_view(), name='room-reservation-update'),\n    url(r'submit/$', views.RoomReservationSubmit.as_view(), name='room-reservation-submit'),\n    url(r'(?P<status>[ADF])/(?P<pk>\d+)$', views.reservation_modify_status, name='reservation-modify-status'),\n    url(r'schedule$', views.RoomReservationSchedule.as_view(), name='room-reservation-schedule'),\n    url(r'^delete/(?P<pk>\d+)$', views.RoomReservationDelete.as_view(), name='room-reservation-delete'),\n    url(r'^delete/$', views.RoomReservationDelete.as_view(), name='room-reservation-delete-base'),\n    url(r'^tv_page$', views.RoomReservationTVView.as_view(), 
name='room-reservation-tv-page'),\n    url(r'^itero_page$', views.RoomReservationIteroView.as_view(), name='room-reservation-itero-page'),\n    url(r'^weather$', views.weather_api, name='weather'),\n    url(r'^tv_page_version$', views.tv_page_version, name='tv-page-version'),\n    url(r'^tv_page_reservations$', views.tv_page_reservations, name='tv-page-reservations'),\n    url(r'^tv_page_ticker$', views.tv_page_ticker, name='tv-page-ticker'),\n]\n","sub_path":"ap/room_reservations/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"192816934","text":"# Under The Rainbow\n# Problem 493\n#\n# 70 colored balls are placed in an urn, 10 for each of the seven rainbow colors.\n#\n# What is the expected number of distinct colors in 20 randomly picked balls?\n#\n# Give your answer with nine digits after the decimal point (a.bcdefghij).\n\np_no_red = 1.0\n\nfor i in range(41, 61):\n    p_no_red *= i\n\nfor i in range(51, 71):\n    p_no_red /= i\n\nprint(p_no_red)\n\np_some_red = 1 - p_no_red\nprint(p_some_red)\n\nexpected_number_of_colors = 7 * p_some_red\nprint(round(expected_number_of_colors, 9))\n\n# Answer: 6.818741802\n\n'''\nNote:\n\np_no_red is calculated as follows:\n\n60   59   58        41   60! * 50!\n-- x -- x -- x ... x -- = ---------\n70   69   68        51   70! * 40!\n\nI figure an iterative approach should be faster than using factorials,\nsince most of it cancels.\n'''\n","sub_path":"euler_493.py","file_name":"euler_493.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"109377905","text":"#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2020\n# Leandro Toledo de Souza <devs@python-telegram-bot.org>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\nimport pytest\nfrom flaky import flaky\n\nfrom telegram import ParseMode\n\n\nclass TestParseMode(object):\n    markdown_text = '*bold* _italic_ [link](http://google.com) [name](tg://user?id=123456789).'\n    html_text = ('<b>bold</b> <i>italic</i> <a href=\"http://google.com\">link</a> '\n                 '<a href=\"tg://user?id=123456789\">name</a>.')\n    formatted_text_formatted = u'bold italic link name.'\n\n    @flaky(3, 1)\n    @pytest.mark.timeout(10)\n    def test_send_message_with_parse_mode_markdown(self, bot, chat_id):\n        message = bot.send_message(chat_id=chat_id, text=self.markdown_text,\n                                   parse_mode=ParseMode.MARKDOWN)\n\n        assert message.text == self.formatted_text_formatted\n\n    @flaky(3, 1)\n    @pytest.mark.timeout(10)\n    def test_send_message_with_parse_mode_html(self, bot, chat_id):\n        message = bot.send_message(chat_id=chat_id, text=self.html_text,\n                                   parse_mode=ParseMode.HTML)\n\n        assert message.text == self.formatted_text_formatted\n","sub_path":"tests/test_parsemode.py","file_name":"test_parsemode.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"141784116","text":"from flask import Flask, render_template, request, redirect, url_for\n\napp = Flask(__name__)\n\ndef convert_miliseconds(ms):\n    result = \"\"\n    lst = [(ms // 3600000, ' hour/s '), ((ms // 60000) % 60, ' minute/s '), ((ms // 1000) % 60, ' second/s ')]\n    for t, text in lst:\n        result += f\"{t and (str(t) + text) or ''}\"\n    return f\"{result or ('just ' + str(ms) + ' miliseconds') }\"\n\n@app.route(\"/\", methods = [\"GET\", \"POST\"])\ndef home():\n    if request.method == \"POST\":\n        number = request.form[\"number\"]\n        if number.isdigit():\n            return render_template(\"result.html\", milliseconds = number, result = convert_miliseconds(int(number)), developer_name = \"Baris YURTTAV\") \n        else: \n            return render_template(\"index.html\", not_valid = True, developer_name = \"Group_Kilo\") \n    else:\n        return render_template(\"index.html\", not_valid = False, developer_name = \"Group_Kilo\")\n\nif __name__ == \"__main__\":\n    #app.run(debug = True)\n    app.run(host='0.0.0.0', port=80)","sub_path":"aws/projects/002-milliseconds-converter/JKL/Kilo/groupkilo_msapp.py","file_name":"groupkilo_msapp.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"192263684","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\n\n# ROC curves are typically used in binary classification to study the output of a classifier.\n\ndef ComputeAUC(model, data, targets):\n    predictions = model.predict(data)\n    fpr, tpr, _ = metrics.roc_curve(targets, predictions, pos_label=1)\n    auc = metrics.auc(fpr, tpr)\n    # print \"AUC = \", auc\n\n    plt.figure()\n    lw = 2 # line width\n    plt.plot(\n        fpr,\n        tpr,\n        color='darkorange',\n        lw=lw,\n        label='ROC curve (area = %0.2f)' % auc)\n    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n    plt.xlim([0.0, 1.0])\n    plt.ylim([0.0, 1.05])\n    plt.xlabel('False Positive Rate')\n    plt.ylabel('True Positive Rate')\n    plt.title('Receiver operating characteristic example')\n    plt.legend(loc=\"lower right\")\n    plt.show()\n\n\n\ndef df2xy(datasetframe, target):\n    features = list(datasetframe.columns.values)\n    features.remove(target)\n    x = datasetframe[features]\n    y = datasetframe[target]\n    return x, y\n\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn 
import linear_model\n\n\n# Linear Regression using sklearn\ndef sklearn_linear_regression(x, y, test_size=0.3, log_scale_plot=False):\n    \"\"\"\n    :param x: x can be either one-dimension or multi-dimension\n    :param y: y is the dependent variable\n    :param test_size: the ratio to split training and testing sets\n    :return:\n    \"\"\"\n    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_size, random_state=42)\n\n    regr = linear_model.LinearRegression()\n    regr.fit(x_train, y_train)\n\n    print('Coefficients: \\n', regr.coef_)\n    # The mean square error\n    print(\"Residual sum of squares: %.2f\" % np.mean((regr.predict(x_test) - y_test) ** 2))\n    # Explained variance score: 1 is perfect prediction\n    print('Variance score: %.2f' % regr.score(x_test, y_test))\n\n    # sklearn linear regression cross validation\n    print(\"10-Fold TestingSet Cross Validation:\")\n    cross_val_score_results = cross_val_score(regr, x_test, y_test, cv=10)\n    print(cross_val_score_results)\n    print(\"Mean Score =\", cross_val_score_results.mean())\n\n    predicted = cross_val_predict(regr, x_test, y_test, cv=10)\n    plt.figure(figsize=(12, 6))\n    plt.scatter(y_test, predicted, edgecolors=(0, 0, 0))\n    plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=4)\n    plt.xlabel('Measured')\n    plt.ylabel('Predicted')\n    if log_scale_plot:\n        plt.xscale('log')\n        plt.yscale('log')\n    plt.show()\n\n    return regr.coef_\n\n# Gradient Boosting Regressor\n# This example fits a Gradient Boosting model with least squares loss and 50 regression trees of depth 4.\n\nfrom sklearn import ensemble\nfrom sklearn.metrics import mean_squared_error\nimport matplotlib.pyplot as plt\n\n\ndef sklearn_gradient_boosting_regressor(x, y, test_size=0.3):\n    \"\"\"\n    :param x: x can be either one-dimension or multi-dimension\n    :param y: y is the dependent variable\n    :param test_size: the ratio to split training and testing sets\n    :return:\n    \"\"\"\n    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_size, random_state=42)\n\n    params = {'n_estimators': 50, 'max_depth': 4, 'min_samples_split': 2,\n              'learning_rate': 0.01, 'loss': 'ls'}\n\n    regr = ensemble.GradientBoostingRegressor(**params)\n    regr.fit(x_train, y_train)\n    mse = mean_squared_error(y_test, regr.predict(x_test))\n    print(\"MSE: %.4f\" % mse)\n    print(\"Score: %.4f\" % regr.score(x_test, y_test))\n\n    # plot test set deviance\n    test_score = np.zeros((params['n_estimators'],), dtype=np.float64)\n    for i, y_pred in enumerate(regr.staged_predict(x_test)):\n        test_score[i] = regr.loss_(y_test, y_pred)\n\n    plt.figure(figsize=(12, 6))\n    plt.subplot(1, 2, 1)\n    plt.title('Deviance')\n    plt.plot(np.arange(params['n_estimators']) + 1, regr.train_score_, 'b-',\n             label='Training Set Deviance')\n    plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',\n             label='Test Set Deviance')\n    plt.legend(loc='upper right')\n    plt.xlabel('Boosting Iterations')\n    plt.ylabel('Deviance')\n    plt.show()\n\n    return regr.feature_importances_\n\n# Show feature importance\n# feature_importance corresponds to regr.feature_importances_ or regr.coef_\n\ndef plot_feature_importance(feature_importance, feature_names=None):\n    # make importances relative to max importance\n    feature_importance = 100.0 * (feature_importance / feature_importance.max())\n    sorted_idx = np.argsort(feature_importance)\n    pos = np.arange(sorted_idx.shape[0]) + .5\n    plt.figure(figsize=(10, 6))\n    plt.barh(pos, feature_importance[sorted_idx], align='center')\n    if feature_names is not None:\n        plt.yticks(pos, 
np.array(feature_names)[sorted_idx])\n plt.xlabel('Relative Importance')\n plt.title('Variable Importance')\n plt.show()\n\n","sub_path":"sklearn.py","file_name":"sklearn.py","file_ext":"py","file_size_in_byte":4992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"651566637","text":"__author__ = 'yjin'\nfrom global_constants import *\n# from PIL import Image\nimport numpy as np\nimport time\n\nclass Logger:\n def __init__(self, loss=LOSS_TYPE[\"CROSS_ENTROPY\"], lr=0.1, batch_size=100):\n self.epoch = 0\n self.loss_type = loss\n self.learning_rate = lr\n self.batch_size = batch_size\n self.grad = []\n self.t_err = []\n self.t_acc = []\n self.v_err = []\n self.v_acc = []\n self.dir = \"../images/\"\n\n self.final_f = None\n self.final_y = None\n\n self.start = None\n\n def update(self, t_err, t_acc, v_err, v_acc):\n self.t_err.append(t_err)\n self.t_acc.append(t_acc)\n\n self.v_err.append(v_err)\n self.v_acc.append(v_acc)\n\n self.epoch += 1\n # print \"Epoch \", self.epoch-1, \" >> t_err= \", t_err, \", t_acc= \", t_acc, \", v_err= \", v_err, \", v_acc= \", v_acc\n\n def gradient(self, grad):\n self.grad.append(grad)\n\n # row : prediction, col : label\n # def draw_conf_mat(self, f, y, name):\n # batch = f.shape[0]\n # mat = np.zeros((f.shape[1], f.shape[1]))\n # pr = f.argmax(axis=1)\n # lb = y.argmax(axis=1)\n #\n # for b in range(batch):\n # mat[lb[b], pr[b]] += 1\n #\n # mat = np.array(255.0/mat.max() * (mat - mat.min())).astype(np.uint8)\n #\n # img = Image.fromarray(mat)\n # img.save(self.dir + \"conf_mat/\" + name + \".png\")\n\n # def draw_mean(self, mean, name):\n # for i in range(mean.shape[0]):\n # mat = np.resize(mean[i]*255, (28, 28)).astype(np.uint8)\n # img = Image.fromarray(mat)\n # img.save(self.dir + \"mean/\" + name + str(i) + \".png\")\n\n # def show_img(self, mat):\n # mat = np.array(255.0/mat.max() * (mat - mat.min())).astype(np.uint8)\n # img = Image.fromarray(mat)\n # img.show()\n\n def start_learn(self):\n self.start = time.time()\n\n def is_finish(self, limit):\n if (time.time() - self.start)/60 > limit:\n return True\n return False\n\n # def save_log(self, model, f, y):\n # type = HIDDEN_LIST[model.hidden_type]\n # lr = str(self.learning_rate)\n # b = str(self.batch_size)\n # ep = str(self.epoch)\n # wi = \"-1to1\"\n # fname = \"layer%d\" % model.layer + \"_\" + type + \"_lr\"+lr+\"_batch\"+b+\"_epoch\"+ep+\"_wi\"+wi\n # dump = np.vstack((self.t_err, self.t_acc, self.v_err, self.v_acc)).T\n # np.savetxt(\"../log/\"+fname+\".csv\", dump, delimiter=\",\")\n\n # self.draw_mean(model.mean, fname)\n # self.draw_conf_mat(f, y, fname)\n\n\n\n","sub_path":"MachineLearning/RBFN/code_2011-11497/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"566670588","text":"# coding: utf-8\n\nfrom sys import argv\nfrom re import findall\n\n\nclass ContaZero:\n def __init__(self, num):\n self.num = num\n\n def conta_zeros(self):\n zeros = findall(\"0+\", self.num)\n if len(zeros) != 0:\n occur = [len(_) for _ in zeros]\n return max(occur)\n else:\n return 0\n\n\nif __name__ == '__main__':\n print(ContaZero(str(argv[1:])).conta_zeros())\n","sub_path":"source/handler/conta_zero_kleyton.py","file_name":"conta_zero_kleyton.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"441967649","text":"from 
django.shortcuts import render\nfrom django.contrib.sites.models import Site\n\nfrom .models import Article\n\n\ndef index(request, slug=None):\n    current_site = None\n    if slug is None:\n        article = Article.objects.latest('date')\n    else:\n        current_site = Site.objects.get_current()\n        article = Article.objects.get(slug=slug)\n    context = {'article': article, 'current_site': current_site}\n    return render(request, 'article/index.html', context)\n\n\ndef archives(request, tag=None):\n    if tag is None:\n        articles = None\n    else:\n        articles = Article.objects.filter(tags__name__in=[tag])\n\n    context = {'articles': articles}\n    return render(request, 'article/archives.html', context)\n\n\ndef about(request):\n    return render(request, 'mysite/about.html')\n","sub_path":"articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"619796599","text":"\n\nINPUT = 'B-large.in'\nOUTPUT = 'B-large.out'\n\n\ndef solve(C, F, X):\n\n    cps = 2.0\n    farm_time = 0.0\n    time = X / cps\n    \n    while True:\n        farm_time += C / cps\n        cps += F\n        ntime = farm_time + X / cps\n        if ntime < time:\n            time = ntime\n        else:\n            break\n    return time\n\n\nif __name__ == '__main__':\n    inp = open(INPUT)\n    out = open(OUTPUT, 'w')\n    \n    T = int(inp.readline())\n\n    for case in range(T):\n        sol = solve(*map(float, inp.readline().split()))\n        out.write('Case #%i: %.7f\\n' % (case + 1, sol))","sub_path":"solutions_python/Problem_136/2419.py","file_name":"2419.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"173207141","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n\"\"\"Fetch news from Ningxia government websites\"\"\"\n\nimport requests,jieba,sys,time,re\nfrom bs4 import BeautifulSoup\nfrom jinja2 import Template\n\n\ndef geturls(num):\n\t\"\"\"\n\t# Fetch num pages of the Guyuan government site's news list via ajax\n\t# params: int num\n\t# return: list urls\n\t\"\"\"\n\turls=[]\n\theader={'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0'}\n\tfor i in range(1,num):\n\t\tpayload={'classid':'10','strnum':\"80\",'showtype':'00111','linenum':'5','pagesize':\"30\",'currpage':str(i)}\n\t\tr=requests.get('http://web.nxgy.gov.cn/plus/ajaxpage/text.php',params=payload,headers=header)\n\t\tsep=BeautifulSoup(r.text,'lxml')\n\t\tfor url in sep.find_all('a'):\n\t\t\turls.append(url.get('href').replace('\\\\\"',''))\n\treturn urls\n\ndef getcontent(url):\n\t\"\"\"\n\t# Fetch a news article from the Guyuan government site by url\n\t# params: string url\n\t# return: dict\n\t\"\"\"\n\theader={'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0'}\n\trep=requests.get(url,headers=header)\n\trep.encoding=\"utf-8\"\n\tsep=BeautifulSoup(rep.text,'lxml')\n\ttitle=sep.title.string\n\ttimes=sep.select('.con_box')[0].get_text()\n\ttime=times[-11:]\n\tcontent=sep.select('.tex_box')[0].get_text()\n\treturn {'url':url,'title':title,'content':content,'time':time}\n\ndef create(me):\n\t\"\"\"\n\t# Generate an article page from a content dict\n\t# params: dict\n\t# return: None\n\t\"\"\"\n\ttep=\"\"\"\n\t{{ title }}_固原政府网\n\t{{ title }}\n\t{{ content }}\n\t\"\"\"\n\ttemplate = Template(tep)\n\ts=template.render(title=me['title'],content=me['content'])\n\twith open(\"2.html\",'w',encoding = 'utf-8') as f:\n\t\tf.write(s)\n\n\ndef main():\n\t\n\t# url=\"http://www.nxgy.gov.cn/article/201705/46973.html\"\n\t# me=getcontent(url)\n\t# print(me)\n\tninxia={\n \"ningxia\":(\"宁夏\",'http://www.nx.gov.cn/'),\n\t\"yinchuan\":(\"银川\",'http://www.yinchuan.gov.cn/'),\n\t\"xinqinqu\":(\"兴庆区\",'http://www.xqq.gov.cn/'),\n\t\"xixiaqu\":(\"西夏区\",'http://www.ycxixia.gov.cn/'),\n\t\"jinfengqu\":(\"金凤区\",'http://www.ycjinfeng.gov.cn/'),\n\t\"linwu\":(\"灵武市\",\"http://www.nxlw.gov.cn/\"),\n\t\"yongningxian\":(\"永宁县\",\"http://www.chinayn.gov.cn/\"),\n\t\"helanxian\":(\"贺兰县\",\"http://www.nxhl.gov.cn/\"),\n\t\"shizuishan\":(\"石嘴山市\",\"http://www.nxszs.gov.cn/\"),\n\t\"dawukouqu\":(\"大武口区\",\"http://www.dwk.gov.cn/\"),\n\t\"huinongqu\":(\"惠农区\",\"http://www.huinong.gov.cn/\"),\n\t\"pinluoxian\":(\"平罗县\",\"http://www.nxpl.gov.cn/\"),\n\t\"zhongwei\":(\"中卫市 \",\"http://www.nxzw.gov.cn/\"),\n\t\"shapotouqu\":(\"沙坡头区\",\"http://www.sptq.gov.cn/\"),\n\t\"zhongningxian\":(\"中宁县\",\"http://www.nxzn.gov.cn/\"),\n\t\"haiyuanxian\":(\"海原县\",\"http://www.hy.gov.cn/\"),\n\t\"guyuanshi\":(\"固原市\",\"http://www.nxgy.gov.cn/\"),\n\t\"yuanzhouqu\":(\"原州区\",\"http://www.yzh.gov.cn/\"),\n\t\"xijixian\":(\"西吉县\",\"http://www.nxxj.gov.cn/\"),\n\t\"longdexian\":(\"隆德县\",\"http://www.nxld.gov.cn/\"),\n\t\"jinyuanxian\":(\"泾源县\",\"http://www.nxjy.gov.cn/\"),\n\t\"pengyangxian\":(\"彭阳县\",\"http://www.pengyang.gov.cn/\"),\n\t\"wuzhongshi\":(\"吴忠市\",\"http://www.wuzhong.gov.cn/\"),\n\t\"litongqu\":(\"利通区\",'http://ltq.wuzhong.gov.cn/'),\n\t\"hongsibuqu\":(\"红寺堡区\",\"http://hsb.wuzhong.gov.cn/\"),\n\t\"qingtongxiashi\":(\"青铜峡市\",\"http://qtx.wuzhong.gov.cn/\"),\n\t\"tongxinxian\":(\"同心县 \",\"http://www.nxtx.gov.cn/\"),\n\t\"yanchixian\":(\"盐池县\",\"http://www.yanchi.gov.cn/\")\n\t}\n\tprint(ninxia)\n\n\nif __name__ == '__main__':\n\tmain()\n\tsys.exit(\"game over\")\n","sub_path":"gov/ningxia.py","file_name":"ningxia.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"182268539","text":"def main():\n    n = int(input())\n    a = int(input())\n    b = int(input())\n    c = int(input())\n\n    adj = [[0, a, b], [a, 0, c], [b, c, 0]]\n\n    ans = 0\n    pos = 0\n    for _ in range(n - 1):\n        dst = -1\n        for j in range(3):\n            if j == pos:\n                continue\n            if dst == -1 or adj[pos][j] < adj[pos][dst]:\n                dst = j\n\n        ans += adj[pos][dst]\n        pos = dst\n\n    print(ans)\n\n\nmain()\n","sub_path":"codeforces/876/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"322892378","text":"import RPi.GPIO as GPIO\nimport time\nimport Adafruit_DHT\nGPIO.setmode(GPIO.BCM)\ndht=27\npir_in=4\npir_out=23\nldr_in=17\nldr_out=24\nGPIO.setup(pir_in,GPIO.IN)\nGPIO.setup(pir_out,GPIO.OUT)\nGPIO.setup(ldr_out,GPIO.OUT)\ncurrent_state=0\ni=0\ndef rc_time (ldr_in):\n    count = 0;\n\n    GPIO.setup(ldr_in, GPIO.OUT)\n    GPIO.output(ldr_in, GPIO.LOW)\n    time.sleep(0.1)\n    GPIO.setup(ldr_in, GPIO.IN)\n\n    while (GPIO.input(ldr_in) == GPIO.LOW):\n        count += 1\n\n    return count\ntry:\n    #Main loop\n    while True:\n        \n        lumens=rc_time(ldr_in)\n        print (lumens)\n        if (lumens>10000):\n            GPIO.output(ldr_out,True)\n        else:\n            GPIO.output(ldr_out,False)\n        \n        current_state=GPIO.input(pir_in)   \n        if (current_state==1):\n            GPIO.output(pir_out,True)\n        else:\n            
GPIO.output(pir_out,False)\n humidity, temperature = Adafruit_DHT.read_retry(11, 27)\n print (\"Humidity = {} %; Temperature = {} C\".format(humidity,temperature))\n time.sleep(5)\n \nexcept KeyboardInterrupt:\n pass\nfinally:\n GPIO.cleanup()\n\n\n\n\n\n","sub_path":"Home Automation.py","file_name":"Home Automation.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"577485534","text":"from artist import Artist\nfrom song import Song\nfrom genre import Genre\n#artists\nlady_gaga = Artist(\"Lady Gaga\")\nhozier = Artist(\"Hozier\")\ndua_lipa = Artist(\"Dua Lipa\")\nfyre = Artist(\"FYRE\")\n#genres\npop = Genre(\"Pop\")\nrock = Genre(\"Rock\")\nalt = Genre(\"Alternative\")\nindie = Genre(\"Indie\")\nfolk = Genre(\"Folk\")\ncountry = Genre(\"Country\")\nfunk = Genre(\"Funk\")\njam = Genre(\"Jam\")\nrap = Genre(\"Rap\")\n\nmbappe = Song(\"Kylian Mbappe\", fyre, rap)\npoker_face = Song(\"Poker Face\", lady_gaga, pop)\nrun = Song(\"Run\", hozier, rock)\nsedated = Song(\"Sedated\", hozier, rock)\nnew_rules = Song(\"New Rules\", dua_lipa, pop)\nidgf = Song(\"IDGF\", dua_lipa, pop)\nteam02 = Song(\"Team 02\", fyre, rap)\n","sub_path":"dilyan_sol_artists.py","file_name":"dilyan_sol_artists.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"183055811","text":"import numpy as np\nimport pandas as pd\nimport scanpy as sc\nimport anndata\nimport scprep\nimport tempfile\nimport os\n\n\ndef load_scicar(\n rna_url,\n rna_cells_url,\n rna_genes_url,\n atac_url,\n atac_cells_url,\n atac_genes_url,\n test=False,\n):\n rna_cells = pd.read_csv(rna_cells_url, low_memory=False)[\"sample\"]\n rna_genes = pd.read_csv(rna_genes_url, low_memory=False)[\"gene_id\"]\n atac_cells = pd.read_csv(atac_cells_url, low_memory=False)[\"sample\"]\n atac_genes = pd.read_csv(atac_genes_url, low_memory=False, index_col=0)[\"peak\"]\n\n with tempfile.TemporaryDirectory() as tempdir:\n rna_file = os.path.join(tempdir, \"rna.mtx.gz\")\n scprep.io.download.download_url(rna_url, rna_file)\n rna_data = scprep.io.load_mtx(rna_file, cell_axis=\"col\").tocsr()\n atac_file = os.path.join(tempdir, \"atac.mtx.gz\")\n scprep.io.download.download_url(atac_url, atac_file)\n atac_data = scprep.io.load_mtx(atac_file, cell_axis=\"col\").tocsr()\n\n if test:\n remove_genes = rna_data.sum(axis=0).A.flatten() < 1000\n rna_genes, rna_data = rna_genes[~remove_genes], rna_data[:, ~remove_genes]\n remove_genes = atac_data.sum(axis=0).A.flatten() < 1000\n atac_genes, atac_data = atac_genes[~remove_genes], atac_data[:, ~remove_genes]\n\n rna_genes, rna_data = rna_genes[:200], rna_data[:, :200]\n atac_genes, atac_data = atac_genes[:400], atac_data[:, :400]\n\n rna_data, rna_cells = scprep.filter.filter_empty_cells(rna_data, rna_cells)\n atac_data, atac_cells = scprep.filter.filter_empty_cells(atac_data, atac_cells)\n\n common_cells = np.intersect1d(rna_cells, atac_cells)\n if test:\n common_cells = common_cells[:100]\n\n rna_subset = np.isin(rna_cells, common_cells)\n rna_cells, rna_data = rna_cells[rna_subset], rna_data[rna_subset]\n rna_order = np.argsort(rna_cells.to_numpy())\n rna_cells, rna_data = rna_cells.iloc[rna_order], rna_data[rna_order]\n\n atac_subset = np.isin(atac_cells, common_cells)\n atac_cells, atac_data = atac_cells[atac_subset], atac_data[atac_subset]\n atac_order = np.argsort(atac_cells.to_numpy())\n atac_cells, atac_data = 
atac_cells.iloc[atac_order], atac_data[atac_order]\n\n    adata = anndata.AnnData(\n        rna_data,\n        obs=pd.DataFrame(index=rna_cells),\n        var=pd.DataFrame(index=rna_genes),\n    )\n\n    adata.obsm[\"mode2\"] = atac_data\n    adata.uns[\"mode2_obs\"] = atac_cells.to_numpy()\n    adata.uns[\"mode2_var\"] = atac_genes.to_numpy()\n    return adata\n","sub_path":"openproblems/data/scicar/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"385966832","text":"from os import mkdir\nfrom os.path import dirname, realpath, isdir\n\nfilepath = f'{dirname(realpath(__file__))}/logs/'\n\nclass Log:\n    def __init__(self, file):\n        if not isdir(filepath):\n            mkdir(filepath)\n        self.filename = f'{filepath}{file}.log'\n        self.clean()\n\n    def clean(self):\n        with open(self.filename, 'w') as f:\n            f.write('')\n\n    def pr(self, array):\n        s = ''\n        for i in array:\n            s += '[ '\n            for j in i:\n                s += f'{j} '\n            s += ']\\n'\n        s += '\\n'\n        return s\n\n    def write(self, item, title=''):\n        with open(self.filename, 'a') as f:\n            f.write(f'~@[{title}]\\n')\n            f.write(item)\n\n    def prwrite(self, arr, title=''):\n        self.write(self.pr(arr), title)\n","sub_path":"laplace/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"115974329","text":"#!/usr/bin/env python\nimport unittest\nfrom pymodbus.client.sync import ModbusTcpClient as ModbusClient\nfrom base_runner import Runner\n\nclass SynchronousTcpClient(Runner, unittest.TestCase):\n    '''\n    These are the integration tests for the synchronous\n    tcp client.\n    '''\n\n    def setUp(self):\n        ''' Initializes the test environment '''\n        self.initialize([\"../tools/reference/diagslave\", \"-m\", \"tcp\", \"-p\", \"12345\"])\n        self.client = ModbusClient(port=12345)\n\n    def tearDown(self):\n        ''' Cleans up the test environment '''\n        self.client.close()\n        self.shutdown()\n\n#---------------------------------------------------------------------------#\n# Main\n#---------------------------------------------------------------------------#\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"examples/functional/synchronous-tcp-client.py","file_name":"synchronous-tcp-client.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"242701723","text":"#!/usr/bin/python3\n\"\"\"\nFlask route that returns json status response\n\"\"\"\nfrom api.v1.views import app_views\nfrom flask import abort, jsonify, request\nfrom models import storage, CNC\nfrom os import environ\nSTORAGE_TYPE = environ.get('HBNB_TYPE_STORAGE')\n\n@app_views.route('/users/<user_id>/places', methods=['GET'])\ndef places_per_user(user_id=None):\n    \"\"\"\n    places route to handle http GET for the places liked by a user\n    \"\"\"\n    user_obj = storage.get('User', user_id)\n\n    if request.method == 'GET':\n        if user_obj is None:\n            abort(404, 'Not found')\n        if STORAGE_TYPE == 'db':\n            users_places = user_obj.places\n        else:\n            users_places = [storage.get('Place', place_id)\n                            for place_id in user_obj.place_ids]\n        return jsonify([obj.to_json() for obj in users_places])\n\n\n
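# DELETE below unlinks a liked place from the user; POST links it, returning 200 if the\n# link already exists and 201 when it is newly created.\n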
@app_views.route('/users/<user_id>/places/<place_id>',\n                 methods=['DELETE', 'POST'])\ndef user_to_place(user_id=None, place_id=None):\n    \"\"\"\n    places route to handle http DELETE and POST for a place liked by a given user\n    \"\"\"\n    place_obj = storage.get('Place', place_id)\n    user_obj = storage.get('User', user_id)\n    if place_obj is None:\n        abort(404, 'Not found')\n    if user_obj is None:\n        abort(404, 'Not found')\n\n    if request.method == 'DELETE':\n        if (place_obj not in user_obj.places and\n                place_obj.id not in user_obj.places):\n            abort(404, 'Not found')\n        if STORAGE_TYPE == 'db':\n            user_obj.places.remove(place_obj)\n        else:\n            user_obj.place_ids.pop(place_obj.id, None)\n        user_obj.save()\n        return jsonify({}), 200\n\n    if request.method == 'POST':\n        if (place_obj in user_obj.places or\n                place_obj.id in user_obj.places):\n            return jsonify(place_obj.to_json()), 200\n        if STORAGE_TYPE == 'db':\n            user_obj.places.append(place_obj)\n        else:\n            user_obj.places = place_obj\n        return jsonify(place_obj.to_json()), 201\n","sub_path":"roamrs1/models/users_places.py","file_name":"users_places.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"440138356","text":"import tensorflow as tf\nimport numpy as np\nimport sys, os, glob, gc\nimport pandas as pd\nfrom data import create_dataloader_train_labeled\nfrom DCGAN import DCGAN\nfrom StackedSRM import StackedSRM\nfrom DCGAN_Scorer import Scorer_head\nfrom tqdm import trange\nfrom PIL import Image\nimport datetime, time\nfrom argparse import ArgumentParser\nimport layers\n\nglobal_seed=5\n\ntf.random.set_random_seed(global_seed)\nnp.random.seed(global_seed)\n\nparser = ArgumentParser()\nparser.add_argument('-d', '--images_dir', type = str, required=True, help = 'path to directory with images 64x64 to upsample')\nparser.add_argument('-ns', '--nb_stacks', type = int, default = 4, help = 'number of stacks')\n\nargs = parser.parse_args()\n\ndef timestamp():\n    return datetime.datetime.fromtimestamp(time.time()).strftime(\"%Y.%m.%d-%H:%M:%S\")\n\nCURR_TIMESTAMP=timestamp()\n\nBATCH_SIZE=1 # generate images one by one\n\nC, H, W = 1, 1000, 1000 # images dimensions\nNB_STACKS=args.nb_stacks\nINPUT_IMAGES_DIR=args.images_dir\n\n# StackedSRM paths\nlist_of_files = glob.glob('./LOG_SRM/*')\nlatest_dir = max(list_of_files, key=os.path.getctime) # latest created dir for latest experiment\nLOG_DIR_SRM=latest_dir\nCHECKPOINTS_PATH_SRM = os.path.join(LOG_DIR_SRM, \"checkpoints\")\n\n# Scorer paths\nlist_of_files = glob.glob('./LOG_DCGAN_SCORER/*')\nlatest_dir = max(list_of_files, key=os.path.getctime) # latest created dir for latest experiment\nLOG_DIR_SCORER=latest_dir\nCHECKPOINTS_PATH_SCORER = os.path.join(LOG_DIR_SCORER, \"checkpoints\")\n\nDATA_ROOT=\"./data\"\nCLUSTER_DATA_ROOT=\"/cluster/scratch/mamrani/data\"\nif os.path.exists(CLUSTER_DATA_ROOT):\n    DATA_ROOT=CLUSTER_DATA_ROOT\n\nLOG_DIR = os.path.join(\"./LOG_UPSAMPLED\", CURR_TIMESTAMP)\nGENERATED_SAMPLES_DIR= os.path.join(LOG_DIR, \"upsampled_samples\")\n\n# printing parameters\nprint(\"\\n\")\nprint(\"Run infos:\")\nprint(\"    BATCH_SIZE: {}\".format(BATCH_SIZE))\nprint(\"    LOG_DIR_SRM: {}\".format(LOG_DIR_SRM))\nprint(\"    LOG_DIR_SCORER: {}\".format(LOG_DIR_SCORER))\nprint(\"    INPUT_IMAGES_DIR: {}\".format(INPUT_IMAGES_DIR))\nprint(\"    GENERATED_SAMPLES_DIR: {}\".format(GENERATED_SAMPLES_DIR))\nprint(\"\\n\")\nsys.stdout.flush()\n\nimages_paths = glob.glob(os.path.join(INPUT_IMAGES_DIR, \"*\"))\n#print(images_paths)\n\n#sys.exit(0)\n# remove warning messages\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"]=\"2\"\ntf.logging.set_verbosity(tf.logging.ERROR)\n\nconfig = 
tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.visible_device_list = \"0\"\n\nsrm_graph = tf.Graph()\nsrm_sess = tf.Session(graph=srm_graph, config=config)\nwith srm_graph.as_default():\n # StackedSRM model\n print(\"Building StackedSRM model ...\")\n sys.stdout.flush()\n srm_im_pl = tf.placeholder(dtype=tf.float32, shape=[BATCH_SIZE, 1, 64, 64])\n model= StackedSRM(NB_STACKS)\n outputs_pred = model(inp=srm_im_pl, training=False)\n \n print(\"Restoring latest model from {}\".format(CHECKPOINTS_PATH_SRM))\n saver = tf.train.Saver()\n latest_checkpoint = tf.train.latest_checkpoint(CHECKPOINTS_PATH_SRM)\n print(\"Latest checkpoint: {}\\n\".format(latest_checkpoint))\n saver.restore(srm_sess, latest_checkpoint)\n \n#sys.exit(0)\n\nscorer_graph = tf.Graph()\nscorer_sess = tf.Session(graph=scorer_graph, config=config)\nwith scorer_graph.as_default():\n\n # DCGAN Scorer model\n print(\"Building DCGAN Scorer model ...\")\n sys.stdout.flush()\n scorer_im_pl = tf.placeholder(dtype=tf.float32, shape=[BATCH_SIZE, 1, 1000, 1000])\n model1 = DCGAN()\n _, ops = model1.discriminator_model(inp=scorer_im_pl, training=False, resize=True) # get discriminator output\n\n flat = ops[\"flat\"]\n model2 = Scorer_head()\n scores_pred = model2.scorer_head_model(features=flat, training=False)\n \n print(\"Restoring latest model from {}\".format(CHECKPOINTS_PATH_SCORER))\n saver = tf.train.Saver()\n latest_checkpoint = tf.train.latest_checkpoint(CHECKPOINTS_PATH_SCORER)\n print(\"Latest checkpoint: {}\\n\".format(latest_checkpoint))\n saver.restore(scorer_sess, latest_checkpoint)\n \n#sys.exit(0)\n\nif not os.path.exists(GENERATED_SAMPLES_DIR):\n os.makedirs(GENERATED_SAMPLES_DIR)\n\nupsampled_images_scores = []\nupsampled_images_names = []\ncounter = 0\nfor path in images_paths:\n print(counter)\n im_val = np.array([np.array(Image.open(path)).reshape([64, 64, 1]).transpose(2,0,1)]) # read image and put in channels first\n# print(im_val.shape)\n\n im_val = (im_val)/255.0 # normalize to [0, 1] for feeding to SRM\n \n srm_feed_dict = {srm_im_pl: im_val}\n last_output = srm_sess.run(outputs_pred[-1], srm_feed_dict)[:, :, 12:-12, 12:-12] # get the last output of the StackedSRM model and remove padding\n \n# print(last_output.shape)\n\n scorer_input = (last_output*2.0)-1 # renormalize to [-1, 1] to feed to scorer model\n \n score = scorer_sess.run(scores_pred, {scorer_im_pl: scorer_input})[0, 0]\n\n img = (last_output[0]*255.0).transpose(1,2,0).astype(\"uint8\")[:, :, 0] # denormalize output and convert to channels last format \n \n print(img.shape)\n print(\"min: {}, max: {}\".format(img.min(), img.max()))\n image = Image.fromarray(img)\n filename = path.split(\"/\")[-1].split(\".\")[0]\n# print(filename)\n image.save(os.path.join(GENERATED_SAMPLES_DIR, filename+\".png\"))\n\n upsampled_images_scores.append(score)\n upsampled_images_names.append(filename)\n counter += 1\n \ndf = pd.DataFrame(data={'Id': upsampled_images_names, 'Score': upsampled_images_scores})\n\nscores_file = os.path.join(LOG_DIR, \"gen_images_scores.csv\")\ndf.to_csv(scores_file, index=False)\nprint(\"Saved generated images scores at {}\".format(scores_file))\n \n \n \n \n \n \n \n \n","sub_path":"test_SRM_Scorer.py","file_name":"test_SRM_Scorer.py","file_ext":"py","file_size_in_byte":5607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"139404744","text":"'''\nAuthor: Puffrora\nDate: 2022-05-05 11:46:49\nLastModifiedBy: Puffrora\nLastEditTime: 2022-05-05 
11:46:59\n'''\n\n\n# TC: O(logn)\n# SC: O(1)\nclass Solution:\n    def numberOfMatches(self, n: int) -> int:\n\n        res = 0\n        while n > 1:\n            if n & 1:\n                res += (n - 1) // 2\n                n = (n - 1) // 2 + 1\n            else:\n                res += n // 2\n                n = n // 2\n        \n        return res\n\n\n# TC: O(1)\n# SC: O(1)\nclass Solution:\n    def numberOfMatches(self, n: int) -> int:\n\n        return n-1\n    ","sub_path":"Leetcode/leetcode1688 比赛中的配对次数.py","file_name":"leetcode1688 比赛中的配对次数.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"256288706","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 20 16:20:23 2021\n\n@author: Will Cunningham, Juao-Guilherme Rosa & Ben Snyder\n\"\"\"\n\n#!/usr/bin/env python3\n\n#Question: How does an increase in R impact the dynamics of the LIF model?\n#Defend answer with equations, and biology, as we did for C in the IF model.\n#Determine how the firing rate of the LIF model varies with input I. Plot the firing rate vs I (the “f-I curve”).\n#\n#Answer: If current is kept constant, an increase in R causes an increase in firing rate.\n#Biologically, resistance is a function of the # of open ion channels (R is inversely related to the # of ion channels open).\n#More open channels correspond to a lower resistance, and allow for a loss of ions (+ charge).\n#This combats the membrane potential change generated by injecting current.\n#Thus, for a constant injected current a greater resistance creates a higher membrane depolarization and greater firing rate.\n#\n#The code below shows how the firing frequency for a given current input increases with greater resistance.\n\n#Alternatively we can solve the differential equation:\n#v'(t) = I/C - v(t)/(R*C)   (the linked query writes k for the injected current I)\n#shamelessly using wolfram alpha, we can see the solution of this equation is:\n#v(t) = c_1 e^(-t/(C*R)) + I * R ref: https://www.wolframalpha.com/input/?i=v%27%28t%29-%28k%2FC-%28v%28t%29%2F%28RC%29%29%29+%3D+0\n#solving for v(0) = 0 gives c_1 = -I*R, so the full equation is -I*R* e^(-t/(C R)) + I*R OR\n# v(t) = I*R(1-e^(-t/(C*R)))\n#further, solving for v(t) = 1 (since that's what we're using as threshold) gives:\n#t = R*C(log((IR)/(IR - 1))) https://www.wolframalpha.com/input/?i=c_1*c_2%281-e%5E%28-t%2F%28c_3*c_2%29%29%29+%3D+1+solve+for+t\n# we want the firing rate, which is the reciprocal of the amount of time it takes to get a single fire: \n#f(I,R,C) = 1/(RC*log((IR)/(IR - 1)))\n#for C,R = 1,1 respectively: https://www.wolframalpha.com/input/?i=y+%3D++1%2F(xlog((x)%2F(x+-+1)))\n
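\n#Sanity check (added sketch, not part of the original assignment): with the threshold\n#Vth = 1 used below, the closed-form rate is only defined when I*R > 1; otherwise the\n#voltage saturates below threshold and the neuron never fires. For R = C = 1:\n#   import numpy as np\n#   I = np.array([2.0, 5.0, 20.0])\n#   print(1/np.log(I/(I - 1)))  # ~ [1.44, 4.48, 19.50], approaching I - 0.5 for large I\n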
\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\ndef return_spikes(I,C,R): #Simulate the LIF neuron for the given parameters and count spikes.\n    \n    Vth = 1; #Define the voltage threshold.\n    Vreset = 0; #Define the reset voltage.\n    dt=0.01 #Set the timestep (0.01s or 10 ms)\n    V = np.zeros([1000,1]) #Initialize V.\n    V[0]=0.2; #Set the initial condition.\n    spikes=0\n\n    for k in range(0,999): #March forward in time,\n        V[k+1] = V[k] + dt*(I/C-(V[k]/(R*C))) #Update the voltage, include resistance into the equation\n        if V[k+1] > Vth: #... and check if the voltage exceeds the threshold.\n            V[k+1] = Vreset #... if so, reset the voltage\n            spikes = spikes+1 #number of spikes\n    \n    return(spikes) #provides output of spike number for use in graphing\n\ndef f_I(capacitance, resistance, color):\n    max_current=100 #sets the number of currents to test\n    slope=1 #Sets the step between tested currents\n    fires = np.zeros([max_current,1]) #Creates empty array to fill with firing data\n    current_range = np.array([slope*k for k in range(0,max_current)]) #Array of current values to test in the neuron model\n\n    for i in range(0,max_current):\n        fires[i]= return_spikes(current_range[i],capacitance,resistance)/10 #firing rate in spikes per second (the run covers 1000 steps of 0.01 s = 10 s)\n\n    plt.plot(current_range,fires,color)\n\n\n\n#Test a low and high value of both resistance and capacitance\nplt.xlim(0,30)\nplt.ylim(0,30)\nplt.xlabel('Current (I)')\nplt.ylabel('Frequency (spikes/s)')\n# plt.title('f-I curve'+ ' (Capacitance = ' + str(capacitance) + ', Resistance = ' + str(resistance) + ')')\nplt.title('f-I curve')\nplt.grid(True, linewidth=0.5, color='k', linestyle='-')\nf_I(1,.2,'b')\nf_I(2,.2,'b:')\nf_I(1,2,'r')\nf_I(2,2,'r:')\nplt.legend(['C=1, R=0.2','C=2, R=0.2','C=1, R=2','C=2, R=2'])\nplt.show()\n\n\n\n\n","sub_path":"20210921_fI Curve_Homework_WC+JR+BS.py","file_name":"20210921_fI Curve_Homework_WC+JR+BS.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"85080882","text":"from typing import List\nfrom bisect import bisect\n\nfrom itertools import permutations\nfrom collections import defaultdict\n\ndd = defaultdict(str)\n# print(len(list(permutations(range(1, 100 + 1), 2))))\nfor a, b in permutations(range(1, 100 + 1), 2):\n    if dd.get(a / b):\n        continue\n    dd[a / b] = str(str(a) + '/' + str(b))\nvs = sorted(dd.keys())[:3044]\n\n\n# print(vs)\n\nclass Solution:\n    def simplifiedFractions(self, n: int) -> List[str]:\n        if n == 1:\n            return []\n\n        # print(dd)\n        index = bisect(vs, 1)\n        # print(index)\n        # print(dd[vs[index]])\n        ans = list()\n        for i in range(3044):\n            if vs[i] >= n:\n                continue\n            v = dd[vs[i]]\n            a, b = [int(x) for x in v.split('/')]\n            if a < n and b <= n:\n                ans.append(v)\n\n        return ans\n\n\n\nif __name__ == '__main__':\n    S = Solution()\n    S.simplifiedFractions(3)\n","sub_path":"zs/26/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"533687961","text":"import re\r\n############ 2 - 1 ###########\r\nfrom calculation import *\r\n##############################\r\n\r\n\r\ndef inputData():\r\n############ 2 - 2 ###########\r\n    # Read a line of text from the user via the input() method.\r\n    inputs = input()\r\n\r\n    # Find the numbers in inputs with a regex and store them in nums.\r\n    # map() converts every element returned by re.findall() to int and\r\n    # list() collects the results, because re.findall() returns its\r\n    # matches as strings.\r\n    nums = list(map(int, re.findall(\"\\d+\", inputs)))\r\n    # Extract the operator with the find_operator function and store it in operator.\r\n    operator = find_operator(inputs)\r\n    return nums, operator\r\n##############################\r\n \r\n\r\ndef find_operator(input_sentence):\r\n############ 2 - 3 ###########\r\n    # Use the operator pattern to find the arithmetic symbol in input_sentence.\r\n    operator = re.findall(\"[-|+|/|*]\", input_sentence)\r\n    return operator\r\n##############################\r\n\r\n
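# Illustrative note (added): entering \"12+34\" yields nums == [12, 34] and operator == ['+'].\r\n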
\r\ndef calculation(input_num, input_operator):\r\n############ 2 - 4 ###########\r\n    # Map each operator to the name of its matching function in the calculation\r\n    # module, keeping the lookup table in a dict object.\r\n    operation_dict = {'+': 'cal_add',\r\n                      '-': 'cal_subtract',\r\n                      '*': 'cal_multiply',\r\n                      '/': 'cal_divide'\r\n                      }\r\n\r\n    # input_num passed in from inputData() is a list, so unpack its elements into num1 and num2.\r\n    num1, num2 = input_num\r\n    # input_operator passed in from inputData() is also a list, so take its element as operator.\r\n    operator = input_operator[0]\r\n    # Look up the function name (value) for the operator (key) in operation_dict.\r\n    # The eval() built-in maps that string straight to a Python object, here the\r\n    # function declared in the calculation module, which is then called with num1 and num2.\r\n    result = eval(operation_dict[operator])(num1, num2)\r\n    return result\r\n##############################\r\n\r\n\r\ndef save_result(input_num, input_operator, ans):\r\n############ 2 - 5 ###########\r\n    output_string = '{}{}{}={}'.format(input_num[0], input_operator[0], input_num[1], ans)\r\n    with open('./result.txt', 'a') as f:\r\n        f.write(output_string + '\\n')\r\n##############################\r\n\r\n\r\ninput_num, input_operator = inputData()\r\nans = calculation(input_num, input_operator)\r\nsave_result(input_num, input_operator, ans)\r\n","sub_path":"HW2/HW2.py","file_name":"HW2.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"168158294","text":"#-*-coding:utf-8-*-\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom being.models import getbeingbyuser,getking,getqueen,getcounterpart,Being,mailcall\nfrom notice.models import getunreadnoticecount,newnotice\nimport datetime,re\n\n\nclass Clause(models.Model):\n    '''the Clause of the law'''\n    section=models.CharField(max_length=128,null=True)\n    creater=models.ForeignKey(User)\n    description=models.CharField(max_length=512)\n    startdate=models.DateField(default=datetime.date.today)\n    enddate=models.DateField(default=datetime.date(9999,8,15))\n    status=models.CharField(max_length=16,choices=(('生效中','生效中'),('审核中','审核中')))\n    card=models.CharField(max_length=16,choices=(('yellow','黄牌'),('red','红牌'),('death','死亡牌'),('info','说明')))\n    class Meta:\n        ordering=['section']\n\n    def __unicode__(self):\n        return u'%s' % (self.description)\n\n    def link(self):\n        l=len(self.description)\n        return ''+(self.description if l<6 else self.description[:6]+' ...')+''\n\nclass ClauseRecord(models.Model):\n    '''the record of the law'''\n    clause=models.ForeignKey(Clause)\n    user=models.ForeignKey(User)\n    operation=models.CharField(max_length=16,choices=(('创建','创建'),('更新','更新'),('批准','批准')))\n    happentime=models.DateTimeField(default=datetime.datetime.now)\n    class Meta:\n        ordering=['-happentime']\n\n    def say(self):\n        being=getbeingbyuser(self.user)\n        t=self.happentime\n        #return self.clause.link()\n        return being.link()+u'在'+t.strftime('%Y年%m月%d日').decode('utf-8')+self.operation+u'了条款'+self.clause.link()\n\nclass FaultRecord(models.Model):\n    '''the record of the law'''\n    clause=models.ForeignKey(Clause,null=True,blank=True)\n    user=models.ForeignKey(User)\n    status=models.CharField(max_length=16,choices=(('confirmed','confirmed'),('confirming','confirming')))\n    reason=models.CharField(max_length=256,blank=True,null=True)\n    happentime=models.DateTimeField(default=datetime.datetime.now)\n    class Meta:\n        ordering=['-happentime']\n\n    def say(self):\n        being=getbeingbyuser(self.user)\n        t=self.happentime\n        if self.clause:\n            return being.link()+u'同学在'+t.strftime('%Y年%m月%d日').decode('utf-8')+u'违反了条款'+self.clause.link()+u',实在是让人生气,给与'+self.clause.card+u'一张,并通报批评,希望'+(u'他' if being.gender=='male' else u'她')+u'能够改过自新,从新做人。'\n        else:\n            return being.link()+u'同学在'+t.strftime('%Y年%m月%d日').decode('utf-8')+u'被罚红牌一张,理由是:“'+self.reason+u'”,并通报批评,希望'+(u'他' if being.gender=='male' else u'她')+u'能够改过自新,从新做人。'\n\n
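# Note (added): getcards() totals convert to a point budget in FaultStats.remain():\n# yellow card = 24, red = 48, death = 96 points, spent via the weighted deeds below\n# (pb=8, ytxs=4, fwc=2, ywqz=1, cancel=1).\n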
class FaultStats(models.Model):\n    '''count the record of the law'''\n    being=models.OneToOneField(Being)\n    pb=models.IntegerField() #weight 8\n    ytxs=models.IntegerField() #weight 4\n    fwc=models.IntegerField() #weight 2\n    ywqz=models.IntegerField() #weight 1\n    cancel=models.IntegerField() #weight 1\n\n    def clear(self):\n        self.ytxs=0\n        self.fwc=0\n        self.pb=0\n        self.ywqz=0\n        self.cancel=0\n\n    def remain(self):\n        cards=getcards(self.being.user)\n        total=cards[0]*24+cards[1]*48+cards[2]*96\n        res=total-(self.cancel+self.ywqz+self.fwc*2+self.ytxs*4+self.pb*8)\n        return res\n\n    def do(self,ywqz,fwc,ytxs,pb,cancel):\n        self.ywqz+=ywqz\n        self.fwc+=fwc\n        self.ytxs+=ytxs\n        self.pb+=pb\n        self.cancel+=cancel\n        self.save()\n        return True\n\n#about the law\ndef getsections():\n    sectionlist=Clause.objects.values_list('section',flat=True).distinct()\n    sectionlist=sectionlist.extra(select={\n        'section_null':\"section IS NULL OR section=''\"},\n        order_by=['section_null','section'])\n    return sectionlist\n\ndef getclauses():\n    qs=Clause.objects.all()\n    qs=qs.extra(select={\n        'section_null':\"section IS NULL OR section=''\"},\n        order_by=['section_null','section'])\n    return qs\n\ndef getclause(cid):\n    return Clause.objects.get(id=cid)\n\ndef updatesection(oldsection,newsection):\n    qs=Clause.objects.filter(section=oldsection)\n    for cl in qs:\n        cl.section=newsection\n        cl.save()\n\ndef newclause(creater,startdate,enddate,description,section,card):\n    enddate=enddate if enddate!=None else datetime.date(9999,8,15)\n    cl=Clause(creater=creater,description=description,startdate=startdate,enddate=enddate,section=section,status=\"审核中\",card=card)\n    cl.save()\n    mailcall(user=getcounterpart(creater).user,title=u'【星际快讯】新的法律制定',content=getbeingbyuser(creater).nickname+u'添加了新的法律条文.(http://www.yanglala.com/country/clause_audit/)。请尽快审核吧~~~')\n    return cl\n\ndef deleteclause(cid):\n    clause=getclause(cid=cid)\n    clause.delete()\n    return True\n\ndef requestdeleteclause(user,cid):\n    clause=getclause(cid=cid)\n    clause.creater=user\n    clause.status='请求删除中'\n    clause.save()\n    mailcall(user=getcounterpart(user).user,title=u'【星际快讯】星际法删除',content=getbeingbyuser(user).nickname+u'申请删除条款.(http://www.yanglala.com/country/clause_audit/)。赶快去审核吧~~~')\n    return True\n\ndef updateclause(user,cid,description,section,card,startdate=datetime.date.today(),enddate=datetime.date(9999,8,15)):\n    clause=Clause.objects.get(id=cid)\n    clause.section=section\n    clause.creater=user\n    clause.startdate=startdate\n    clause.description=description\n    clause.enddate=enddate\n    clause.card=card\n    clause.status='审核中'\n    clause.save()\n    mailcall(user=getcounterpart(user).user,title=u'【星际快讯】星际法修订',content=getbeingbyuser(user).nickname+u'修订了法律.(http://www.yanglala.com/country/clause_audit/)。赶快去审核该修订吧~~~')\n    return True\n\n#about the law record\ndef getrecordsbyuser(user):\n    recs=ClauseRecord.objects.filter(user=user)\n    return recs\n\ndef getrecordsbycid(cid):\n    recs=ClauseRecord.objects.filter(clause_id=cid)\n    return recs\n\ndef getfaultsbyuser(user):\n    qs=FaultRecord.objects.filter(user=user)\n    return qs\n\ndef getfault(fid):\n    qs=FaultRecord.objects.get(id=fid)\n    return qs\ndef getfaults():\n    qs=FaultRecord.objects.all()\n    return qs\n\ndef getconfirming(user):\n    qs=FaultRecord.objects.filter(user=user,status='confirming')\n    return qs\n\n\ndef 
getcards(user):\n faults=getfaultsbyuser(user)\n yelc=faults.filter(clause__card='黄牌',status='confirmed').count()\n redc=faults.filter(clause__card='红牌',status='confirmed').count()+faults.filter(clause__isnull=True,status='confirmed').count()\n deac=faults.filter(clause__card='死亡牌',status='confirmed').count()\n return [yelc,redc,deac]\n\ndef newrecord(cid,user,operation):\n lr=ClauseRecord(clause_id=cid,user=user,operation=operation)\n lr.save()\n return lr\n\ndef newfault(cid,user,currentuser):\n cls=getclause(cid)\n if currentuser==user:\n fr=FaultRecord(clause_id=cid,user=user,status='confirmed')\n mailcall(user=getcounterpart(currentuser).user,title=u'【星际快讯】今日说法',content=u'某不良少年'+getbeingbyuser(currentuser).nickname+u'自愿承认触犯了以下条款:'+cls.description+u', (http://www.yanglala.com/country/fault_list/)。大家赶快一起来嘲笑他吧~~~')\n else:\n fr=FaultRecord(clause_id=cid,user=user,status='confirming')\n mailcall(user=getcounterpart(currentuser).user,title=u'【星际快讯】法院召唤你',content=getbeingbyuser(currentuser).nickname+u'认为你有如下罪行:'+cls.description+u', 将被处以'+cls.card+u', 笨蛋你还不从实招来!(http://www.yanglala.com/country/clause_audit/)~~~')\n fr.save()\n return fr\n\ndef confirmfault(fid):\n fr=FaultRecord.objects.get(id=fid)\n fr.status='confirmed'\n fr.save()\n mailcall(user=getcounterpart(fr.user).user,title=u'【星际快讯】今日说法',content=u'潜逃多日的罪犯'+getbeingbyuser(fr.user).nickname+u'于今日俯首认罪, 按照星球法:'+fr.clause.description+u' 被处以'+fr.clause.card+u'.(http://www.yanglala.com/country/fault_list/)~~~')\n return fr\n\ndef deleterecord(rid):\n rec=ClauseRecord.objects.get(id=rid)\n rec.delete()\n return True\n\ndef deletefault(fid):\n rec=FaultRecord.objects.get(id=fid)\n rec.delete()\n return True\n\ndef getauditing(user):\n being=getcounterpart(user)\n al=Clause.objects.filter(status='审核中',creater=being.user)\n return al\ndef getdeleting(user):\n being=getcounterpart(user)\n al=Clause.objects.filter(status='请求删除中',creater=being.user)\n return al\n\n\ndef audit(user,action,cid):\n clause=Clause.objects.get(id=cid)\n if user==clause.creater:\n return False\n if clause.status==u'审核中':\n if action=='approve':\n clause.status=u'生效中'\n clause.save()\n newrecord(cid,user,'批准')\n newnotice(getcounterpart(user).user,getbeingbyuser(user).nickname+u'在'+datetime.datetime.now().strftime('%Y年%m月%d日').decode('utf-8')+u'批准了条款:'+clause.link())\n elif action=='reject':\n clausedes=clause.description\n clause.delete()\n newnotice(getcounterpart(user).user,getbeingbyuser(user).nickname+u'在'+datetime.datetime.now().strftime('%Y年%m月%d日').decode('utf-8')+u'删除了条款:'+clausedes)\n return True\n elif clause.status==u'请求删除中':\n if action=='approve':\n clausedes=clause.description\n clause.delete()\n newnotice(getcounterpart(user).user,getbeingbyuser(user).nickname+u'在'+datetime.datetime.now().strftime('%Y年%m月%d日').decode('utf-8')+u'同意了删除条款:'+clausedes)\n elif action=='reject':\n clause.status=u'生效中'\n clause.save()\n newrecord(cid,user,'拒删')\n newnotice(getcounterpart(user).user,getbeingbyuser(user).nickname+u'在'+datetime.datetime.now().strftime('%Y年%m月%d日').decode('utf-8')+u'拒绝删除条款:'+clause.link())\n\ndef getnoticestatus(user):\n if (not user) or (not user.is_authenticated()):\n return [0,0,0]\n d=[0,0,0]\n d[0]=getauditing(user).count()+getconfirming(user).count()+getdeleting(user).count()\n d[1]=getunreadnoticecount(user)\n return d\n\ndef bonusfault(user,currentuser,reason):\n if currentuser==user:\n fr=FaultRecord(user=user,status='confirmed',reason=reason)\n else:\n fr=FaultRecord(user=user,status='confirming',reason=reason)\n 
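# persist whichever FaultRecord was created above\n    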
fr.save()\n\n","sub_path":"country/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"544749434","text":"import numpy as np\nfrom Example_RF_classifier import build_features_vector\nimport pickle\nfrom sklearn.metrics import accuracy_score\n__author__ = \"Ulysse Cote-Allard and David St-Onge\"\n__copyright__ = \"Copyright 2007, MIST Lab\"\n__credits__ = [\"David St-Onge\", \"Ulysse Cote-Allard\", \"Kyrre Glette\", \"Benoit Gosselin\", \"Giovanni Beltrame\"]\n__license__ = \"MIT\"\n__version__ = \"1.0\"\n__maintainer__ = \"David St-Onge\"\n__email__ = \"david.st-onge@polymtl.ca\"\n__status__ = \"Production\"\n\nclass DataLoader(object):\n \"\"\"\n General utility class to load and build both the training dataset and the performance\n \"\"\"\n def __init__(self):\n self._number_of_IMU_data_per_example = 50\n self._size_non_overlap = 5\n\n\n def format_data_to_train(self, vector_to_format_emg, vector_to_format_imu_1, vector_to_format_imu_2):\n \"\"\"\n Function to format (calculate the features employed as input by the classifier and build the examples in their right shape)\n\n :param vector_to_format_emg: The emg recording to format into examples\n :param vector_to_format_imu_1: The IMU from the armband placed on the arm to format into examples\n :param vector_to_format_imu_2: The IMU from the armband placed on the leg to format into examples\n\n :return Return: Three arrays containing the examples for the EMG, IMU-Arm, IMU-Leg respectively\n \"\"\"\n # We set number_of_IMU_data_per_example to 50 as a base so that each example is one second in length (IMU is cadenced at 50Hz)\n indice_EMG = 0\n indice_IMU_1 = 0\n indice_IMU_2 = 0\n example_imu_1 = []\n example_imu_2 = []\n example_emg = []\n\n dataset_examples_formatted_emg = []\n dataset_examples_formatted_imu_1 = []\n dataset_examples_formatted_imu_2 = []\n\n # Continue looping until one of the vector has run out of new data to give.\n while (indice_EMG < len(vector_to_format_emg) and indice_IMU_1 < len(vector_to_format_imu_1) and indice_IMU_2 < len(vector_to_format_imu_2)):\n if indice_EMG + 32 >= len(vector_to_format_emg) or indice_IMU_1 + 3 >= len(vector_to_format_imu_1) or indice_IMU_2 + 3 >= len(\n vector_to_format_imu_2): # We reached the end, we want to get free of the loop\n break\n\n for i in range(4): # EMG is at 200Hz, IMU at 50 (4 EMG entry for 1 IMU entry)\n # There's eight EMG channels\n example_emg.append(vector_to_format_emg[indice_EMG:(indice_EMG + 8)])\n indice_EMG += 8\n # There's three IMU channels\n example_imu_1.append(vector_to_format_imu_1[indice_IMU_1:(indice_IMU_1 + 3)])\n example_imu_2.append(vector_to_format_imu_2[indice_IMU_2:(indice_IMU_2 + 3)])\n indice_IMU_1 += 3\n indice_IMU_2 += 3\n if len(example_imu_1) >= self._number_of_IMU_data_per_example:\n # Reshape the example to have 10 sub-examples per examples\n example_emg = np.reshape(example_emg, newshape=(10, 20, 8)).tolist()\n example_imu_1 = np.reshape(example_imu_1, newshape=(10, 5, 3)).tolist()\n example_imu_2 = np.reshape(example_imu_2, newshape=(10, 5, 3)).tolist()\n\n # Get the features for the EMG and IMU\n emg_example, imu_1_example, imu_2_example = build_features_vector.build_features_vector(example_emg,\n example_imu_1,\n example_imu_2)\n\n dataset_examples_formatted_emg.append(emg_example)\n dataset_examples_formatted_imu_1.append(imu_1_example)\n dataset_examples_formatted_imu_2.append(imu_2_example)\n\n # Reshape 
the arrays the way they were constructed\n                example_emg = np.reshape(example_emg, newshape=(200, 8)).tolist()\n                example_imu_1 = np.reshape(example_imu_1, newshape=(50, 3)).tolist()\n                example_imu_2 = np.reshape(example_imu_2, newshape=(50, 3)).tolist()\n\n                # Remove only part of the data accumulated to obtain sliding window over the dataset\n                example_emg = example_emg[4 * self._size_non_overlap::]\n                example_imu_1 = example_imu_1[self._size_non_overlap::]\n                example_imu_2 = example_imu_2[self._size_non_overlap::]\n\n        return np.array(dataset_examples_formatted_emg), np.array(dataset_examples_formatted_imu_1), np.array(dataset_examples_formatted_imu_2)\n\n    def read_training(self, path, number_of_classes):\n        \"\"\"\n        Function to build the training dataset.\n\n        :param path: the path that contains the training dataset recording\n        :param number_of_classes: number of moods made by the performer during training\n\n        :return Return: Four arrays containing the training dataset for the EMG, IMU-Arm, IMU-Leg recording and the labels respectively\n        \"\"\"\n        try:\n            print(\"Reading Data\")\n            X_emg = []\n            X_imu_1 = []\n            X_imu_2 = []\n            Y = []\n            print(number_of_classes)\n            for i in range(number_of_classes * 3):\n                data_read_from_file_emg = np.fromfile(path + \"\\\\classe_%d_emg.dat\" % i, dtype=np.int32)\n                data_read_from_file_imu_1 = np.fromfile(path + \"\\\\classe_%d_first_imu.dat\" % i,\n                                                        dtype=np.float32)\n                data_read_from_file_imu_2 = np.fromfile(path + \"\\\\classe_%d_second_imu.dat\" % i,\n                                                        dtype=np.float32)\n\n                dataset_examples_formatted_emg, dataset_examples_formatted_imu_1, dataset_examples_formatted_imu_2 = self.format_data_to_train(\n                    data_read_from_file_emg, data_read_from_file_imu_1, data_read_from_file_imu_2)\n\n                X_emg.extend(dataset_examples_formatted_emg)\n                X_imu_1.extend(dataset_examples_formatted_imu_1)\n                X_imu_2.extend(dataset_examples_formatted_imu_2)\n                if i < number_of_classes:\n                    Y.extend(i + np.zeros(dataset_examples_formatted_imu_1.shape[0]))\n                elif i < number_of_classes * 2:\n                    Y.extend((i - number_of_classes) + np.zeros(dataset_examples_formatted_imu_1.shape[0]))\n                else:\n                    Y.extend((i - (number_of_classes * 2)) + np.zeros(dataset_examples_formatted_imu_1.shape[0]))\n            print(Y)\n\n            return X_emg, X_imu_1, X_imu_2, Y\n        except Exception as e:\n            print(e)\n\n    def read_performance_data(self, path_seance, path_labels, dataset_delay):\n        \"\"\"\n        Function to build the performance dataset.\n\n        :param path_seance: the path that contains the performance recording\n        :param path_labels: the path that contains the true labels for this performance\n        :param dataset_delay: the number of seconds that separates the Armbands' recording from the video utilized to classify the dataset\n\n        :return Return: Five arrays containing the performance dataset for the EMG, IMU-Arm, IMU-Leg recording. 
the labels, and the accuracy\n            obtained by the classifier during the live performance, respectively\n        \"\"\"\n        dataset = pickle.load(open(path_seance + \"\\\\dataset_predicted.p\", \"rb\"))\n        true_labels_array = np.array(path_labels)\n\n        prediction = []\n        dataset_emg = []\n        dataset_imu_1 = []\n        dataset_imu_2 = []\n        timestamp = []\n        all_emg = []\n        all_imu_1 = []\n        all_imu_2 = []\n\n        index_timestamp_delay = -1\n        index = 0\n\n        current_index_true_label = 0\n        true_labels_seance = []\n        '''\n        j will be an array containing all the information relating to the current example within the performance dataset.\n        j[0] contains the EMG recording of the example\n        j[1] contains the IMU recording of the Myo on the arm\n        j[2] contains the IMU recording of the Myo on the leg\n        j[3] contains the timestamp of every example\n        j[4] contains the live prediction of the current example obtained during the performance\n        '''\n        for j in dataset:\n            current_timestamp = j[3]\n\n            # If the current time of the example is over the end of the performance, stop reading the performance data\n            if true_labels_array[current_index_true_label][0] == -1 and \\\n                    current_timestamp >= true_labels_array[current_index_true_label][1]:\n                break\n\n            # If the current time of the example is past the next true label timestamp, update the true label employed.\n            if current_timestamp >= true_labels_array[0][1]:\n                if current_timestamp >= true_labels_array[current_index_true_label][1]:\n                    current_index_true_label += 1\n                true_labels_seance.append(true_labels_array[current_index_true_label - 1][0])\n                timestamp.append(current_timestamp)\n\n            if index_timestamp_delay == -1 and current_timestamp >= dataset_delay:\n                index_timestamp_delay = index\n\n            # Build the performance dataset while synchronizing it to the video. 
The synchronization was made manually using the visual cues from the\n # swarm activity.\n if dataset_delay < 0:\n if current_timestamp > -1 * dataset_delay:\n emg = np.array(j[0]).reshape(len(j[0]) * len(j[0][0]), len(j[0][0][0]))\n all_emg.append(emg)\n imu_1 = np.array(j[1]).reshape(len(j[1]) * len(j[1][0]), len(j[1][0][0]))\n all_imu_1.append(imu_1)\n imu_2 = np.array(j[2]).reshape(len(j[2]) * len(j[2][0]), len(j[2][0][0]))\n all_imu_2.append(imu_2)\n\n emg_example, imu_1_example, imu_2_example = build_features_vector.build_features_vector(j[0], j[1], j[2])\n\n dataset_emg.append(emg_example)\n dataset_imu_1.append(imu_1_example)\n dataset_imu_2.append(imu_2_example)\n\n prediction.append(int(j[4]))\n else:\n emg = np.array(j[0]).reshape(len(j[0]) * len(j[0][0]), len(j[0][0][0]))\n all_emg.append(emg)\n imu_1 = np.array(j[1]).reshape(len(j[1]) * len(j[1][0]), len(j[1][0][0]))\n all_imu_1.append(imu_1)\n imu_2 = np.array(j[2]).reshape(len(j[2]) * len(j[2][0]), len(j[2][0][0]))\n all_imu_2.append(imu_2)\n\n emg_example, imu_1_example, imu_2_example = build_features_vector.build_features_vector(j[0], j[1], j[2])\n\n dataset_emg.append(emg_example)\n dataset_imu_1.append(imu_1_example)\n dataset_imu_2.append(imu_2_example)\n\n prediction.append(int(j[4]))\n index += 1\n\n # Synchronize the recording of the armbands of with the true labels obtained by watching the performance's video.\n if dataset_delay > 0:\n true_labels_seance = true_labels_seance[index_timestamp_delay::]\n\n dataset_emg = dataset_emg[:len(true_labels_seance)]\n dataset_imu_1 = dataset_imu_1[:len(true_labels_seance)]\n dataset_imu_2 = dataset_imu_2[:len(true_labels_seance)]\n\n print(\"EMG : \", np.shape(dataset_emg))\n print(\"IMU 1 : \", np.shape(dataset_imu_1))\n print(\"IMU 2 : \", np.shape(dataset_imu_2))\n\n prediction = prediction[:len(true_labels_seance)]\n print(\"PREDICTION : \", prediction)\n print(\"TRUE LABELS: \", true_labels_seance)\n print(\"ACCURACY PREDICTIONS: \", accuracy_score(true_labels_seance, prediction))\n accuracy = accuracy_score(true_labels_seance, prediction)\n\n return dataset_emg, dataset_imu_1, dataset_imu_2, true_labels_seance, accuracy","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":12093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"437384459","text":"import numpy as np\n\n#-------------------------------------------Functions-------------------------------------------\n\ndef get_Embeddings(data=[], selected_terms = set()):\n\timport os\n\timport pickle\n\n\tfileName = \"saveFiles/StkOvf_Embeddings.pkl\"\n\tif os.path.exists(fileName):\n\t\twith open(fileName, 'rb') as temp:\n\t\t\tdata_vectors, embeddings, maxSize, embedding_vocab = pickle.load(temp)\n\telse:\n\t\tall_docs = list(data)\n\n\t\t# Get Embeddings\n\t\tfrom Tools.Load_Embedings import Get_Embeddings\n\t\tembeddingGenerator = Get_Embeddings()\n\t\tdata_vectors, embeddings, maxSize, embedding_vocab = embeddingGenerator.googleVecs(all_docs, selected_terms)\n\t\tdel embeddingGenerator\n\t\tfrom keras.preprocessing.sequence import pad_sequences\n\t\tdata_vectors = pad_sequences(data_vectors, maxlen=maxSize, padding='post', value=0.)\n\n\t\t# with open(fileName, 'wb') as temp:\n\t\t# \tpickle.dump((data_vectors, embeddings, maxSize, embedding_vocab), temp)\n\n\tprint(\"Embeddings Shape : \",embeddings.shape)\n\treturn (data_vectors, embeddings, maxSize, embedding_vocab)\n\n\n#-------------------------------------------Prepare 
Data-------------------------------------------\n\nfrom Tools.getStackOverflow import getStackOverflow\nfrom random import sample\n\n# SOvrflow = getStackOverflow(\"/Volumes/Files/Work/Research/Information Retrieval/1) Data/StackOverflow-Dataset/\")\nSOvrflow = getStackOverflow(\"/home/sounak/Datasets/StackOverflow-Dataset/\")\ndata = SOvrflow.getData()\nlabels = SOvrflow.getTarget()\n\n## Binarize Labels ##\nfrom sklearn.preprocessing import LabelBinarizer\nlb = LabelBinarizer()\nlabels = lb.fit_transform(labels)\nprint(\"Label dimention : \", labels.shape)\n\nfrom Tools.Feature_Extraction import chisqure\nselected_terms = chisqure(data, labels, feature_count = 1500)\n\n## Process Dataset ##\ndata_vectors, embeddings, maxSize, embedding_vocab = get_Embeddings(data, selected_terms)\n\n\n#-------------------------------------------Classification-------------------------------------------\n\ntotrec = 0.0\ntotprec = 0.0\ntotF1 = 0.0\n\nfrom sklearn.model_selection import KFold\nkf = KFold(n_splits=5)\nfrom Tools.Classifier import CNN_Classifier, RNN_Classifier, Nested_CNN_Classifier\n\nclassifier = CNN_Classifier(filter_sizes=[5,7,9], filter_counts=[500,350,250], pool_windows=[6,4,3], learning_rate=0.001, batch_size=64, num_epochs=7)\n# classifier = Nested_CNN_Classifier(filter_sizes=[6,2], filter_counts=[300,150], pool_windows=[2,2], learning_rate=0.001, batch_size=64, num_epochs=7)\n# classifier = RNN_Classifier(output_size=512, learning_rate=0.001, batch_size=7, num_epochs=100)\n\nfor train_indices, test_indices in kf.split(data_vectors):\n\ttrain_doc_vectors, train_labels = [data_vectors[i] for i in train_indices], labels[train_indices] #[labels[i] for i in train_indices]\n\ttest_doc_vectors, test_labels = [data_vectors[i] for i in test_indices], labels[test_indices] #[labels[i] for i in test_indices]\n\n\tnew = classifier.predict(np.array(train_doc_vectors), train_labels, np.array(test_doc_vectors), test_labels, embeddings, maxSize, train_labels.shape[1])\n\n\n# from sklearn.metrics import f1_score, precision_score, recall_score\n#\n# #MICRO\n# precision = precision_score(test_labels, predictions, average='micro')\n# totprec += precision\n# recall = recall_score(test_labels, predictions, average='micro')\n# totrec += recall\n# f1 = f1_score(test_labels, predictions, average='micro')\n# totF1 += f1\n#\n# print(\"Micro-average quality numbers\")\n# print(\"Precision: {:.4f}, Recall: {:.4f}, F1-measure: {:.4f}\"\n# .format(precision, recall, f1))\n#\n# #MACRO\n# precision = precision_score(test_labels, predictions, average='macro')\n# recall = recall_score(test_labels, predictions, average='macro')\n# f1 = f1_score(test_labels, predictions, average='macro')\n#\n# print(\"Macro-average quality numbers\")\n# print(\"Precision: {:.4f}, Recall: {:.4f}, F1-measure: {:.4f}\"\n# .format(precision, recall, f1))\n#\n# #INDIVIDUAL\n# precision = precision_score(test_labels, predictions, average=None)\n# recall = recall_score(test_labels, predictions, average=None)\n# f1 = f1_score(test_labels, predictions, average=None)\n#\n# print(\"All-Class quality numbers\")\n# print(\"Precision: \\n{}, \\nRecall: \\n{}, \\nF1-measure: \\n{}\"\n# .format(precision, recall, f1))\n#\n# print \"10-fold Micro average:\"\n# print(\"Precision: \\n{}, \\nRecall: \\n{}, \\nF1-measure: \\n{}\"\n# .format(totprec/K, totrec/K, totF1/K))\n#\n#\n#\n# # Transform multilabel labels\n# train_labels = [(labels[i],) for i in train_indices]\n# test_labels = [(labels[i],) for i in test_indices]\n# mlb = MultiLabelBinarizer()\n# 
train_labels = mlb.fit_transform(train_labels)\n# test_labels = mlb.transform(test_labels)\n# import numpy as np\n# exp_train = np.sum(train_labels, axis=0)\n# exp_test = np.sum(test_labels, axis=0)\n# print \"Num of train docs per category:\\n\", exp_train\n# print \"Num of test docs per category:\\n\", exp_test\n#\n#\n# #Export to Spreadsheet\n# import xlsxwriter\n#\n# export = np.column_stack((exp_train, exp_test, f1, precision, recall))\n# workbook = xlsxwriter.Workbook('classscores.xlsx')\n# worksheet = workbook.add_worksheet()\n# row = 0\n# for (x,y), value in np.ndenumerate(export):\n#     worksheet.write(x, y, value)\n# workbook.close()\n","sub_path":"Sentence Classification/NN_StackOverflow.py","file_name":"NN_StackOverflow.py","file_ext":"py","file_size_in_byte":5278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"491022873","text":"from flask import Blueprint, abort, jsonify, make_response\nfrom application.models import *\nfrom application.helpers import db_tools\n\napi = Blueprint('api', __name__,)\n\n@api.route('/catalog')\ndef catalog_json():\n    categories = db_tools.get_categories()\n    return jsonify(categories=[i.serialize for i in categories])\n\n@api.route('/catalog/category/<category_slug>')\ndef category_json(category_slug):\n    category = Category.query.filter_by(slug=category_slug).first()\n    items = Item.query.join(Category).filter(Category.id == category.id).order_by(db.asc(Item.name)).all()\n    item_count = len(items)\n    data = {\n        'id': category.id,\n        'name': category.name,\n        'slug': category.slug,\n        'item_count': item_count,\n        'items': [i.serialize for i in items]\n    }\n    # return jsonify(data)\n    return jsonify(category.serialize)\n\n@api.route('/catalog/category/<category_slug>/item/<item_slug>')\ndef item_json(category_slug, item_slug):\n    item = Item.query.join(Category).filter(Category.slug == category_slug,\n                                             Item.slug == item_slug).first()\n    return jsonify(item.serialize)\n\n@api.errorhandler(404)\ndef not_found(error):\n    return make_response(jsonify({'error': 'Not found'}), 404)","sub_path":"application/views/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"292380940","text":"from sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import StandardScaler\n\niris = load_iris()\nx_train , x_test , y_train , y_test = train_test_split(iris.data,iris.target,test_size=0.2)\nstd = StandardScaler()\nx_train = std.fit_transform(x_train)\nx_test = std.transform(x_test)\nknn = KNeighborsClassifier(n_neighbors=5)\nknn.fit(x_train, y_train)\ny_predict = knn.predict(x_test)\nprint('Predicted results: {}'.format(y_predict))\nprint('Accuracy: {}'.format(knn.score(x_test, y_test)))\n","sub_path":"_4.python/__code/Python自學聖經(第二版)/ch23/iris.py","file_name":"iris.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"204272074","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    preprocess.preprocess_patch\n    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n    An API for patch preprocessing for a concrete classification model architecture.\n\"\"\"\n\nimport numpy as np\nimport keras.backend as K\n\n\n
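# Note (added): the in_shape triples below are half-extents around the centroid, so the\n# three extracted patches measure 24x42x42, 42x24x42 and 42x42x24 voxels, one thin axis per view.\n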
def preprocess_patch_LR3DCNN(dicom_array, centroid):\n    \"\"\"Patch preprocessing function for LR3DCNN architecture.\n\n    Args:\n        dicom_array (ndarray): numpy-array containing the 3D-representation\n            of the DICOM-series\n        centroid (dict): A centroid dict of the form::\n            {'x': int,\n             'y': int,\n             'z': int}\n\n    Returns:\n        list[ndarray, ndarray, ndarray]\n\n    \"\"\"\n    in_shapes = [(12, 21, 21),\n                 (21, 12, 21),\n                 (21, 21, 12)]\n\n    in_patch = [dicom_array[centroid['x'] - in_shape[0]: centroid['x'] + in_shape[0],\n                            centroid['y'] - in_shape[1]: centroid['y'] + in_shape[1],\n                            centroid['z'] - in_shape[2]: centroid['z'] + in_shape[2]]\n                for i, in_shape in enumerate(in_shapes)]\n\n    return in_patch\n\n\ndef preprocess_LR3DCNN(dicom_array, centroids):\n    \"\"\"Preprocess function for LR3DCNN architecture.\n\n    Args:\n        dicom_array (ndarray): numpy-array containing the 3D-representation\n            of the DICOM-series\n        centroids (list(dict)): A list of centroids of the form::\n            {'x': int,\n             'y': int,\n             'z': int}\n\n    Returns:\n        list[ndarray, ndarray, ndarray]\n\n    \"\"\"\n    LR3DCNN_input = [[], [], []]\n    for centroid in centroids:\n        patch = preprocess_patch_LR3DCNN(dicom_array, centroid)\n        for i in range(len(LR3DCNN_input)):\n            LR3DCNN_input[i].append(patch[i])\n\n    if K.image_data_format() == 'channels_last':\n        channel_axis = -1\n    else:\n        channel_axis = 1\n\n    for i in range(len(LR3DCNN_input)):\n        LR3DCNN_input[i] = np.asarray(LR3DCNN_input[i])\n        LR3DCNN_input[i] = np.expand_dims(LR3DCNN_input[i], channel_axis)\n\n    return LR3DCNN_input\n","sub_path":"prediction/src/algorithms/classify/src/preprocess_patch.py","file_name":"preprocess_patch.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"582780707","text":"#### Combined standard/sandbox VM Workflow Task\nimport time\n\ndef run(helper, options):\n\n\t# Find out which workflow we're wanting to run\n\tworkflow = options['workflow']\n\n\t# Configuration of task\n\tif workflow == 'standard':\n\t\tprefix = options['wfconfig']['PREFIX']\n\t\tvcenter_tag = options['wfconfig']['VCENTER_TAG']\n\t\tdomain = options['wfconfig']['DOMAIN']\n\t\tnetwork = options['wfconfig']['NETWORK']\n\t\tgateway = options['wfconfig']['GATEWAY']\n\t\tnetmask = options['wfconfig']['NETMASK']\n\t\tdns_servers = options['wfconfig']['DNS_SERVERS']\n\t\tdns_domain = options['wfconfig']['DNS_DOMAIN']\n\t\tpuppet_cert_domain = options['wfconfig']['PUPPET_CERT_DOMAIN']\n\t\twin_full_name = options['wfconfig']['WIN_FULL_NAME']\n\t\twin_org_name = options['wfconfig']['WIN_ORG_NAME']\n\t\twin_location = options['wfconfig']['WIN_LOCATION']\n\t\twin_os_domain = options['wfconfig']['WIN_OS_DOMAIN']\n\t\twin_dev_os_domain = options['wfconfig']['WIN_DEV_OS_DOMAIN']\n\t\tsn_location = options['wfconfig']['SN_LOCATION']\n\t\tnetwork_name = options['wfconfig']['NETWORK_NAME']\n\t\tcluster_storage_pools = options['wfconfig']['CLUSTER_STORAGE_POOLS']\n\t\tcluster_rpool = options['wfconfig']['CLUSTER_RPOOL']\n\t\tnotify_emails = options['notify_emails']\n\t\twin_groups = options['wfconfig']['WIN_GROUPS']\n\t\tos_templates = options['wfconfig']['OS_TEMPLATES']\n\t\tos_names = options['wfconfig']['OS_NAMES']\n\t\tos_disks = options['wfconfig']['OS_DISKS']\n\t\tvm_folder_name = None\n\telif workflow == 'sandbox':\n\t\tprefix = options['wfconfig']['SB_PREFIX']\n\t\tvcenter_tag = options['wfconfig']['SB_VCENTER_TAG']\n\t\tdomain = options['wfconfig']['SB_DOMAIN']\n\t\tpuppet_cert_domain = options['wfconfig']['SB_PUPPET_CERT_DOMAIN']\n\t\twin_full_name = options['wfconfig']['SB_WIN_FULL_NAME']\n\t\twin_org_name = options['wfconfig']['SB_WIN_ORG_NAME']\n\t\twin_location = 
options['wfconfig']['SB_WIN_LOCATION']\n\t\twin_os_domain = options['wfconfig']['SB_WIN_OS_DOMAIN']\n\t\twin_dev_os_domain = options['wfconfig']['SB_WIN_DEV_OS_DOMAIN']\n\t\tsn_location = options['wfconfig']['SB_SN_LOCATION']\n\t\tnetwork_name = options['wfconfig']['SB_NETWORK_NAME']\n\t\tcluster_storage_pools = options['wfconfig']['SB_CLUSTER_STORAGE_POOLS']\n\t\tcluster_rpool = options['wfconfig']['SB_CLUSTER_RPOOL']\n\t\twin_groups = options['wfconfig']['SB_WIN_GROUPS']\n\t\tos_templates = options['wfconfig']['SB_OS_TEMPLATES']\n\t\tos_names = options['wfconfig']['SB_OS_NAMES']\n\t\tos_disks = options['wfconfig']['SB_OS_DISKS']\n\t\tvm_folder_name = None\n\telif workflow == 'student':\n\t\tprefix = options['wfconfig']['STU_PREFIX']\n\t\tvcenter_tag = options['wfconfig']['STU_VCENTER_TAG']\n\t\tdomain = options['wfconfig']['STU_DOMAIN']\n\t\twin_full_name = options['wfconfig']['STU_WIN_FULL_NAME']\n\t\twin_org_name = options['wfconfig']['STU_WIN_ORG_NAME']\n\t\twin_location = options['wfconfig']['STU_WIN_LOCATION']\n\t\twin_os_domain = options['wfconfig']['STU_WIN_OS_DOMAIN']\n\t\twin_dev_os_domain = options['wfconfig']['STU_WIN_DEV_OS_DOMAIN']\n\t\tsn_location = options['wfconfig']['STU_SN_LOCATION']\n\t\tnetwork_name = options['wfconfig']['STU_NETWORK_NAMES'][options['network']]\n\t\tcluster_storage_pools = options['wfconfig']['STU_CLUSTER_STORAGE_POOLS']\n\t\twin_groups = options['wfconfig']['STU_WIN_GROUPS']\n\t\tos_templates = options['wfconfig']['STU_OS_TEMPLATES']\n\t\tos_names = options['wfconfig']['STU_OS_NAMES']\n\t\tos_disks = options['wfconfig']['STU_OS_DISKS']\n\t\tvm_folder_name = options['wfconfig']['STU_VM_FOLDER']\n\n\t## Allocate a hostname #################################################\n\n\t# Start the task\n\thelper.event(\"allocate_name\", \"Allocating a '\" + prefix + \"' system name\")\n\n\t# Allocate the name\n\tsystem_info = helper.lib.allocate_name(prefix, options['purpose'], helper.username, expiry=options['expiry'])\n\n\t# system_info is a dictionary containing a single { 'hostname': database_id }. 
Extract both of these:\n\tsystem_name = system_info.keys()[0]\n\tsystem_dbid = system_info.values()[0]\n\n\t# End the event\n\thelper.end_event(description=\"Allocated system name \" + system_name)\n\n\n\n\t## Allocate an IPv4 Address and create a host object (standard only) ###\n\n\tif workflow == 'standard':\n\t\t# Start the event\n\t\thelper.event(\"allocate_ipaddress\", \"Allocating an IP address from \" + network)\n\n\t\t# Allocate an IP address\n\t\tipv4addr = helper.lib.infoblox_create_host(system_name + \".\" + domain, network)\n\n\t\t# Handle errors - this will stop the task\n\t\tif ipv4addr is None:\n\t\t\traise Exception('Failed to allocate an IP address')\n\n\t\t# End the event\n\t\thelper.end_event(description=\"Allocated the IP address \" + ipv4addr)\n\telse:\n\t\tipv4addr = None\n\n\n\n\t## Create the virtual machine post-clone specification #################\n\n\t# Start the event\n\thelper.event(\"vm_clone\", \"Creating the virtual machine using VMware API\")\n\n\t# Pull some information out of the configuration\n\ttemplate_name = os_templates[options['template']]\n\tos_name = os_names[options['template']]\n\tos_disk_size = os_disks[options['template']]\n\n\t# For RHEL6, RHEL7 or Ubuntu:\n\tif options['template'] in ['rhel6', 'rhel7', 'rhel6c', 'ubuntu_14.04_lts']:\n\t\tos_type = helper.lib.OS_TYPE_BY_NAME['Linux']\n\t\tvm_spec = None\n\n\t# For Server 2012R2\n\telif options['template'] == 'windows_server_2012' or options['template'] == 'windows_server_2016' or options['template'] == 'windows_server_2016_core':\n\t\tos_type = helper.lib.OS_TYPE_BY_NAME['Windows']\n\n\t\t# Build a customisation spec depending on the environment to use the correct domain details\n\t\tif workflow == 'standard':\n\t\t\tif options['env'] == 'dev':\n\t\t\t\tvm_spec = helper.lib.vmware_vm_custspec(dhcp=False, gateway=gateway, netmask=netmask, ipaddr=ipv4addr, dns_servers=dns_servers, dns_domain=dns_domain, os_type=os_type, os_domain='devdomain.soton.ac.uk', timezone=85, domain_join_user=helper.config['AD_DEV_JOIN_USER'], domain_join_pass=helper.config['AD_DEV_JOIN_PASS'], fullname=win_full_name, orgname=win_org_name)\n\t\t\telse:\n\t\t\t\tvm_spec = helper.lib.vmware_vm_custspec(dhcp=False, gateway=gateway, netmask=netmask, ipaddr=ipv4addr, dns_servers=dns_servers, dns_domain=dns_domain, os_type=os_type, os_domain='soton.ac.uk', timezone=85, domain_join_user=helper.config['AD_PROD_JOIN_USER'], domain_join_pass=helper.config['AD_PROD_JOIN_PASS'], fullname=win_full_name, orgname=win_org_name)\n\t\telif workflow in ['sandbox', 'student']:\n\t\t\tif options['env'] == 'dev':\n\t\t\t\tvm_spec = helper.lib.vmware_vm_custspec(dhcp=True, os_type=os_type, os_domain=win_dev_os_domain, timezone=85, domain_join_user=helper.config['AD_DEV_JOIN_USER'], domain_join_pass=helper.config['AD_DEV_JOIN_PASS'], fullname=win_full_name, orgname=win_org_name)\n\t\t\telse:\n\t\t\t\tvm_spec = helper.lib.vmware_vm_custspec(dhcp=True, os_type=os_type, os_domain=win_os_domain, timezone=85, domain_join_user=helper.config['AD_PROD_JOIN_USER'], domain_join_pass=helper.config['AD_PROD_JOIN_PASS'], fullname=win_full_name, orgname=win_org_name)\n\n\n\t# Anything else\n\telse:\n\t\traise RuntimeError(\"Unknown template specified\")\n\n\t# Connect to vCenter\n\tsi = helper.lib.vmware_smartconnect(vcenter_tag)\n\n\t# Get the vm folder to use if any\n\tvm_folder = None\n\tif vm_folder_name is not None:\n\t\tvm_folder = vm_folder_name\n\n\telif \"default_folder\" in helper.config['VMWARE'][vcenter_tag]:\n\t\tvm_folder = 
helper.config['VMWARE'][vcenter_tag]['default_folder']\n\n\t# Get the vm resource pool to use if any\n\tvm_rpool = cluster_rpool.get(options['cluster'], \"Root Resource Pool\")\n\n\t# Launch the task to clone the virtual machine\n\ttask = helper.lib.vmware_clone_vm(si, template_name, system_name, vm_rpool=vm_rpool, vm_cluster=options['cluster'], custspec=vm_spec, vm_folder=vm_folder, vm_network=network_name, vm_datastore_cluster=cluster_storage_pools[options['cluster']])\n\thelper.lib.vmware_task_complete(task, \"Failed to create the virtual machine\")\n\n\t# End the event\n\thelper.end_event(description=\"Created the virtual machine successfully\")\n\n\t# Get the VM object (so we can reconfigure it)\n\tvm = task.info.result\n\n\t# If we don't have a VM, then kill the task\n\tif vm == None:\n\t\traise RuntimeError(\"VM creation failed: VMware API did not return a VM object reference\")\n\n\n\n\t## Configure vCPUs #####################################################\n\n\t# Start the event\n\thelper.event(\"vm_reconfig_cpu\", \"Setting VM CPU configuration\")\n\n\t# Get total CPUs desired from our options\n\ttotal_cpu = int(options['sockets']) * int(options['cores'])\n\n\t# Get number of cores per socket\n\tcpus_per_core = int(options['cores'])\n\t\n\t# Reconfigure the VM\n\ttask = helper.lib.vmware_vmreconfig_cpu(vm, total_cpu, cpus_per_core)\n\thelper.lib.vmware_task_complete(task, \"Failed to set vCPU configuration\")\n\n\t# End the event\n\thelper.end_event(description=\"VM vCPU configuration saved\")\n\n\n\n\t## Configure RAM #######################################################\n\n\t# Start the event\n\thelper.event(\"vm_reconfig_ram\", \"Setting VM RAM configuration\")\n\n\t# Reconfigure the VM\n\ttask = helper.lib.vmware_vmreconfig_ram(vm, int(options['ram']) * 1024)\n\thelper.lib.vmware_task_complete(task, \"Failed to set RAM configuration\")\n\n\t# End the event\n\thelper.end_event(description=\"VM RAM configuration saved\")\n\n\n\n\t## Configure Disk ######################################################\n\n\t# Add disk to the VM\n\tif int(options['disk']) > 0:\n\t\t# Start the event\n\t\thelper.event(\"vm_add_disk\", \"Adding data disk to the VM\")\n\n\t\t# Reconfigure the VM to add the disk\n\t\ttask = helper.lib.vmware_vm_add_disk(vm, int(options['disk']) * 1024 * 1024 * 1024)\n\t\thelper.lib.vmware_task_complete(task, \"Could not add data disk to VM\")\n\n\t\t# End the event\n\t\thelper.end_event(description=\"Data disk added to VM\")\n\n\n\n\t## Set up annotation ###################################################\n\n\t# Start the event\n\thelper.event(\"vm_config_notes\", \"Setting VM notes annotation\")\n\n\t# Failure of the following does not kill the task\n\ttry:\n\t\t# Set the notes\n\t\ttask = helper.lib.vmware_vmreconfig_notes(vm, options['purpose'])\n\n\t\t# End the event\n\t\thelper.lib.vmware_task_complete(task, \"VM notes annotation set\")\n\texcept Exception as e:\n\t\thelper.end_event(success=False, description=\"Failed to set VM notes annotation: \" + str(e))\n\n\n\n\t## Update Cortex Cache #################################################\n\n\t# We do this so that we don't have to wait for the next run of the \n\t# scheduled VMware import. 
It also needs to happen before the VM is powered on, because\n\t# the cache must be up to date before the installers run inside the VM.\n\n\t# Start the event\n\thelper.event(\"update_cache\", \"Updating Cortex VM cache item\")\n\n\t# Failure of this does not kill the task\n\ttry:\n\t\t# Update the cache item\n\t\thelper.lib.update_vm_cache(vm, vcenter_tag)\n\n\t\t# End the event\n\t\thelper.end_event(\"Updated Cortex VM cache item\")\n\texcept Exception as e:\n\t\thelper.end_event(success=False, description=\"Failed to update Cortex VM cache item - VMware information may be incorrect\")\n\n\n\n\t## Power on the VM #####################################################\n\n\t# Start the event\n\thelper.event(\"vm_poweron\", \"Powering the VM on for the first time\")\n\n\t# Set up the necessary values in redis\n\thelper.lib.redis_set_vm_data(vm, \"hostname\", system_name)\n\tif workflow == 'standard':\n\t\thelper.lib.redis_set_vm_data(vm, \"ipaddress\", ipv4addr)\n\telif workflow in ['sandbox', 'student']:\n\t\thelper.lib.redis_set_vm_data(vm, \"ipaddress\", 'dhcp')\n\n\t# Power on the VM\n\ttask = helper.lib.vmware_vm_poweron(vm)\n\thelper.lib.vmware_task_complete(task, \"Could not power on the VM\")\n\n\t# If we've not powered on within 30 seconds, fail\n\tif not helper.lib.vmware_wait_for_poweron(vm, 30):\n\t\thelper.end_event(success=False, description=\"VM not powered on after 30 seconds. Check vCenter for more information\")\n\n\t# End the event\n\thelper.end_event(description=\"VM powered up\")\n\n\n\n\t## Register Linux VMs with the built-in Puppet ENC #####################\n\n\t# Only for Linux VMs...\n\tif os_type == helper.lib.OS_TYPE_BY_NAME['Linux'] and options['template'] != 'rhel6c':\n\t\t# Start the event\n\t\thelper.event(\"puppet_enc_register\", \"Registering with Puppet ENC\")\n\n\t\t# Register with the Puppet ENC\n\t\thelper.lib.puppet_enc_register(system_dbid, system_name + \".\" + puppet_cert_domain, options['env'])\n\n\t\t# End the event\n\t\thelper.end_event(\"Registered with Puppet ENC\")\n\n\n\n\t## Create the ServiceNow CMDB CI #######################################\n\n\t# Start the event\n\thelper.event(\"sn_create_ci\", \"Creating ServiceNow CMDB CI\")\n\tsys_id = None\n\tcmdb_id = None\n\n\t# Failure does not kill the task\n\ttry:\n\t\t# Create the entry in ServiceNow\n\t\t(sys_id, cmdb_id) = helper.lib.servicenow_create_ci(ci_name=system_name, os_type=os_type, os_name=os_name, sockets=int(options['sockets']), cores_per_socket=int(options['cores']), ram_mb=int(options['ram']) * 1024, disk_gb=int(options['disk']) + os_disk_size, environment=options['env'], short_description=options['purpose'], comments=options['comments'], location=sn_location, ipaddr=ipv4addr)\n\n\t\t# Update Cortex systems table row with the sys_id\n\t\thelper.lib.set_link_ids(system_dbid, cmdb_id=sys_id, vmware_uuid=vm.config.uuid)\n\n\t\t# End the event\n\t\thelper.end_event(success=True, description=\"Created ServiceNow CMDB CI\")\n\texcept Exception as e:\n\t\thelper.end_event(success=False, description=\"Failed to create ServiceNow CMDB CI\")\n\n\n\n\t## Link ticket to CI (standard VM only) ################################\n\n\t# If we succeeded in creating a CI, try linking the task\n\tif workflow == 'standard' and sys_id is not None and options['task'] is not None and len(options['task'].strip()) != 0:\n\t\t# Start the event\n\t\thelper.event(\"sn_link_task_ci\", \"Linking ServiceNow Task to CI\")\n\n\t\t# Failure does not kill the task\n\t\ttry:\n\t\t\t# Link the ServiceNow task to the 
CI\n\t\t\tlink_sys_id = helper.lib.servicenow_link_task_to_ci(sys_id, options['task'].strip())\n\n\t\t\t# End the event\n\t\t\thelper.end_event(success=True, description=\"Linked ServiceNow Task to CI\")\n\t\texcept Exception as e:\n\t\t\thelper.end_event(success=False, description=\"Failed to link ServiceNow Task to CI. \" + str(e))\n\n\n\n\t## Wait for the VM to finish building ##################################\n\n\t# Linux has separate events for installation starting and installation\n\t# finishing, but windows only has installation finishing\n\tif os_type == helper.lib.OS_TYPE_BY_NAME['Linux']:\n\t\t# Start the event\n\t\thelper.event('guest_installer_progress', 'Waiting for in-guest installation to start')\n\n\t\t# Wait for the in-guest installer to set the state to 'progress' or 'done'\n\t\twait_response = helper.lib.wait_for_guest_notify(vm, ['inprogress', 'done'])\n\n\t\t# When it returns, end the event\n\t\tif wait_response is None or wait_response not in ['inprogress', 'done']:\n\t\t\thelper.end_event(success=False, description='Timed out waiting for in-guest installation to start')\n\n\t\t\t# End the task here\n\t\t\treturn\n\t\telse:\n\t\t\thelper.end_event(success=True, description='In-guest installation started')\n\n\t# Start another event\n\thelper.event('guest_installer_done', 'Waiting for in-guest installation to finish')\n\n\t# Wait for the in-guest installer to set the state to 'progress' or 'done'\n\twait_response = helper.lib.wait_for_guest_notify(vm, ['done'])\n\n\t# When it returns, end the event\n\tif wait_response is None or wait_response not in ['done']:\n\t\thelper.end_event(success=False, description='Timed out waiting for in-guest installation to finish')\n\telse:\n\t\thelper.end_event(success=True, description='In-guest installation finished')\n\n\n\n\t## For Windows VMs, join groups and stuff ##############################\n\n\tif os_type == helper.lib.OS_TYPE_BY_NAME['Windows']:\n\t\t# Put in Default OU (failure does not kill task)\n\t\ttry:\n\t\t\t# Start the event\n\t\t\thelper.event('windows_move_ou', 'Moving Computer object to Default OU')\n\n\t\t\t# Run RPC to put in default OU\n\t\t\thelper.lib.windows_move_computer_to_default_ou(system_name, options['env'])\n\n\t\t\t# End the event\n\t\t\thelper.end_event(success=True, description='Moved Computer object to Default OU')\n\t\texcept Exception as e:\n\t\t\thelper.end_event(success=False, description='Failed to put Computer object in OU: ' + str(e))\n\n\t\t# Join default groups (failure does not kill task)\n\t\ttry:\n\t\t\t# Start the event\n\t\t\thelper.event('windows_join_groups', 'Joining default groups')\n\n\t\t\t# Run RPC to join groups\n\t\t\thelper.lib.windows_join_groups(system_name, options['env'], win_groups[options['env']])\n\n\t\t\t# End the event\n\t\t\thelper.end_event(success=True, description='Joined default groups')\n\t\texcept Exception as e:\n\t\t\thelper.end_event(success=False, description='Failed to join default groups: ' + str(e))\n\n\t\t# Set up computer information (failure does not kill task)\n\t\ttry:\n\t\t\t# Start the event\n\t\t\thelper.event('windows_set_info', 'Setting Computer object attributes')\n\n\t\t\t# Run RPC to set information\n\t\t\thelper.lib.windows_set_computer_details(system_name, options['env'], options['purpose'], win_location)\n\n\t\t\t# End the event\n\t\t\thelper.end_event(success=True, description='Computer object attributes set')\n\t\texcept Exception as e:\n\t\t\thelper.end_event(success=False, description='Failed to set Computer object attributes: ' 
+ str(e))\n\n\t\t# Wait for 60 seconds to allow time for the VM to come back up\n\t\t# This feels like a bit of a hack, but we currently have no\n\t\t# way of knowing whether the VM is up.\n\t\thelper.event('windows_delay', 'Wait and restart guest')\n\t\ttime.sleep(60)\n\n\t\t# Restart the guest\n\t\thelper.lib.vmware_vm_restart_guest(vm)\n\t\thelper.end_event(success=True, description='Initiated guest restart')\n\n\n\n\t## Send success email ##################################################\n\n\t# Build the text of the message\n\tmessage = 'Cortex has finished building your VM. The details of the VM can be found below.\\n'\n\tmessage += '\\n'\n\tif workflow in ['standard', 'sandbox']:\n\t\tif workflow == 'standard':\n\t\t\tmessage += 'ServiceNow Task: ' + str(options['task']) + '\\n'\n\t\tmessage += 'Hostname: ' + str(system_name) + '.' + str(domain) + '\\n'\n\t\tif ipv4addr is not None:\n\t\t\tmessage += 'IP Address: ' + str(ipv4addr) + '\\n'\n\t\tmessage += 'VMware Cluster: ' + str(options['cluster']) + '\\n'\n\t\tmessage += 'Purpose: ' + str(options['purpose']) + '\\n'\n\t\tmessage += 'Operating System: ' + str(os_name) + '\\n'\n\t\tmessage += 'CPUs: ' + str(total_cpu) + '\\n'\n\t\tmessage += 'RAM: ' + str(options['ram']) + ' GiB\\n'\n\t\tmessage += 'Data Disk: ' + str(options['disk']) + ' GiB\\n'\n\t\tmessage += '\\n'\n\t\tmessage += 'The event log for the task can be found at https://' + str(helper.config['CORTEX_DOMAIN']) + '/task/status/' + str(helper.task_id) + '\\n'\n\t\tmessage += 'More information about the VM can be found on the Cortex systems page at https://' + str(helper.config['CORTEX_DOMAIN']) + '/systems/edit/' + str(system_dbid) + '\\n'\n\t\tif sys_id is not None:\n\t\t\tmessage += 'The ServiceNow CI entry is available at ' + (helper.config['CMDB_URL_FORMAT'] % sys_id) + '\\n'\n\t\telse:\n\t\t\tmessage += 'A ServiceNow CI was not created. 
For more information, see the task event log.\\n'\n\n\t\tmessage += '\\nPlease remember to move the virtual machine into an appropriate folder in vCenter'\n\t\tif os_type == helper.lib.OS_TYPE_BY_NAME['Windows']:\n\t\t\tmessage += ' and to an appropriate OU in Active Directory'\n\t\tmessage += '\\n'\n\telse:\n\t\tmessage += 'Purpose: ' + str(options['purpose']) + '\\n'\n\t\tmessage += 'Operating System: ' + str(os_name) + '\\n'\n\t\tmessage += 'CPUs: ' + str(total_cpu) + '\\n'\n\t\tmessage += 'RAM: ' + str(options['ram']) + ' GiB\\n'\n\t\tmessage += '\\n'\n\t\tmessage += 'The event log for the task can be found at https://' + str(helper.config['CORTEX_DOMAIN']) + '/task/status/' + str(helper.task_id) + '\\n'\n\t\tmessage += 'More information about the VM, can be found on the Cortex systems page at https://' + str(helper.config['CORTEX_DOMAIN']) + '/systems/edit/' + str(system_dbid) + '\\n'\n\t\t\n\n\t# Send the message to the user who started the task (if they want it)\n\tif options['sendmail']:\n\t\thelper.lib.send_email(helper.username, 'Cortex has finished building your VM, ' + str(system_name), message)\n\n\t# For standard VMs only, always notify people in the notify_emails list\n\tif workflow == 'standard':\n\t\tfor email in notify_emails: \n\t\t\thelper.lib.send_email(email, 'Cortex has finished building a VM, ' + str(system_name), message)\n","sub_path":"buildvm/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":19866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"276464457","text":"import os\nimport psycopg2\nimport datetime\n\n\noutput_path = '/home/ubuntu/swat_data/lower_mekong/Outputs'\nwatershed_name = 'lower_mekong'\nsub_vars = ['PRECIPmm', 'PETmm', 'ETmm', 'SWmm', 'PERCmm', 'SURQmm', 'GW_Qmm', 'WYLDmm', 'SYLDt/ha']\nrch_vars = ['FLOW_INcms', 'FLOW_OUTcms', 'EVAPcms', 'SED_INtons', 'SED_OUTtons', 'SEDCONCmg/kg', 'ORGN_INkg', 'ORGN_OUTkg', 'DISOX_INkg', 'DISOX_OUTkg']\n\n\n\nsub_column_list = ['', 'SUB', 'GIS', 'MO', 'DA', 'YR', 'AREAkm2', 'PRECIPmm', 'SNOMELTmm', 'PETmm', 'ETmm', 'SWmm', 'PERCmm',\n 'SURQmm', 'GW_Qmm', 'WYLDmm', 'SYLDt/ha', 'ORGNkg/ha', 'ORGPkg/ha', 'NSURQkg/ha', 'SOLPkg/ha',\n 'SEDPkg/ha', 'LATQmm', 'LATNO3kg/ha', 'GWNO3kg/ha', 'CHOLAmic/L', 'CBODUmg/L', 'DOXQmg/L', 'TNO3kg/ha']\n\nrchmonth_column_list = ['', 'RCH', 'GIS', 'MON', 'AREAkm2', 'FLOW_INcms', 'FLOW_OUTcms', 'EVAPcms', 'TLOSScms', 'SED_INtons',\n 'SED_OUTtons', 'SEDCONCmg/kg', 'ORGN_INkg', 'ORGN_OUTkg', 'ORGP_INkg', 'ORGP_OUTkg', 'NO3_INkg',\n 'NO3_OUTkg', 'NH4_INkg', 'NH4_OUTkg', 'NO2_INkg', 'NO2_OUTkg', 'MINP_INkg', 'MINP_OUTkg',\n 'CHLA_INkg', 'CHLA_OUTkg', 'CBOD_INkg', 'CBOD_OUTkg', 'DISOX_INkg', 'DISOX_OUTkg', 'SOLPST_INmg',\n 'SOLPST_OUTmg', 'SORPST_INmg', 'SORPST_OUTmg', 'REACTPSTmg', 'VOLPSTmg', 'SETTLPSTmg', 'RESUSP_PSTmg',\n 'DIFFUSEPSTmg', 'REACBEDPSTmg', 'BURYPSTmg', 'BED_PSTmg', 'BACTP_OUTct', 'BACTLP_OUTct', 'CMETAL#1kg',\n 'CMETAL#2kg', 'CMETAL#3kg', 'TOTNkg', 'TOTPkg', 'NO3ConcMg/l', 'WTMPdegc']\n\nrchday_column_list = ['', 'RCH', 'GIS', 'MO', 'DA', 'YR', 'AREAkm2', 'FLOW_INcms', 'FLOW_OUTcms', 'EVAPcms', 'TLOSScms', 'SED_INtons',\n 'SED_OUTtons', 'SEDCONCmg/kg', 'ORGN_INkg', 'ORGN_OUTkg', 'ORGP_INkg', 'ORGP_OUTkg', 'NO3_INkg',\n 'NO3_OUTkg', 'NH4_INkg', 'NH4_OUTkg', 'NO2_INkg', 'NO2_OUTkg', 'MINP_INkg', 'MINP_OUTkg',\n 'CHLA_INkg', 'CHLA_OUTkg', 'CBOD_INkg', 'CBOD_OUTkg', 'DISOX_INkg', 'DISOX_OUTkg', 'SOLPST_INmg',\n 'SOLPST_OUTmg', 'SORPST_INmg', 'SORPST_OUTmg', 'REACTPSTmg', 'VOLPSTmg', 'SETTLPSTmg', 
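# note: these names mirror the column headers of SWAT's daily output.rch files; values are located below via rchday_column_list.index()
                      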
'RESUSP_PSTmg',\n 'DIFFUSEPSTmg', 'REACBEDPSTmg', 'BURYPSTmg', 'BED_PSTmg', 'BACTP_OUTct', 'BACTLP_OUTct', 'CMETAL#1kg',\n 'CMETAL#2kg', 'CMETAL#3kg', 'TOTNkg', 'TOTPkg', 'NO3ConcMg/l', 'WTMPdegc']\n\n\n\n\ndef upload_swat_outputs(output_path, watershed_name):\n conn = psycopg2.connect('dbname=swat2_swat_db user=tethys_super password=pass host=localhost port=5435')\n cur = conn.cursor()\n cur.execute(\"\"\"SELECT * FROM watershed WHERE name = '{0}'\"\"\".format(watershed_name))\n records = cur.fetchall()\n \n\n if len(records) > 0:\n print(\"watershed name already exists\")\n else:\n cur.execute(\"\"\"INSERT INTO watershed (name) VALUES ('{0}')\"\"\".format(watershed_name))\n\n conn.commit()\n\n cur.execute(\"\"\"SELECT * FROM watershed WHERE name = '{0}'\"\"\".format(watershed_name))\n records = cur.fetchall()\n print(records)\n watershed_id = records[0][0]\n print(watershed_id)\n\n for file in os.listdir(output_path):\n if file.endswith('.sub'):\n print('sub')\n sub_path = os.path.join(output_path, file)\n f = open(sub_path)\n for skip_line in f:\n if 'AREAkm2' in skip_line:\n break\n for num, line in enumerate(f, 1):\n line = str(line.strip())\n columns = line.split()\n if columns[0] != 'BIGSUB':\n split = columns[0]\n columns[0] = split[:6]\n columns.insert(1, split[6:])\n for idx, item in enumerate(sub_vars):\n sub = int(columns[1])\n dt = datetime.date(int(columns[5]), int(columns[3]), int(columns[4]))\n var_name = item\n val = float(columns[sub_column_list.index(item)])\n cur.execute(\"\"\"INSERT INTO output_sub (watershed_id, year_month_day, sub_id, var_name, val)\n VALUES ({0}, '{1}', {2}, '{3}', {4})\"\"\".format(watershed_id, dt, sub, var_name, val))\n\n conn.commit()\n\n if file.endswith('.rch'):\n if 'daily' in file:\n print('rch')\n rchday_path = os.path.join(output_path, file)\n f = open(rchday_path)\n for skip_line in f:\n if 'AREAkm2' in skip_line:\n break\n for num, line in enumerate(f, 1):\n line = str(line.strip())\n columns = line.split()\n for idx, item in enumerate(rch_vars):\n reach = int(columns[1])\n dt = datetime.date(int(columns[5]), int(columns[3]), int(columns[4]))\n var_name = item\n val = float(columns[rchday_column_list.index(item)])\n cur.execute(\"\"\"INSERT INTO output_rch_day (watershed_id, year_month_day, reach_id, var_name, val)\n VALUES ({0}, '{1}', {2}, '{3}', {4})\"\"\".format(watershed_id, dt, reach, var_name, val))\n\n conn.commit()\n\n conn.close()\n\n\n\nupload_swat_outputs(output_path, 'lower_mekong')\n\n# if file.endswith('.hru'):\n# print('hru')\n# hru_path = os.path.join(output_path, file)\n# f = open(hru_path)\n# for skip_line in f:\n# if 'LULC' in skip_line:\n# break\n#\n# for num, line in enumerate(f, 1):\n# line = str(line.strip())\n# columns = line.split()\n# if len(columns[0]) > 4:\n# split = columns[0]\n# split_parts = re.split('(\\d.*)', split)\n# columns[0] = split_parts[0]\n# columns.insert(1, split_parts[1])\n# if int(columns[7]) == year_one:\n# for idx, item in enumerate(hru_vars):\n# lulc = columns[0]\n# hru = int(columns[1])\n# sub = int(columns[3])\n# dt = datetime.date(int(columns[7]), int(columns[5]), int(columns[6]))\n# var_name = item\n# val = float(columns[hru_column_list.index(item)])\n# cur.execute(\"\"\"INSERT INTO output_hru (watershed_id, month_day_year, sub_id, hru_id, lulc, var_name, val)\n# VALUES ({0}, '{1}', {2}, {3}, '{4}', '{5}', {6})\"\"\".format(watershed_id, dt, sub, hru, lulc,\n# var_name, val))\n#\n# conn.commit()\n# else:\n# 
break","sub_path":"tethysapp-swat2/build/lib/tethysapp/swat2/output_to_db.py","file_name":"output_to_db.py","file_ext":"py","file_size_in_byte":6646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"651614200","text":"'''\n@Time : 2019/11/20 15:10\n@Author : XXXX\n@Software: PyCharm\n'''\nimport urllib\nimport requests\nimport os\nimport lxml\nfrom lxml import etree\n\nitems=[]\n\nclass item:\n title =''\n music_url = ''\n def __init__(self,title,music_url):\n self.title = title\n self.music_url = music_url\n\n def get_title(self):\n return self.title\n\n def get_music_url(self):\n return self.music_url\n\n\ndef start_request(url):\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Connection': 'keep-alive',\n 'Cache-Control':'max-age=0',\n 'Host': 'www.tukuppt.com',\n 'Referer': 'https://www.tukuppt.com/yinxiao/j109/',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',\n 'X-Requested-With': 'XMLHttpRequest'\n }\n res=requests.get(url,headers=headers,timeout=5)\n res.encoding='utf-8'\n html=lxml.etree.HTML(res.text)\n get_item(html)\n next_pages = html.xpath(\"//a[contains(text(),'下一页')]/@href\") # https://www.tukuppt.com\n return next_pages\n\ndef get_item(html):\n for box in html.xpath('//dl[@class=\"cbox audio-box \"]'):\n title=box.xpath('.//dt[@class=\"info\"]//a[1]/text()')[0]\n music_url = \"https:\" + box.xpath('.//audio[@preload=\"none\"]//source/@src')[0]\n it= item(title,music_url)\n items.append(it)\n\ndef run(start_url):\n next_pages=start_request(start_url)\n while len(next_pages) > 0:\n print(next_pages[0])\n page=start_request(\"https://www.tukuppt.com\"+next_pages[0])\n next_pages = page\n\ndef download_music(path):\n if not os.path.exists(path):\n os.makedirs(path)\n for it in items:\n try:\n print(\"*\"*10 + \"正在下载——\"+it.get_title()+\"——\"+\"*\"*10)\n content=urllib.request.urlopen(it.get_music_url(),timeout=5).read()\n with open(path+it.get_title()+'.MP3','wb') as file:\n file.write(content)\n file.close()\n except Exception as e:\n print(\"*\"*15+str(e))\n print('')\n continue\n\n\nif __name__ == \"__main__\":\n start_url=\"https://www.tukuppt.com/yinxiao/j110/\" #起始链接\n path=\"F://file//music//\" #存储地址\n run(start_url)\n download_music(path)\n","sub_path":"爬虫系列/0000.py","file_name":"0000.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"235846977","text":"from hillClimbing import hillClimbing # 引入解答類別\nfrom solution import Solution\nimport random\n\n\n\ndef encrypt(text, key):\n list2 = []\n klen = len(key)\n for i in range(len(text)):\n ki = i%klen\n list2.append(text[i]+key[ki])\n\nclass SolutionVirginia(Solution):\n def neighbor(self): # 單變數解答的鄰居函數。\n key1 = self.v.copy()\n len = key1.length\n i = random.randrange(0, len)\n key1[i] = random.randint(0, 255)\n return SolutionVirginia(key1) # 建立新解答並傳回。\n\n def height(self): # 能量函數\n key1 = self.v\n # 比對文章,看看出現多少次常用字,這就是分數\n score = fit(key, text)\n return score\n\n def str(self): # 將解答轉為字串,以供印出觀察。\n return \"key={} score={}\".format(self.v, 
self.height())","sub_path":"homework/work1/solutionVirginia.py","file_name":"solutionVirginia.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"326311072","text":"import os\nimport sys\nimport json\n\nclass Subjects:\n \"\"\"Subjects models the Eprint subjects values into a flat list\"\"\"\n\n def __init__ (self):\n self.subjects = {}\n\n def load_subjects(self, f_name):\n \"\"\"Load an Eprint subjects file, e.g. /eprint3-1/archives/REPO/cfg/subjects\"\"\"\n with open(f_name) as f:\n lines = f.readlines()\n for line in lines:\n # Trim comment\n if \"#\" in line:\n parts = line.split(sep = \"#\", maxsplit = 2)\n line = parts[0].strip()\n # split on colon\n if \":\" in line:\n parts = line.split(\":\")\n if len(parts) > 1:\n # get key and label\n key = parts[0].strip()\n label = parts[1].strip()\n # add to self.subjects\n self.subjects[key] = label\n\n \n def has_subject(self, key):\n if key in self.subjects:\n return True\n return False\n \n def get_subject(self, key):\n if key in self.subjects:\n return self.subjects[key]\n return ''\n\n def get_keys(self):\n keys = []\n for key in self.subjects:\n keys.append(key)\n return keys\n","sub_path":"eprinttools/subjects.py","file_name":"subjects.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"449198812","text":"from xdoctest.utils import util_misc\nimport sys\nfrom xdoctest import utils\n\n\ndef cmd(command):\n # simplified version of ub.cmd no fancy tee behavior\n import subprocess\n proc = subprocess.Popen(\n command, shell=True, universal_newlines=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n out, err = proc.communicate()\n ret = proc.wait()\n info = {\n 'proc': proc,\n 'out': out,\n 'test_doctest_in_notebook.ipynberr': err,\n 'ret': ret,\n }\n return info\n\n\ndef test_simple_pytest_cli():\n module_text = utils.codeblock(\n '''\n def module_func1():\n \"\"\"\n This module has a doctest\n\n Example:\n >>> print('hello world')\n \"\"\"\n ''')\n temp_module = util_misc.TempModule(module_text)\n modpath = temp_module.modpath\n\n info = cmd(sys.executable + ' -m pytest --xdoctest ' + modpath)\n print(info['out'])\n assert info['ret'] == 0\n\n\ndef test_simple_pytest_import_error_cli():\n \"\"\"\n This test case triggers an excessively long callback in xdoctest <\n dev/0.15.7\n\n CommandLine:\n xdoctest ~/code/xdoctest/testing/test_pytest_cli.py test_simple_pytest_import_error_cli\n \"\"\"\n module_text = utils.codeblock(\n '''\n # There are lines before the bad line\n import os\n import sys\n import does_not_exist\n\n def module_func1():\n \"\"\"\n This module has a doctest\n\n Example:\n >>> print('hello world')\n \"\"\"\n ''')\n temp_module = util_misc.TempModule(module_text, modname='imperr_test_mod')\n command = sys.executable + ' -m pytest -v -s --xdoctest-verbose=3 --xdoctest ' + temp_module.dpath\n print(command)\n info = cmd(command)\n # We patched doctest_example so it no longer outputs this in the traceback\n assert 'util_import' not in info['out']\n print(info['out'])\n # Note: flaky changes the return code from 1 to 3, so test non-zero\n assert info['ret'] != 0\n\n\ndef test_simple_pytest_syntax_error_cli():\n \"\"\"\n \"\"\"\n module_text = utils.codeblock(\n '''\n &&does_not_exist\n\n def module_func1():\n \"\"\"\n This module has a doctest\n\n Example:\n >>> print('hello world')\n \"\"\"\n ''')\n temp_module = 
util_misc.TempModule(module_text)\n info = cmd(sys.executable + ' -m pytest --xdoctest ' + temp_module.dpath)\n print(info['out'])\n assert info['ret'] != 0\n\n info = cmd(sys.executable + ' -m pytest --xdoctest ' + temp_module.modpath)\n print(info['out'])\n assert info['ret'] != 0\n\n\ndef test_simple_pytest_import_error_no_xdoctest():\n \"\"\"\n \"\"\"\n module_text = utils.codeblock(\n '''\n import does_not_exist\n\n def test_this():\n print('hello world')\n ''')\n temp_module = util_misc.TempModule(module_text)\n info = cmd(sys.executable + ' -m pytest ' + temp_module.modpath)\n print(info['out'])\n assert info['ret'] != 0\n\n info = cmd(sys.executable + ' -m pytest ' + temp_module.dpath)\n print(info['out'])\n assert info['ret'] != 0\n\n\ndef test_simple_pytest_syntax_error_no_xdoctest():\n \"\"\"\n \"\"\"\n module_text = utils.codeblock(\n '''\n &&does_not_exist\n\n def test_this():\n print('hello world')\n ''')\n temp_module = util_misc.TempModule(module_text)\n info = cmd(sys.executable + ' -m pytest ' + temp_module.modpath)\n print(info['out'])\n assert info['ret'] != 0\n\n info = cmd(sys.executable + ' -m pytest ' + temp_module.dpath)\n print(info['out'])\n assert info['ret'] != 0\n","sub_path":"testing/test_pytest_cli.py","file_name":"test_pytest_cli.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"445844840","text":"import seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef plot(data):\n corr = data.corr()\n dim = corr.shape\n sns.set(style=\"white\")\n fig = plt.subplots(figsize=tuple(np.subtract(dim, (1, 1))))\n mask = np.zeros_like(corr, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n fig = sns.heatmap(\n corr, annot=True, square=True, vmin=-1, vmax=1,\n cmap='viridis', cbar_kws={\"shrink\": .5}, center=0, mask=mask)\n fig.set_xlim(0, dim[0] - 1)\n fig.set_ylim(dim[1], 1)\n plt.show()\n","sub_path":"Scripts/Plotting.py","file_name":"Plotting.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"284422504","text":"# -*- coding: utf-8 -*-\nimport logging\nimport json\nimport os\nimport re\n\nfrom addons.base.models import BaseUserSettings, BaseNodeSettings\nfrom addons.osfstorage.models import OsfStorageFileNode\nfrom django.db import models\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils import timezone\nfrom addons.metadata import SHORT_NAME\nfrom osf.models import DraftRegistration, BaseFileNode, NodeLog, AbstractNode\nfrom osf.models.user import OSFUser\nfrom osf.models.base import BaseModel\nfrom osf.models.metaschema import RegistrationSchema\nfrom osf.utils.fields import EncryptedTextField, NonNaiveDateTimeField\nfrom website import settings as website_settings\n\nlogger = logging.getLogger(__name__)\n\n\nFIELD_GRDM_FILES = 'grdm-files'\n\n\ndef get_draft_files(draft_metadata):\n if FIELD_GRDM_FILES not in draft_metadata:\n return []\n draft_files = draft_metadata[FIELD_GRDM_FILES]\n if 'value' not in draft_files:\n return []\n draft_value = draft_files['value']\n if draft_value == '':\n return []\n return json.loads(draft_value)\n\ndef schema_has_field(schema, name):\n questions = sum([page['questions'] for page in schema['pages']], [])\n qids = [q['qid'] for q in questions]\n return name in qids\n\n\nclass ERadRecordSet(BaseModel):\n code = models.CharField(max_length=64, primary_key=True)\n\n def 
get_or_create_record(self, kenkyusha_no, kadai_id, nendo):\n objs = ERadRecord.objects.filter(\n recordset=self, kenkyusha_no=kenkyusha_no, kadai_id=kadai_id,\n nendo=nendo,\n )\n if objs.exists():\n return objs.first()\n return ERadRecord.objects.create(\n recordset=self, kenkyusha_no=kenkyusha_no, kadai_id=kadai_id,\n nendo=nendo,\n )\n\n @classmethod\n def get_or_create(cls, code):\n objs = cls.objects.filter(code=code)\n if objs.exists():\n return objs.first()\n return cls.objects.create(code=code)\n\n\nclass ERadRecord(BaseModel):\n recordset = models.ForeignKey(ERadRecordSet, related_name='records',\n db_index=True, null=True, blank=True,\n on_delete=models.CASCADE)\n\n kenkyusha_no = models.TextField(blank=True, null=True, db_index=True)\n kenkyusha_shimei = EncryptedTextField(blank=True, null=True)\n\n kenkyukikan_cd = models.TextField(blank=True, null=True)\n kenkyukikan_mei = models.TextField(blank=True, null=True)\n\n haibunkikan_cd = models.TextField(blank=True, null=True)\n haibunkikan_mei = models.TextField(blank=True, null=True)\n\n nendo = models.IntegerField(blank=True, null=True)\n\n seido_cd = models.TextField(blank=True, null=True)\n seido_mei = models.TextField(blank=True, null=True)\n\n jigyo_cd = models.TextField(blank=True, null=True)\n jigyo_mei = models.TextField(blank=True, null=True)\n\n kadai_id = models.TextField(blank=True, null=True)\n kadai_mei = EncryptedTextField(blank=True, null=True)\n\n bunya_cd = models.TextField(blank=True, null=True)\n bunya_mei = models.TextField(blank=True, null=True)\n\n japan_grant_number = models.TextField(blank=True, null=True)\n program_name_ja = models.TextField(blank=True, null=True)\n program_name_en = models.TextField(blank=True, null=True)\n funding_stream_code = models.TextField(blank=True, null=True)\n\n class Meta:\n indexes = [\n models.Index(fields=['kenkyusha_no', 'kadai_id', 'nendo'])\n ]\n\n\nclass RegistrationReportFormat(BaseModel):\n registration_schema_id = models.CharField(max_length=64, blank=True, null=True)\n\n name = models.TextField(blank=True, null=True)\n\n default_filename = models.TextField(blank=True, null=True)\n csv_template = models.TextField(blank=True, null=True)\n\n\nclass UserSettings(BaseUserSettings):\n pass\n\n\nclass NodeSettings(BaseNodeSettings):\n project_metadata = models.TextField(blank=True, null=True)\n\n user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)\n\n @property\n def complete(self):\n # Implementation for enumeration with /addons API\n return True\n\n def get_file_metadatas(self):\n files = []\n for m in self.file_metadata.filter(deleted__isnull=True):\n r = {\n 'generated': False,\n 'path': m.path,\n 'hash': m.hash,\n 'folder': m.folder,\n 'urlpath': m.resolve_urlpath(),\n }\n r.update(self._get_file_metadata(m))\n files.append(r)\n return files\n\n def get_file_metadata_for_path(self, path):\n q = self.file_metadata.filter(deleted__isnull=True, path=path)\n if not q.exists():\n parent, _ = os.path.split(path.strip('/'))\n if len(parent) == 0:\n return None\n r = self.get_file_metadata_for_path(parent + '/')\n if r is None:\n return None\n r['generated'] = True\n r['hash'] = None\n r['path'] = path\n return r\n m = q.first()\n r = {\n 'generated': False,\n 'path': m.path,\n 'folder': m.folder,\n 'hash': m.hash,\n 'urlpath': m.resolve_urlpath(),\n }\n r.update(self._get_file_metadata(m))\n return r\n\n def set_file_metadata(self, filepath, file_metadata, auth=None):\n self._validate_file_metadata(file_metadata)\n q = 
self.file_metadata.filter(deleted__isnull=True, path=filepath)\n if not q.exists():\n FileMetadata.objects.create(\n creator=auth.user if auth is not None else None,\n user=auth.user if auth is not None else None,\n project=self,\n path=filepath,\n hash=file_metadata['hash'],\n folder=file_metadata['folder'],\n metadata=json.dumps({'items': file_metadata['items']})\n )\n if auth:\n self.owner.add_log(\n action='metadata_file_added',\n params={\n 'project': self.owner.parent_id,\n 'node': self.owner._id,\n 'path': filepath,\n },\n auth=auth,\n )\n return\n m = q.first()\n m.hash = file_metadata['hash']\n m.metadata = json.dumps({'items': file_metadata['items']})\n m.user = auth.user if auth is not None else None\n for item in file_metadata['items']:\n if not item['active']:\n continue\n self._update_draft_files(\n item['schema'],\n filepath,\n item['data'])\n m.save()\n if auth:\n self.owner.add_log(\n action='metadata_file_updated',\n params={\n 'project': self.owner.parent_id,\n 'node': self.owner._id,\n 'path': filepath,\n },\n auth=auth,\n )\n\n def set_file_hash(self, filepath, hash):\n q = self.file_metadata.filter(deleted__isnull=True, path=filepath)\n if not q.exists():\n return\n m = q.first()\n m.hash = hash\n m.save()\n\n def delete_file_metadata(self, filepath, auth=None):\n q = self.file_metadata.filter(deleted__isnull=True, path=filepath)\n if not q.exists():\n return\n metadata = q.first()\n for schema in self._get_related_schemas(metadata.metadata):\n self._remove_draft_files(schema, filepath)\n metadata.deleted = timezone.now()\n metadata.save()\n if auth:\n self.owner.add_log(\n action='metadata_file_deleted',\n params={\n 'project': self.owner.parent_id,\n 'node': self.owner._id,\n 'path': filepath,\n },\n auth=auth,\n )\n\n def get_project_metadata(self):\n if self.project_metadata is None or self.project_metadata == '':\n r = {}\n else:\n r = json.loads(self.project_metadata)\n r.update({\n 'files': self.get_file_metadatas(),\n })\n return r\n\n def get_report_formats_for(self, schemas):\n formats = []\n for schema in schemas:\n for format in RegistrationReportFormat.objects.filter(registration_schema_id=schema._id):\n formats.append({\n 'schema_id': schema._id,\n 'name': format.name,\n })\n return {\n 'formats': formats\n }\n\n def update_file_metadata_for(self, action, payload, auth):\n if action in [NodeLog.FILE_RENAMED, NodeLog.FILE_MOVED, NodeLog.FILE_COPIED]:\n src = payload['source']\n dest = payload['destination']\n elif action in [NodeLog.FILE_REMOVED]:\n src = payload['metadata']\n dest = payload['metadata']\n else:\n return\n if src['nid'] == dest['nid']:\n source_addon = self\n else:\n source_node = AbstractNode.load(payload['source']['nid'])\n if source_node is None:\n return\n source_addon = source_node.get_addon(SHORT_NAME)\n if source_addon is None:\n return\n src_path = os.path.join(src['provider'], src['materialized'])\n dest_path = os.path.join(dest['provider'], dest['materialized'])\n if src_path.endswith('/'):\n q = source_addon.file_metadata.filter(path__startswith=src_path)\n path_suffixes = [fm.path[len(src_path):] for fm in q.all()]\n else:\n path_suffixes = ['']\n for path_suffix in path_suffixes:\n src_path_child = src_path + path_suffix\n dest_path_child = dest_path + path_suffix\n q = source_addon.file_metadata.filter(deleted__isnull=True, path=src_path_child)\n if not q.exists():\n continue\n if action in [NodeLog.FILE_RENAMED, NodeLog.FILE_MOVED, NodeLog.FILE_COPIED]:\n m = q.first()\n file_metadata = {\n 'path': dest_path_child,\n 
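# carry the folder flag, hash and metadata items over from the source entry; only the path changes
                    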
'folder': m.folder,\n 'hash': m.hash,\n 'items': self._get_file_metadata(m).get('items', [])\n }\n self.set_file_metadata(dest_path_child, file_metadata, auth)\n if action in [NodeLog.FILE_RENAMED, NodeLog.FILE_MOVED, NodeLog.FILE_REMOVED]:\n self.delete_file_metadata(src_path_child, auth)\n\n def _get_file_metadata(self, file_metadata):\n if file_metadata.metadata is None or file_metadata.metadata == '':\n return {}\n return json.loads(file_metadata.metadata)\n\n def _validate_file_metadata(self, file_metadata):\n if 'path' not in file_metadata:\n raise ValueError('Property \"path\" is not defined')\n if 'folder' not in file_metadata:\n raise ValueError('Property \"folder\" is not defined')\n if 'hash' not in file_metadata:\n raise ValueError('Property \"hash\" is not defined')\n if 'items' not in file_metadata:\n raise ValueError('Property \"items\" is not defined')\n for i in file_metadata['items']:\n self._validate_file_metadata_item(i)\n\n def _validate_file_metadata_item(self, item):\n if 'active' not in item:\n raise ValueError('Property \"active\" is not defined')\n if 'schema' not in item:\n raise ValueError('Property \"schema\" is not defined')\n if 'data' not in item:\n raise ValueError('Property \"data\" is not defined')\n\n def _update_draft_files(self, schema, filepath, metadata):\n drafts = self._get_registration_schema(schema)\n for draft in drafts:\n draft_schema = draft.registration_schema.schema\n if not schema_has_field(draft_schema, FIELD_GRDM_FILES):\n raise ValueError('Schema has no grdm-files field')\n draft_metadata = draft.registration_metadata\n draft_files = get_draft_files(draft_metadata)\n for draft_file in draft_files:\n if draft_file['path'] != filepath:\n continue\n draft_file['metadata'] = metadata\n self._update_draft_grdm_files(draft, draft_files)\n\n def _remove_draft_files(self, schema, filepath):\n drafts = self._get_registration_schema(schema)\n for draft in drafts:\n draft_schema = draft.registration_schema.schema\n if not schema_has_field(draft_schema, FIELD_GRDM_FILES):\n raise ValueError('Schema has no grdm-files field')\n draft_metadata = draft.registration_metadata\n draft_files = get_draft_files(draft_metadata)\n draft_files = [draft_file\n for draft_file in draft_files\n if draft_file['path'] != filepath]\n self._update_draft_grdm_files(draft, draft_files)\n\n def _update_draft_grdm_files(self, draft, draft_files):\n value = json.dumps(draft_files, indent=2) if len(draft_files) > 0 else ''\n draft.update_metadata({\n FIELD_GRDM_FILES: {\n 'value': value,\n },\n })\n draft.save()\n\n def _get_related_schemas(self, metadata):\n if metadata is None or len(metadata) == 0:\n return []\n metadataobj = json.loads(metadata)\n if 'items' not in metadataobj:\n return []\n return [i['schema'] for i in metadataobj['items']]\n\n def _get_registration_schema(self, schema):\n try:\n registration_schema = RegistrationSchema.objects.get(_id=schema)\n drafts = DraftRegistration.objects.filter(\n branched_from=self.owner,\n registration_schema=registration_schema\n )\n return drafts\n except RegistrationSchema.DoesNotExist:\n return []\n\n\nclass FileMetadata(BaseModel):\n project = models.ForeignKey(NodeSettings, related_name='file_metadata',\n db_index=True, null=True, blank=True,\n on_delete=models.CASCADE)\n\n deleted = NonNaiveDateTimeField(blank=True, null=True)\n\n folder = models.BooleanField()\n\n path = models.TextField()\n\n hash = models.CharField(max_length=128, blank=True, null=True)\n\n metadata = models.TextField(blank=True, null=True)\n\n 
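# audit fields: 'creator' records who first added the metadata, 'user' records the last modifier
    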
creator = models.ForeignKey(\n        OSFUser,\n        related_name='file_metadata_created',\n        null=True,\n        blank=True,\n        on_delete=models.CASCADE,\n    )\n\n    user = models.ForeignKey(\n        OSFUser,\n        related_name='file_metadata_modified',\n        null=True,\n        blank=True,\n        on_delete=models.CASCADE,\n    )\n\n    @classmethod\n    def load(cls, project_id, path, select_for_update=False):\n        try:\n            return cls.objects.get(project__id=project_id, path=path) if not select_for_update else cls.objects.filter(project__id=project_id, path=path).select_for_update().get()\n        except cls.DoesNotExist:\n            return None\n\n    @property\n    def _id(self):\n        path_id = self.path.replace('/', '_')\n        return f'{self.project.owner._id}_{path_id}'\n\n    @property\n    def metadata_properties(self):\n        if not self.metadata:\n            return {}\n        m = json.loads(self.metadata)\n        return m\n\n    @property\n    def node(self):\n        if self.project is None:\n            return None\n        return self.project.owner\n\n    def resolve_urlpath(self):\n        node = self.project.owner\n        if self.folder:\n            return node.url + 'files/dir/' + self.path\n        m = re.match(r'([^\\/]+)(/.*)', self.path)\n        if not m:\n            raise ValueError('Malformed path: ' + self.path)\n        provider = m.group(1)\n        path = m.group(2)\n        if provider == 'osfstorage':\n            # materialized path -> object path\n            content_type = ContentType.objects.get_for_model(node)\n            filenode = [fn for fn in OsfStorageFileNode.objects.filter(\n                target_content_type=content_type,\n                target_object_id=node.id\n            ) if fn.materialized_path == path]\n            if len(filenode) == 0:\n                logger.warning('No files: ' + self.path)\n                return None\n            path = filenode[0].path\n        file_guids = BaseFileNode.resolve_class(provider, BaseFileNode.FILE).get_file_guids(\n            materialized_path=path,\n            provider=provider,\n            target=node\n        )\n        if len(file_guids) == 0:\n            fileUrl = node.url + 'files/' + provider + path\n            logger.info('No guid: ' + self.path + ' (provider=' + provider + ')')\n            return fileUrl\n        return '/' + file_guids[0] + '/'\n\n    def update_search(self):\n        from website import search\n        try:\n            search.search.update_file_metadata(self, bulk=False, async_update=True)\n        except search.exceptions.SearchUnavailableError as e:\n            logger.exception(e)\n\n    def save(self, *args, **kwargs):\n        rv = super(FileMetadata, self).save(*args, **kwargs)\n        if self.node and (self.node.is_public or website_settings.ENABLE_PRIVATE_SEARCH):\n            self.update_search()\n        return rv\n","sub_path":"addons/metadata/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":17525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"118445254","text":"# copy-pasted from keras85\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom keras.datasets import mnist # sample dataset bundled with keras\n\nmnist.load_data() # load the mnist file\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data() # mnist already ships pre-split into train and test sets\n\nprint(x_train[0]) # values from 0 to 255 (pixel colour intensities)\nprint('y_train: ' , y_train[0]) # 5\n\nprint(x_train.shape) # (60000, 28, 28)\nprint(x_test.shape) # (10000, 28, 28)\nprint(y_train.shape) # (60000,) : a 1-D vector of 60000 scalars\nprint(y_test.shape) # (10000,)\n\n\n\n# data preprocessing 1: one-hot encode the y values\nfrom keras.utils import np_utils\n\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\nprint(y_train.shape) # (60000, 10)\n\n# data preprocessing 2: normalise the x values (MinMaxScaler-style)\nx_train = x_train.reshape(60000, 28, 28, 1).astype('float32') / 255.\nx_test = x_test.reshape(10000, 28, 28, 1).astype('float32') / 255.\n\n\n# load the saved model\nfrom keras.models import load_model \n\nmodel = load_model('./model/model_test01.h5')\n\nmodel.summary()\n\n\n#4. evaluate\nloss_acc = model.evaluate(x_test, y_test, batch_size= 64)\n\nprint('loss_acc: ', loss_acc) \n\ny_predict = model.predict(x_test[0: 10])\ny_predict = np.argmax(y_predict, axis=1)\nprint(y_predict)\n\n\n# loss = hist.history['loss'] # values produced by model.fit\n# val_loss = hist.history['val_loss']\n# acc = hist.history['acc']\n# val_acc = hist.history['val_acc']\n\n# print('acc: ', acc) \n# print('val_acc: ', val_acc)\n# print('loss_acc: ', loss_acc) \n\n# import matplotlib.pyplot as plt \n\n# plt.figure(figsize = (10, 6)) # creates a 10 x 6 inch canvas\n\n# # figure 1\n# plt.subplot(2, 1, 1) # (2, 1, 1) first plot of a 2-row, 1-column grid / subplot: two figures\n# plt.plot(hist.history['loss'], marker='.', c='red', label='loss') \n# plt.plot(hist.history['val_loss'], marker='.', c='blue', label='val_loss') \n# plt.grid() # draw grid lines\n# plt.title('loss')\n# plt.ylabel('loss')\n# plt.xlabel('epoch')\n# # plt.legend(['loss','val_loss']) \n# plt.legend(loc='upper right') \n\n# # figure 2\n# plt.subplot(2, 1, 2) # (2, 1, 2) second plot of the 2-row, 1-column grid\n# plt.plot(hist.history['acc']) \n# plt.plot(hist.history['val_acc']) \n# plt.grid() # draw grid lines\n# plt.title('accuracy')\n# plt.ylabel('accuracy')\n# plt.xlabel('epoch')\n# plt.legend(['acc','val_acc'])\n\n# plt.show() \n\n","sub_path":"keras/keras86_load_model1.py","file_name":"keras86_load_model1.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"12365887","text":"class Heap:\n    def max_heapify(self, a, i, n):\n        l = i * 2 + 1\n        r = i * 2 + 2\n        largest = i\n        if l < n and a[l] > a[i]:\n            largest = l\n        if r < n and a[r] > a[largest]:\n            largest = r\n        if largest != i:\n            temp = a[i]\n            a[i] = a[largest]\n            a[largest] = temp\n\n            self.max_heapify(a, largest, n)\n\n        return a\n\n    def build_heap(self, a):\n        for i in range(int(len(a) / 2), -1, -1):\n            self.max_heapify(a, i, len(a))\n        return a\n\n    def heapsort(self, a):\n        result = []\n        self.build_heap(a) # arrange the list into a max-heap before extracting\n        while len(a) > 0:\n            temp = a[0]\n            a[0] = a[-1]\n            a[-1] = temp\n\n            result.append(a.pop(-1))\n\n            self.max_heapify(a, 0, len(a))\n\n        return result\n\n\nclass Heap_again:\n    def max_heapify(self, a, i, n):\n\n        l_i = i * 2 + 1\n        r_i = i * 2 + 2\n        largest_i = i\n        if l_i < n and a[l_i] > a[i]:\n            largest_i = l_i\n        if r_i < n and a[r_i] > a[largest_i]:\n            largest_i = r_i\n\n        if largest_i == i:\n            return a\n        temp = a[largest_i]\n        a[largest_i] = a[i]\n        a[i] = temp\n\n        return self.max_heapify(a, largest_i, n)\n\n    def build_heap(self, a):\n        for i in range(len(a) // 2, -1, -1):\n            self.max_heapify(a, i, len(a))\n        return a\n\n    def heapsort(self, a):\n        result = []\n        self.build_heap(a)\n        while len(a) > 0:\n            temp = a[0]\n            a[0] = a[len(a) - 1]\n            a[len(a) - 1] = temp\n            result.append(a.pop(-1))\n\n            self.max_heapify(a, 0, len(a))\n\n        return result\n","sub_path":"practice/heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"477832879","text":"import socket\nimport struct\nimport json\nimport subprocess\nimport os\n\nfrom conf.settings import *\nfrom core.auth import login\n\n\n\ndef run():\n    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    server_socket.bind((HOST, PORT))\n    server_socket.listen(5)\n\n    actions = {\n        'ls': check,\n        'get': get,\n        'put': put,\n    }\n\n    while True:\n        print('starting...')\n        conn, addr = server_socket.accept()\n        username = conn.recv(1024)\n        password = conn.recv(1024)\n        result = login(username, password)\n        if result:\n            conn.send('True'.encode('utf-8'))\n            while True:\n                cmd = conn.recv(1024)\n                if not cmd: break\n                cmd_list = cmd.decode('utf-8').split()\n                if cmd_list[0] in actions:\n                    actions[cmd_list[0]](conn, cmd, result)\n\n        else:\n            conn.send('False'.encode('utf-8'))\n\n\ndef check(conn, cmd, user_dict):\n    if len(cmd.split()) == 1:\n        cmd = 'ls %s' % user_dict['home_path']\n\n    obj = subprocess.Popen(cmd,\n                           stdout=subprocess.PIPE,\n                           stderr=subprocess.PIPE,\n                           shell=True)\n\n    stdout = obj.stdout.read()\n    stderr = obj.stderr.read()\n    if stderr:\n        send_body = stderr\n    else:\n        send_body = stdout\n\n    header = {'data_size': len(send_body)}\n    header_bytes = json.dumps(header).encode('utf-8')\n    header_length = struct.pack('i', len(header_bytes))\n\n    conn.send(header_length)\n    conn.send(header_bytes)\n    conn.sendall(send_body)\n\n\ndef get(conn, cmd, user_dict):\n    filename = cmd.decode('utf-8').split()[1]\n    share_file = SHARE_DIR + filename\n\n    header = {'filename': filename, 'file_size': os.path.getsize(share_file)}\n    header_bytes = json.dumps(header).encode('utf-8')\n    header_length = struct.pack('i', len(header_bytes))\n\n    conn.send(header_length)\n    conn.send(header_bytes)\n\n    with open(share_file, 'rb') as f:\n        for line in f:\n            conn.send(line)\n\n\n\ndef put(conn, cmd, user_dict):\n    pass\n","sub_path":"模块三作业/FTP/ftp_server/core/ftp_server.py","file_name":"ftp_server.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"316142670","text":"# Given a sorted array, remove the duplicates in place such that each element appears only once and return the new length.\n\n# Do not allocate extra space for another array, \n# you must do this in place with constant memory.\n\n\nclass Solution:\n    \"\"\"\n    @param A: a list of integers\n    @return an integer\n    \"\"\"\n    def removeDuplicates(self, A):\n        # write your code here\n        if not A:\n            return 0\n        j = 0\n        for i in range(1, len(A)):\n            if A[i] != A[j]:\n                A[j + 1] = A[i]\n                j += 1\n\n        return j + 1\n","sub_path":"remove_duplicates_from_sorted_array.py","file_name":"remove_duplicates_from_sorted_array.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"148700508","text":"import discord\nfrom discord.ext import commands\n\n\nclass Node:\n    def __init__(self, question, emoji):\n        self.question = question\n        self.emoji = emoji\n        self.children = []\n\n    def add_children(self, *children):\n        for child in children:\n            self.children.append(child)\n\n    def get_node(self, question):\n        list = self.create_list()\n        for node in list:\n            if 
node.question == question:\n return node\n raise Exception(\"There is no node with this question in this tree!\")\n\n def create_list(self, list=[]):\n list.append(self)\n for child in self.children:\n child.create_list(list)\n return list\n\n def print_tree(self):\n print(self.question)\n for child in self.children:\n child.print_tree()\n\n\nroot = Node(':one: Comment rejoindre le serveur MC?\\n:two: Comment devenir builder officiel?', None)\nsecond = Node('Il faut déjà être builder officiel!', '1\\N{VARIATION SELECTOR-16}\\N{COMBINING ENCLOSING KEYCAP}')\nthird = Node(\"Il faut constuire 2 bâtiments en solo.\", '2\\N{VARIATION SELECTOR-16}\\N{COMBINING ENCLOSING KEYCAP}')\nroot.add_children(second, third)\n\n\nclass HelpBuilding(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n async def add_emojis(self, message, node):\n for child in node.children:\n try:\n await message.add_reaction(child.emoji)\n except discord.errors.HTTPException:\n print(f\"Emoji {child.emoji} not found!\")\n\n @commands.command(brief='[WIP] Aide type FAQ en cours de création')\n async def build(self, ctx):\n if ctx.channel.type != discord.ChannelType.private:\n await ctx.send(f\"{ctx.author.mention}, regarde tes MPs! :mailbox:\")\n message = await ctx.author.send(embed=discord.Embed(description=root.question))\n await self.add_emojis(message, root)\n\n @commands.command(brief='Trouve le nom d\\'une emoji')\n @commands.check_any(commands.is_owner())\n async def emojiname(self, ctx, emoji):\n await ctx.author.send(emoji.encode('ascii', 'namereplace'))\n\n @commands.Cog.listener()\n async def on_reaction_add(self, reaction, user):\n if reaction.message.channel.type == discord.ChannelType.private and not user.bot:\n try:\n node = root.get_node(reaction.message.embeds[0].description)\n except Exception:\n return\n for child in node.children:\n if child.emoji == reaction.emoji:\n message = await reaction.message.channel.send(embed=discord.Embed(type='rich', description=child.question))\n await self.add_emojis(message, child)\n break\n\n\ndef setup(client):\n client.add_cog(HelpBuilding(client))\n","sub_path":"cogs/help_building.py","file_name":"help_building.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"503688650","text":"import independentreserve as ir\nimport json\nimport time\n\nwith open('config.json', 'r') as file:\n config = json.loads(file.read())\n\nCONNECTION = ir.PublicMethods()\nAPI = ir.PrivateMethods(config['ApiKey'], config['ApiSecret'])\n\n\ndef get_open_orders_info():\n open_orders = API.get_open_orders(\n primary_currency_code=config['CurrencyCode']['primary'],\n secondary_currency_code=config['CurrencyCode']['secondary'],\n page_index=1,\n page_size=25\n )\n time.sleep(1)\n return open_orders\n\n\ndef guid_collection_for_open_orders():\n guid_collection = []\n orders_info = get_open_orders_info()\n if len(orders_info['Data']) > 0:\n for item in orders_info['Data']:\n guid_collection.append(item['OrderGuid'])\n return guid_collection\n else:\n return None\n\n\ndef cancel_all_orders():\n total_orders = guid_collection_for_open_orders()\n if total_orders is None:\n print('Empty open orders')\n else:\n for item in total_orders:\n API.cancel_order(item)\n time.sleep(1)\n\n\ndef account_balance():\n response = API.get_accounts()\n time.sleep(1)\n return response\n\n\ndef handle_filled_orders():\n closed_filled_orders = API.get_closed_filled_orders(\n 
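# first page of the most recent fills for the configured currency pair
        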
primary_currency_code=config['CurrencyCode']['primary'],\n secondary_currency_code=config['CurrencyCode']['secondary'],\n page_index=1,\n page_size=50\n )\n time.sleep(1)\n return closed_filled_orders['Data'][0:16]\n\n\ndef get_order_amount():\n offer_amount = 0\n bid_amount = 0\n for item in config['Data']['Offer']:\n offer_amount += item['volume']\n for item in config['Data']['Bid']:\n bid_amount += item['volume'] * item['price']\n\n return offer_amount, bid_amount\n\n\ndef order_log(res):\n for item in res:\n print(f'{item[\"Type\"]} - Volume: {item[\"VolumeOrdered\"]}, Price: {item[\"Price\"]}')\n\n\nif __name__ == '__main__':\n # handle_filled_orders()\n # account_balance()\n # get_open_orders_info()\n cancel_all_orders()\n # get_order_amount()\n","sub_path":"order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"28568182","text":"#!/usr/bin/python\n#\n# Copyright 2016 Allister Banks/@arubdesu, pls tell me if I screwed it up\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"See docstring for JSONParser class\"\"\"\n\nimport json\nimport urllib2\n\nfrom autopkglib import Processor, ProcessorError\n\n__all__ = [\"JSONParser\"]\n\n\nMASTER_FEED_URL = \"https://objective-see.com/products.json\"\n\n\nclass JSONParser(Processor):\n #pylint disable=line-too-long\n \"\"\"Gets download links, based on products from the ObjectiveSee apps master feed\n Assumes handoff to URLDownloader as subsequent step.\n \"\"\"\n\n input_variables = {\n \"product\": {\n \"required\": True,\n \"description\":\n \"Which product from the objsee feed to fetch.\",\n },\n \"format\": {\n \"required\": False,\n \"description\": \n \"If multiple formats are provided, (e.g. 
pkg/dmg/zip)\"\n \"returns that URL, otherwise chooses zip.\",\n \"default\": \"zip\",\n },\n }\n output_variables = {\n \"url\": {\n \"description\":\n \"Returned download URL\",\n },\n \"version\": {\n \"description\":\n \"Returned version from JSON feed\",\n },\n }\n\n description = __doc__\n\n def main(self):\n \"\"\"gimme some main\"\"\"\n getit = self.env['product']\n full_feed = urllib2.urlopen(MASTER_FEED_URL, timeout = 3).read()\n full_dict = json.loads(full_feed)\n we_want = full_dict.get(getit)\n if we_want:\n our_format = self.env.get('format', 'zip')\n self.env['url'] = we_want.get(our_format)\n self.env['version'] = we_want.get('version')\n else:\n raise ProcessorError(\"Product or format not found\")\n\n\nif __name__ == '__main__':\n PROCESSOR = JSONParser()\n PROCESSOR.execute_shell()","sub_path":"SharedProcessors/JSONParser.py","file_name":"JSONParser.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"586535681","text":"import csv\r\n\r\ncsv_file=open(\"login_credentials.csv\",\"r\").readlines()\r\nheader=csv_file.pop(0)\r\nprint(header)\r\n\r\nfilename=1\r\n\r\nnum_of_lines=len(csv_file)\r\nfor i in range(num_of_lines):\r\n if i%int(num_of_lines/3+1)==0:\r\n write_file=open(str(filename)+\".csv\",\"w+\")\r\n write_file.write(header)\r\n write_file.writelines(csv_file[i:i+int(num_of_lines/3+1)])\r\n filename+=1","sub_path":"MultipleClientServerProgram/code/splitCSV.py","file_name":"splitCSV.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"497934164","text":"\nfrom lxml import etree\nimport xmltodict\nimport json\nfrom collections import OrderedDict\n\n\nXSI_NS = 'http://www.w3.org/2001/XMLSchema-instance'\n \nclass EDM(object):\n \"\"\"The standard EDM metadata format.\n \n\n It is registered under the name 'oai_dc'\n \"\"\"\n \n def __init__(self, prefix, config, db):\n self.prefix = prefix\n self.config = config\n self.db = db\n\n self.ns = {'oai_dc': 'http://www.openarchives.org/OAI/2.0/oai_dc/',\n 'dc':'http://purl.org/dc/elements/1.1/',\n 'dcterms':'http://purl.org/dc/terms/',\n 'dct': 'http://purl.org/dc/terms/',\n 'edm' : 'http://www.europeana.eu/schemas/edm/',\n 'foaf': 'http://xmlns.com/foaf/0.1/',\n 'owl' : 'http://www.w3.org/2002/07/owl#',\n 'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',\n 'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',\n 'skos': 'http://www.w3.org/2004/02/skos/core#',\n 'xsi' : 'http://www.w3.org/2001/XMLSchema-instance',\n 'ore': 'http://www.openarchives.org/ore/terms/',\n 'svcs': 'http://rdfs.org/sioc/services#',\n 'doap': 'http://usefulinc.com/ns/doap#',\n 'rdaGr2': 'http://rdvocab.info/ElementsGr2/'\n }\n self.schemas = {\n 'edm': 'http://www.europeana.eu/schemas/edm/EDM-EXTERNAL-MAIN.xsd'}\n \n def get_namespace(self):\n return self.ns[self.prefix]\n\n def get_schema_location(self):\n return self.schemas[self.prefix]\n \n def fix_ordering_of_edm_elements(self, rdf):\n edm_provider_cho_order = [\n \"dc:contributor\",\n \"dc:coverage\",\n \"dc:creator\",\n \"dc:date\",\n \"dc:description\",\n \"dc:format\",\n \"dc:identifier\",\n \"dc:language\",\n \"dc:publisher\",\n \"dc:relation\",\n \"dc:rights\",\n \"dc:source\",\n \"dc:subject\",\n \"dc:title\",\n \"dc:type\",\n \"dcterms:alternative\",\n \"dcterms:conformsTo\",\n \"dcterms:created\",\n \"dcterms:extent\",\n \"dcterms:hasFormat\",\n \"dcterms:hasPart\",\n \"dcterms:hasVersion\",\n 
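# EDM prescribes a fixed element order inside each class; fix_ordering_of_edm_elements re-sorts keys to match these lists
            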
\"dcterms:isFormatOf\",\n \"dcterms:isPartOf\",\n \"dcterms:isReferencedBy\",\n \"dcterms:isReplacedBy\",\n \"dcterms:isRequiredBy\",\n \"dcterms:issued\",\n \"dcterms:isVersionOf\",\n \"dcterms:medium\",\n \"dcterms:provenance\",\n \"dcterms:references\",\n \"dcterms:replaces\",\n \"dcterms:requires\",\n \"dcterms:spatial\",\n \"dcterms:tableOfContents\",\n \"dcterms:temporal\",\n \"edm:currentLocation\",\n \"edm:hasMet\",\n \"edm:hasType\",\n \"edm:incorporates\",\n \"edm:isDerivativeOf\",\n \"edm:isNextInSequence\",\n \"edm:isRelatedTo\",\n \"edm:isRepresentationOf\",\n \"edm:isSimilarTo\",\n \"edm:isSuccessorOf\",\n \"edm:realizes\",\n \"edm:type\",\n \"owl:sameAs\",\n ]\n web_resource_order = [\n \"dc:creator\",\n \"dc:description\",\n \"dc:format\",\n \"dc:rights\",\n \"dc:source\",\n \"dc:type\",\n \"dcterms:confromsTo\",\n \"dcterms:created\",\n \"dcterms:extent\",\n \"dcterms:hasPart\",\n \"dcterms:isFormatOf\",\n \"dcterms:isPartOf\",\n \"dcterms:isReferencedBy\",\n \"dcterms:issued\",\n \"edm:isNextInSequence\",\n \"edm:rights\",\n \"owl:sameAs\",\n \"svcs:has_service\",\n ]\n ore_agg_order = [\n \"edm:aggregatedCHO\",\n \"edm:dataProvider\",\n \"edm:hasView\",\n \"edm:isShownAt\",\n \"edm:isShownBy\",\n \"edm:object\",\n \"edm:provider\",\n \"dc:rights\",\n \"edm:rights\",\n \"edm:ugc\",\n ]\n edm_agent_order = [\n \"skos:prefLabel\",\n \"skos:altLabel\",\n \"skos:note\",\n \"dc:date\",\n \"dc:identifier\",\n \"dcterms:hasPart\",\n \"dcterms:isPartOf\",\n \"edm:begin\",\n \"edm:end\",\n \"edm:hasMet\",\n \"edm:isRelatedTo\",\n \"foaf:name\",\n \"rdaGr2:biographicalInformation\",\n \"rdaGr2:dateOfBirth\",\n \"rdaGr2:dateOfDeath\",\n \"rdaGr2:dateOfEstablishment\",\n \"rdaGr2:dateOfTermincation\",\n \"rdaGr2:gender\",\n \"rdaGr2:placeOfBirth\",\n \"rdaGr2:placeOfDeath\",\n \"rdaGr2:professionOrOccupation\",\n \"owl:sameAs\",\n ]\n edm_place_order = [\n \"wgs84_pos:lat\",\n \"wgs84_pos:long\",\n \"wgs84_pos:alt\",\n \"skos:prefLabel\",\n \"skos:altLabel\",\n \"skos:note\",\n \"dcterms:hasPart\",\n \"dcterms:isPartOf\",\n \"edm:isNextInSequence\",\n \"owl:sameAs\",\n ]\n edm_timespan_order = [\n \"skos:prefLabel\",\n \"skos:altLabel\",\n \"skos:note\",\n \"dcterms:hasPart\",\n \"dcterms:isPartOf\",\n \"edm:begin\",\n \"edm:end\",\n \"edm:isNextInSequence\",\n \"owl:sameAs\",\n ]\n skos_concept_order = [\n \"skos:prefLael\",\n \"skos:altLabel\",\n \"skos:broader\",\n \"skos:narrower\",\n \"skos:related\",\n \"skos:broadMatch\",\n \"skos:narrowMatch\",\n \"skos:relatedMatch\",\n \"skos:exactMatch\",\n \"skos:closeMatch\",\n \"skos:note\",\n \"skos:notation\",\n \"skos:inScheme\",\n ]\n cc_license_order = [\"odrl:inheritFrom\", \"cc:depreceatedOn\"]\n svcs_service_order = [\"dcterms:conformsTo\", \"doap:implements\"]\n\n rdf_key_order_mapping = {\n \"edm:ProvidedCHO\": edm_provider_cho_order,\n \"edm:WebResource\": web_resource_order,\n \"ore:Aggregation\": ore_agg_order,\n \"edm:Agent\": edm_agent_order,\n \"edm:Place\": edm_place_order,\n \"edm:TimeSpan\": edm_timespan_order,\n \"skos:Concept\": skos_concept_order,\n \"cc:License\": cc_license_order,\n \"svcs:Service\": svcs_service_order,\n }\n\n for rdf_key in rdf.keys():\n if rdf_key in rdf_key_order_mapping.keys():\n for key in rdf_key_order_mapping[rdf_key]:\n if isinstance(rdf[rdf_key], list):\n for n, contextual_class in enumerate(rdf[rdf_key]):\n if key in contextual_class.keys():\n rdf[rdf_key][n].move_to_end(key, last=True)\n else:\n if key in rdf[rdf_key].keys():\n rdf[rdf_key].move_to_end(key, last=True)\n \n def 
__call__(self, element, metadata):\n data = metadata.record\n if not data['metadata']: return\n for prefix, ns in self.ns.items():\n data['metadata']['rdf:RDF']['@xmlns:{}'.format(prefix)] = ns\n\n data['metadata']['rdf:RDF'] = json.loads(json.dumps(data['metadata']['rdf:RDF']),object_pairs_hook=OrderedDict)\n self.fix_ordering_of_edm_elements(data['metadata']['rdf:RDF'])\n \n metadata_unparsed = xmltodict.unparse(data['metadata'], full_document=False)\n e = etree.fromstring(metadata_unparsed)\n element.append(e)\n","sub_path":"moai/metadata/edm.py","file_name":"edm.py","file_ext":"py","file_size_in_byte":7724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"518720772","text":"# This file overrides some of the defaults to make tests run faster.\n# Use it by running ./manage.py test --settings=kive.test_settings\n\nfrom settings_test_pg import * # @UnusedWildImport\n\n# Run with an in-memory database: about twice as fast as PostgreSQL\nDATABASES['default'] = {'ENGINE': 'django.db.backends.sqlite3',\n 'TEST': {'NAME': ':memory:'},\n 'NAME': 'kive.db'}\n\nRUN_SLURM_TESTS = False # Slurm tests are incompatible with in-memory DB.\n","sub_path":"kive/kive/settings_test.py","file_name":"settings_test.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"327172290","text":"import os\nimport sys\nimport argparse\nfrom itertools import product\n\nfrom lib.colorer import color_stdout\n\n\ndef env_int(name, default):\n try:\n value = os.environ.get(name)\n return default if value is None else int(value)\n except ValueError:\n return default\n\n\ndef env_list(name, default):\n value_str = os.environ.get(name)\n if value_str is None:\n return default\n value_list = value_str.split()\n return value_list or default\n\n\nclass Options(object):\n \"\"\"Handle options of test-runner\"\"\"\n\n _instance = None\n _initialized = False\n\n def __new__(cls, *args, **kwargs):\n \"\"\"Make the class singleton.\"\"\"\n if cls._instance:\n return cls._instance\n cls._instance = super(Options, cls).__new__(cls, *args, **kwargs)\n return cls._instance\n\n def __init__(self):\n \"\"\"Add all program options, with their defaults.\"\"\"\n\n # The __init__() method is called always, even when we\n # return already initialized Options instance from\n # __new__().\n if Options._initialized:\n return\n\n parser = argparse.ArgumentParser(\n description=\"Tarantool regression test suite front-end.\")\n\n parser.epilog = \"For a complete description, use 'pydoc ./\" + \\\n os.path.basename(sys.argv[0]) + \"'\"\n\n parser.add_argument(\n \"tests\",\n metavar=\"test\",\n nargs=\"*\",\n default=env_list('TEST_RUN_TESTS', ['']),\n help=\"\"\"Can be empty. List of test names, to look for in\n suites. Each name is used as a substring to look for in the\n path to test file, e.g. \"show\" will run all tests that have\n \"show\" in their name in all suites, \"box/show\" will only enable\n tests starting with \"show\" in \"box\" suite. Default: run all\n tests in all specified suites.\"\"\")\n\n parser.add_argument(\n \"--exclude\",\n action='append',\n default=env_list('TEST_RUN_EXCLUDE', []),\n help=\"\"\"Set an exclusion pattern. When a full test name (say,\n app-tap/string.test.lua) contains the pattern as a substring,\n the test will be excluded from execution. 
The option can be\n passed several times.\"\"\")\n\n parser.add_argument(\n \"--suite\",\n dest='suites',\n metavar=\"suite\",\n nargs=\"*\",\n default=[],\n help=\"\"\"List of test suites to look for tests in. Default: \"\" -\n means find all available.\"\"\")\n\n parser.add_argument(\n \"--verbose\",\n dest='is_verbose',\n action=\"store_true\",\n default=False,\n help=\"\"\"Print TAP13 test output to log.\n Default: false.\"\"\")\n\n parser.add_argument(\n '--debug',\n dest='debug',\n action='store_true',\n default=False,\n help=\"\"\"Print test-run logs to the terminal.\n Default: false.\"\"\")\n\n parser.add_argument(\n \"--force\",\n dest=\"is_force\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Go on with other tests in case of an individual test failure.\n Default: false.\"\"\")\n\n parser.add_argument(\n \"--gdb\",\n dest=\"gdb\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Start the server under 'gdb' debugger in detached\n Screen. This option is mutually exclusive with --valgrind,\n --gdbserver, --lldb and --strace.\n Default: false.\"\"\")\n\n parser.add_argument(\n \"--gdbserver\",\n dest=\"gdbserver\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Start the server under 'gdbserver'. This option is\n mutually exclusive with --valgrind, --gdb, --lldb and --strace.\n Default: false.\"\"\")\n\n parser.add_argument(\n \"--lldb\",\n dest=\"lldb\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Start the server under 'lldb' debugger in detached\n Screen. This option is mutually exclusive with --valgrind,\n --gdb, --gdbserver and --strace.\n Default: false.\"\"\")\n\n parser.add_argument(\n \"--valgrind\",\n dest=\"valgrind\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Run the server under 'valgrind'. This option is\n mutually exclusive with --gdb, --gdbserver, --lldb and\n --strace.\n Default: false.\"\"\")\n\n parser.add_argument(\n \"--strace\",\n dest=\"strace\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Run the server under 'strace'. This option is mutually\n exclusive with --valgrind, --gdb, --gdbserver and --lldb.\n Default: false.\"\"\")\n\n parser.add_argument(\n \"--builddir\",\n dest=\"builddir\",\n default=\"..\",\n help=\"\"\"Path to project build directory. Default: \"..\" \"\"\")\n\n parser.add_argument(\n \"--tarantool-port\",\n dest=\"tarantool_port\",\n default=None,\n help=\"\"\"Listen port number to run tests against. Admin port\n number must be listen + 1\"\"\")\n\n parser.add_argument(\n \"--vardir\",\n dest=\"vardir\",\n default=\"var\",\n help=\"\"\"Path to data directory. Default: var.\"\"\")\n parser.add_argument(\n \"--long\",\n dest=\"long\",\n default=False,\n action='store_true',\n help=\"\"\"Enable long run tests\"\"\")\n\n parser.add_argument(\n \"--conf\",\n dest=\"conf\",\n default=None,\n help=\"\"\"Force set test configuration mode\"\"\")\n\n parser.add_argument(\n \"-j\", \"--jobs\",\n dest=\"jobs\",\n const=0,\n nargs='?',\n default=env_int('TEST_RUN_JOBS', 0),\n type=int,\n help=\"\"\"Workers count. Default: ${TEST_RUN_JOBS} or 0 (0 means\n 2 x CPU count). -1 means everything running consistently\n (single process). \"\"\")\n\n
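 # Note: the default above comes from env_int('TEST_RUN_JOBS', 0), which\n # falls back to 0 when TEST_RUN_JOBS is unset or not an integer; per the\n # help text, 0 in turn means 2 x CPU count workers.\n\n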
\"\"\")\n\n parser.add_argument(\n \"--reproduce\",\n dest=\"reproduce\",\n default=None,\n help=\"\"\"Run tests in the order given by the file.\n Such files created by workers in the \"var/reproduce\" directory.\n Note: The option works now only with parallel testing.\"\"\")\n\n parser.add_argument(\n \"--test-timeout\",\n dest=\"test_timeout\",\n default=env_int('TEST_TIMEOUT', 110),\n type=int,\n help=\"\"\"Break the test process with kill signal if the test runs\n longer than this amount of seconds. Default: 110 [seconds].\"\"\")\n\n parser.add_argument(\n \"--no-output-timeout\",\n dest=\"no_output_timeout\",\n default=env_int('NO_OUTPUT_TIMEOUT', 120),\n type=int,\n help=\"\"\"Exit if there was no output from workers during this\n amount of seconds. Set it to -1 to disable hang detection.\n Default: 120 [seconds] (but disabled when one of --gdb, --llgb,\n --valgrind, --long options is passed).\n Note: The option works now only with parallel testing.\"\"\")\n\n parser.add_argument(\n \"--replication-sync-timeout\",\n dest=\"replication_sync_timeout\",\n default=env_int('REPLICATION_SYNC_TIMEOUT', 100),\n type=int,\n help=\"\"\"The number of seconds that a replica will wait when\n trying to sync with a master in a cluster, or a quorum of\n masters, after connecting or during configuration update.\n This could fail indefinitely if replication_sync_lag is smaller\n than network latency, or if the replica cannot keep pace with\n master updates. If replication_sync_timeout expires, the replica\n enters orphan status.\n Default: 100 [seconds].\"\"\")\n\n parser.add_argument(\n \"--luacov\",\n dest=\"luacov\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Run the server under 'luacov'.\n Default: false.\"\"\")\n\n parser.add_argument(\n \"--update-result\",\n dest=\"update_result\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Update or create file with reference output (.result).\n Default: false.\"\"\")\n\n parser.add_argument(\n \"--snapshot\",\n dest='snapshot_path',\n default=None,\n type=os.path.abspath,\n help=\"\"\"Path to snapshot that will be loaded before testing.\"\"\")\n\n parser.add_argument(\n \"--disable-schema-upgrade\",\n dest='disable_schema_upgrade',\n action=\"store_true\",\n default=False,\n help=\"\"\"Disable schema upgrade on testing with snapshots.\"\"\")\n parser.add_argument(\n \"--memtx-allocator\",\n dest=\"memtx_allocator\",\n default=os.environ.get(\"MEMTX_ALLOCATOR\", \"small\"),\n help=\"\"\"Memtx allocator type for tests\"\"\")\n\n # XXX: We can use parser.parse_intermixed_args() on\n # Python 3.7 to understand commands like\n # ./test-run.py foo --exclude bar baz\n self.args = parser.parse_args()\n self.check()\n\n Options._initialized = True\n\n def check(self):\n \"\"\"Check the arguments for correctness.\"\"\"\n check_error = False\n conflict_options = ('valgrind', 'gdb', 'lldb', 'strace')\n for op1, op2 in product(conflict_options, repeat=2):\n if op1 != op2 and getattr(self.args, op1, '') and \\\n getattr(self.args, op2, ''):\n format_str = \"\\nError: option --{} is not compatible with option --{}\\n\"\n color_stdout(format_str.format(op1, op2), schema='error')\n check_error = True\n break\n\n snapshot_path = self.args.snapshot_path\n if self.args.disable_schema_upgrade and not snapshot_path:\n color_stdout(\"\\nOption --disable-schema-upgrade requires --snapshot\\n\",\n schema='error')\n check_error = True\n\n if snapshot_path and not os.path.exists(snapshot_path):\n color_stdout(\"\\nPath {} does not 
exist\\n\".format(snapshot_path), schema='error')\n check_error = True\n\n if check_error:\n exit(-1)\n\n def check_schema_upgrade_option(self, is_debug):\n if self.args.disable_schema_upgrade and not is_debug:\n color_stdout(\"Can't disable schema upgrade on release build\\n\", schema='error')\n exit(1)\n","sub_path":"lib/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":11586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"232231874","text":"import numpy as np\nfrom numpy.linalg import eig, svd\nfrom numpy.linalg import norm as linorm\n\nfrom matplotlib import pyplot\nimport os\n\n# from PIL import Image\n\n\"\"\"\nINPUT: Single Eigen vector of U, \n corresponding eigen value and eigenvector of V\nOutput : Returns u*sigma*vt of same shape as original matrix(64,64)\n\"\"\"\ndef u_sig_v_vect(u_vector, sigma, v_vector):\n mat_1 = np.expand_dims(u_vector,axis=0)\n mat_2 = np.expand_dims(v_vector,axis=0)\n u_sigma = np.transpose(mat_1)*sigma\n result = np.dot(u_sigma, mat_2)\n return result\n\n\"\"\"\nINPUT: image input path and number of top eigen values \nOutput : Returns svd on image at path\n\"\"\"\ndef svd_on_top_eig(path, top_eig_vals = 2):\n image = pyplot.imread(path)\n # pyplot.imshow(image, pyplot.cm.gray)\n # pyplot.show()\n\n image = np.array(image,dtype=np.float32)\n image = image/255.0\n u, s, vt = np.linalg.svd(image, full_matrices=True)\n\n u_h = np.squeeze(np.hsplit(u,64))\n v_h = np.squeeze(np.hsplit(np.transpose(vt),64))\n\n for i in range(top_eig_vals):\n if i==0:\n u_s_vt = u_sig_v_vect(u_h[i], s[i], v_h[i])\n # pyplot.imshow(u_s_vt, pyplot.cm.gray)\n # pyplot.show()\n elif i>0:\n u_s_vt += u_sig_v_vect(u_h[i], s[i], v_h[i])\n # pyplot.imshow(u_s_vt, pyplot.cm.gray)\n # pyplot.show()\n return u_s_vt\n\n\"\"\"\nINPUT: image, list of path of all representative images and\n list of class they represent\nOutput : predicted class by comparing norm \n\"\"\"\ndef find_img_cls(image, rpr_img_ls, rpr_cls):\n norm_min = np.inf\n for i, rpr_path in enumerate(rpr_img_ls):\n rpr_img = np.load(rpr_path + \".npy\")\n res = image - rpr_img\n norm = np.sum(res**2)\n # norm = linorm(res, ord=2)\n\n if norm < norm_min:\n norm_min = norm \n pred_cls = rpr_cls[i]\n # print(rpr_path, pred_cls, norm)\n return pred_cls\n\"\"\"\nINPUT: list of dataset images, corresponding classes,\n path of all representative images and list of class they represent\nOutput : Accuracy\n\"\"\"\ndef check_accuracy(image_ls, class_ls, rpr_img_ls, rpr_cls):\n count = 0\n for i, path in enumerate(image_ls): \n image = pyplot.imread(path) \n image = np.array(image,dtype=np.float32)\n image = image/255.0\n image_cls_pred = find_img_cls(image, rpr_img_ls, rpr_cls)\n if int(image_cls_pred) == int(class_ls[i]) :\n count += 1\n else:\n print(\"misclassified image path and predicted \", path, image_cls_pred)\n print(\"count\",count)\n return count/1.5\n\n# Create empty lists to store\nimage_ls, class_ls, repr_img_ls, repr_cls_ls = [], [], [], []\n# Top eigen values to decompose\nTOP_EIG_VAL = 64\ndir = os.listdir(\"Dataset_Question1\")\n# Create directories to save files\ntry:\n os.mkdir(\"np_files\")\n os.mkdir(\"representative_img\")\nexcept OSError:\n print (\"Directories already present\")\nelse:\n print (\"Successfully created the directory \")\n\nfor fl in dir:\n a = os.listdir(\"Dataset_Question1\"+\"/\" + str(fl))\n final_repr_image = np.zeros([64,64])\n for fls in a:\n path = \"Dataset_Question1\"+\"/\" + str(fl) + \"/\" + fls\n 
+{"seq_id":"232231874","text":"import numpy as np\nfrom numpy.linalg import eig, svd\nfrom numpy.linalg import norm as linorm\n\nfrom matplotlib import pyplot\nimport os\n\n# from PIL import Image\n\n\"\"\"\nINPUT: Single eigenvector of U,\n corresponding eigenvalue and eigenvector of V\nOutput: Returns u*sigma*vt of the same shape as the original matrix (64,64)\n\"\"\"\ndef u_sig_v_vect(u_vector, sigma, v_vector):\n mat_1 = np.expand_dims(u_vector,axis=0)\n mat_2 = np.expand_dims(v_vector,axis=0)\n u_sigma = np.transpose(mat_1)*sigma\n result = np.dot(u_sigma, mat_2)\n return result\n\n\"\"\"\nINPUT: image input path and number of top eigenvalues\nOutput: Returns the SVD reconstruction of the image at path\n\"\"\"\ndef svd_on_top_eig(path, top_eig_vals = 2):\n image = pyplot.imread(path)\n # pyplot.imshow(image, pyplot.cm.gray)\n # pyplot.show()\n\n image = np.array(image,dtype=np.float32)\n image = image/255.0\n u, s, vt = np.linalg.svd(image, full_matrices=True)\n\n u_h = np.squeeze(np.hsplit(u,64))\n v_h = np.squeeze(np.hsplit(np.transpose(vt),64))\n\n for i in range(top_eig_vals):\n if i==0:\n u_s_vt = u_sig_v_vect(u_h[i], s[i], v_h[i])\n # pyplot.imshow(u_s_vt, pyplot.cm.gray)\n # pyplot.show()\n elif i>0:\n u_s_vt += u_sig_v_vect(u_h[i], s[i], v_h[i])\n # pyplot.imshow(u_s_vt, pyplot.cm.gray)\n # pyplot.show()\n return u_s_vt\n\n\"\"\"\nINPUT: image, list of paths of all representative images and\n list of classes they represent\nOutput: predicted class by comparing norms\n\"\"\"\ndef find_img_cls(image, rpr_img_ls, rpr_cls):\n norm_min = np.inf\n for i, rpr_path in enumerate(rpr_img_ls):\n rpr_img = np.load(rpr_path + \".npy\")\n res = image - rpr_img\n norm = np.sum(res**2)\n # norm = linorm(res, ord=2)\n\n if norm < norm_min:\n norm_min = norm \n pred_cls = rpr_cls[i]\n # print(rpr_path, pred_cls, norm)\n return pred_cls\n\"\"\"\nINPUT: list of dataset images, corresponding classes,\n paths of all representative images and list of classes they represent\nOutput: Accuracy\n\"\"\"\ndef check_accuracy(image_ls, class_ls, rpr_img_ls, rpr_cls):\n count = 0\n for i, path in enumerate(image_ls): \n image = pyplot.imread(path) \n image = np.array(image,dtype=np.float32)\n image = image/255.0\n image_cls_pred = find_img_cls(image, rpr_img_ls, rpr_cls)\n if int(image_cls_pred) == int(class_ls[i]) :\n count += 1\n else:\n print(\"misclassified image path and predicted \", path, image_cls_pred)\n print(\"count\",count)\n return count/1.5\n\n# Create empty lists to store\nimage_ls, class_ls, repr_img_ls, repr_cls_ls = [], [], [], []\n# Top eigen values to decompose\nTOP_EIG_VAL = 64\ndir = os.listdir(\"Dataset_Question1\")\n# Create directories to save files\ntry:\n os.mkdir(\"np_files\")\n os.mkdir(\"representative_img\")\nexcept OSError:\n print(\"Directories already present\")\nelse:\n print(\"Successfully created the directories\")\n\nfor fl in dir:\n a = os.listdir(\"Dataset_Question1\"+\"/\" + str(fl))\n final_repr_image = np.zeros([64,64])\n for fls in a:\n path = \"Dataset_Question1\"+\"/\" + str(fl) + \"/\" + fls\n image_ls.append(path)\n class_ls.append(fl)\n # print(path)\n final_repr_image += svd_on_top_eig(path, TOP_EIG_VAL)\n final_repr_image = final_repr_image/10.0\n np_files_path = \"np_files\"+\"/\" + str(fl) #+ \".png\"\n np.save(np_files_path, final_repr_image) \n repr_img_ls.append(np_files_path)\n repr_cls_ls.append(str(fl))\n\n save_path = \"representative_img\"+\"/\" + str(fl) + \".png\"\n pyplot.imsave(save_path, final_repr_image, cmap=pyplot.cm.gray)\n\n # pyplot.imshow(final_repr_image, pyplot.cm.gray)\n # pyplot.show()\n# print(image_ls, class_ls, repr_img_ls, repr_cls_ls)\nch = check_accuracy(image_ls, class_ls, repr_img_ls, repr_cls_ls)\nprint(\"Accuracy percentage is : \",ch)","sub_path":"SVD_Analysis.py","file_name":"SVD_Analysis.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"91059242","text":"__author__ = 'Song'\nimport numpy\nimport theano\nimport theano.tensor as T\nimport theano.tensor.extra_ops as Te\n\nclass RiskLayer(object):\n def __init__(self, input, n_in, n_out, rng):\n # rng = numpy.random.RandomState(111111)\n # initialize the weights W as a (n_in, n_out) matrix with uniform values in +/- sqrt(6 / (n_in + n_out))\n self.W = theano.shared(\n value = numpy.asarray(\n rng.uniform(\n low=-numpy.sqrt(6. / (n_in + n_out)),\n high=numpy.sqrt(6. / (n_in + n_out)),\n size=(n_in, n_out)\n ),\n # rng.normal(size=(n_in, n_out)),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n # initialize the biases b as a vector of n_out 0s\n # self.b = theano.shared(\n # value=numpy.zeros(n_out, dtype=theano.config.floatX),\n # name='b',\n # borrow=True\n # )\n self.input = input\n self.output = T.dot(self.input, self.W ).flatten()\n self.params = [self.W ]\n\n def cost(self, observed, at_risk):\n prediction = self.output\n exp = T.exp(prediction)[::-1]\n partial_sum = Te.cumsum(exp)[::-1] + 1 # get the reversed partial cumulative sum\n log_at_risk = T.log(partial_sum[at_risk])\n diff = prediction - log_at_risk\n cost = T.sum(T.dot(observed, diff))\n return cost\n\n def gradient(self, observed, at_risk):\n prediction = self.output\n risk = T.exp(prediction)\n product = self.input * (risk * T.ones((1, self.input.shape[0])))\n numerator = Te.cumsum(product[::-1])[::-1][at_risk]\n denominator = Te.cumsum(risk[::-1])[::-1][at_risk] * T.ones((1, self.input.shape[0]))\n numerator = numerator.flatten()\n denominator = denominator.flatten()\n gradient = T.dot(observed, self.input - (numerator / denominator))\n return gradient\n\n def reset_weight(self, params):\n self.W.set_value(params)\n\n","sub_path":"RiskLayer.py","file_name":"RiskLayer.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
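RiskLayer.cost above is the Cox partial log-likelihood written in Theano; in plain numpy the reversed-cumulative-sum trick is easier to see. A sketch under the record's conventions (predictions ordered by survival time, at_risk holding risk-set start indices; the +1 smoothing term is carried over from the record):

import numpy as np

def cox_partial_loglik(prediction, observed, at_risk):
    # Reversed cumulative sum: for each subject, the sum of exp(prediction)
    # over everyone still at risk at that subject's event time.
    partial_sum = np.cumsum(np.exp(prediction)[::-1])[::-1] + 1
    log_at_risk = np.log(partial_sum[at_risk])
    # Only observed (uncensored) events contribute to the likelihood.
    return np.sum(observed * (prediction - log_at_risk))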
+{"seq_id":"558602736","text":"\"\"\"\nthreading_demo1.py - threading demo from Chapter 8\n\nCreates a Thread instance directly.\n\"\"\"\n\n\nfrom threading import Thread\nfrom zipfile import ZipFile, ZIP_DEFLATED\n\n\ndef zip_it(infile, outfile):\n with ZipFile(outfile, 'w', ZIP_DEFLATED) as f:\n f.write(infile)\n print('Finished', infile)\n\n\n# create a Thread instance that will call zip_it, passing args tuple\nbackground = Thread(target=zip_it,\n args=('inventory.csv', 'inventory.zip'))\n\nbackground.start() # start the Thread executing\n\nprint('Main thread continues to run in foreground')\n\nbackground.join() # Wait for the background task to finish\nprint('Main thread waited until the background task was done.')\n\n\n","sub_path":"examples/ch08_examples/thread_demo1.py","file_name":"thread_demo1.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"250473176","text":"from tkinter import *\nfrom tkinter import ttk\n\nroot = Tk()\n\n# A frame is a widget that surrounds/holds other widgets.\nframe = Frame(root)\n\n# TkInter variable that will be used to change the text for one of the label components\nlabelText = StringVar()\n\n# Create a Label widget. The first arg puts it inside the frame. The second arg sets the text of the label.\nlabel = Label(frame, textvariable=labelText)\n\n# Create a button widget\nbutton = Button(frame, text=\"Click me\")\n\n# set the text of the label\nlabelText.set(\"This is a label\")\n\n# pack is a very simplistic geometry manager. See the python file pack_geometry_manager.py for more info.\nlabel.pack()\nbutton.pack()\nframe.pack()\n\nroot.mainloop()\n\n","sub_path":"frames_and_labels.py","file_name":"frames_and_labels.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"413287315","text":"import tornado.ioloop\nimport tornado.web\nfrom datetime import date\nfrom SomeData import SomeData\n\nclass VersionHandler(tornado.web.RequestHandler):\n def get(self):\n response={\"version\":\"1.0.0\",\"time\":date.today().isoformat() }\n self.write(response)\n\n\napplication=tornado.web.Application([\n\n (r\"/version\",VersionHandler),(r\"/SomeData\",SomeData)\n])\n\nif __name__==\"__main__\":\n application.listen(8888)\n print(\"Listening on port 8888\")\n tornado.ioloop.IOLoop.instance().start()","sub_path":"App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"263761892","text":"\n\n#class header\nclass _HALLMARK():\n\tdef __init__(self,): \n\t\tself.name = \"HALLMARK\"\n\t\tself.definitions = [u'to put an official mark on an object made of gold or silver']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_hallmark.py","file_name":"_hallmark.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"370240444","text":"# coding: utf-8\n\"\"\"\nDjango settings for evoluirmais project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n# import os\n# BASE_DIR = os.path.dirname(os.path.dirname(__file__))\nfrom decouple import config\nfrom dj_database_url import parse as db_url\nfrom unipath import Path\nBASE_DIR = Path(__file__).parent\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = [\n '.localhost',\n 
'127.0.0.1',\n '.herokuapp.com',\n '.evoluirmais.com.br',\n '.ow7.com.br',\n]\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'grappelli_extensions',\n 'grappelli',\n\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n # other apps\n 'south',\n 'sorl.thumbnail',\n 'bootstrap3',\n\n # my apps\n 'evoluirmais.core',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'evoluirmais.urls'\n\nWSGI_APPLICATION = 'evoluirmais.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': config(\n 'DATABASE_URL',\n default='sqlite:///' + BASE_DIR.child('db.sqlite3'),\n cast=db_url),\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'pt-br'\n\nTIME_ZONE = 'America/Recife'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_ROOT = BASE_DIR.child('staticfiles')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = BASE_DIR.child('media')\nMEDIA_URL = '/media/'\n\n\n# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nDEFAULT_FROM_EMAIL = config('DEFAULT_FROM_EMAIL')\nEMAIL_USE_TLS = config('EMAIL_USE_TLS')\nEMAIL_HOST = config('EMAIL_HOST')\nEMAIL_HOST_USER = config('EMAIL_HOST_USER')\nEMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')\nEMAIL_PORT = config('EMAIL_PORT')\n\n\n# django-tinymce\n# TINYMCE_JS_URL = STATIC_URL + 'tiny_mce/tiny_mce.js'\nTINYMCE_DEFAULT_CONFIG = {\n 'theme_advanced_buttons1': \"cut,copy,paste,|,undo,redo,|,cleanup,|,bold,\\\n italic,underline,strikethrough,|,forecolor,\\\n backcolor,|,justifyleft,justifycenter,\\\n justifyright,justifyfull,|,help,|,code\",\n 'theme_advanced_buttons2': \"removeformat,formatselect,fontsizeselect,|,\\\n bullist,numlist,outdent,indent,|,link,unlink,\\\n anchor,sub,sup,|,hr,advhr,visualaid,|,image,\\\n media,|,preview,\",\n 'height': '350',\n 'file_browser_callback': 'mce_filebrowser',\n}\n\n\n# grappelli\nGRAPPELLI_ADMIN_TITLE = 'OW7 | CMS'\n\n# GRAPPELLI_EXTENSIONS_NAVBAR = 'evoluirmais.extensions.Navbar'\n\n# GRAPPELLI_EXTENSIONS_SIDEBAR = 'evoluirmais.extensions.Sidebar'\n\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n)\n\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.core.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n)\n\n\n# # south {taggit}\n# SOUTH_MIGRATION_MODULES = {\n# 'taggit': 'taggit.south_migrations',\n# }\n","sub_path":"evoluirmais/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
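settings.py above keeps credentials out of the repository with python-decouple and turns a DATABASE_URL string into Django's DATABASES dict via dj-database-url; the core of that pattern, reduced to a self-contained sketch (names and defaults here are illustrative):

from decouple import config
from dj_database_url import parse as db_url

SECRET_KEY = config('SECRET_KEY')                  # raises if unset
DEBUG = config('DEBUG', default=False, cast=bool)  # casts the env string
DATABASES = {
    'default': config('DATABASE_URL',
                      default='sqlite:///db.sqlite3',
                      cast=db_url),
}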
+{"seq_id":"258031473","text":"# Modals\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor, AdaBoostClassifier\n# Test train split\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\n# Error\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nimport csv\n\n# Reading the data.\nfileName = \"./train_data.csv\"\ndata = pd.read_csv(fileName)\n\n# Defing the X and Y.\nX = data[['memory_GB', 'network_log10_MBps',\n 'local_IO_log10_MBps', 'NFS_IO_log10_MBps']]\nY = data['failed']\nZ = data[['job_id','failed']]\n\n# Splitting the data for training and testing.\ntrain_x, test_x, train_y, test_y = train_test_split(\n X, Y, train_size=0.8, test_size=0.2, random_state=0)\n\n\ndef DecisionTree():\n myModal = DecisionTreeRegressor()\n myModal.fit(train_x, train_y)\n predicated_y = myModal.predict(test_x)\n return Measure_Error(test_y, predicated_y)\n\n\ndef LinearReg():\n myModal = LinearRegression()\n myModal.fit(train_x, train_y)\n predicated_y = myModal.predict(test_x)\n return Measure_Error(test_y, predicated_y)\n\n\ndef LogisticReg():\n myModal = LogisticRegression()\n myModal.fit(train_x, train_y)\n predicated_y = myModal.predict(test_x)\n return Measure_Error(test_y, predicated_y)\n\n\ndef RandomForest():\n myModal = RandomForestRegressor()\n myModal.fit(train_x, train_y)\n predicated_y = myModal.predict(test_x)\n return Measure_Error(test_y, predicated_y)\n\n\ndef AdaBoost():\n myModal = AdaBoostClassifier()\n myModal.fit(train_x, train_y)\n predicated_y = myModal.predict(test_x)\n return Measure_Error(test_y, predicated_y)\n\n\ndef Measure_Error(test_y, predicated_y):\n return [mean_absolute_error(test_y, predicated_y), mean_squared_error(test_y, predicated_y)]\n\n\n##### Print #####\nprint(\"Mean absolute Error (Lower === Better)\")\nprint(\"Decision Tree:\", DecisionTree())\nprint(\"Linear Regression:\", LinearReg())\nprint(\"Logistic Regression:\", LogisticReg())\nprint(\"Random Forest:\", RandomForest())\nprint(\"AdaBoost:\", AdaBoost())\n\n#### Copying data from one csv file to other #####\nfilename1 = \"model_complete_test.csv\"\nwith open(filename1,'w') as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(Z)\n\n# #### Merging Data from one csv file to other####\n# a = pd.read_csv(\"train_data.csv\")\n# b = pd.read_csv(\"model_complete_test.csv\")\n# b = b.dropna(axis=1)\n# merged = a.merge(b, on='job_id')\n# merged.to_csv(\"output.csv\", index=False)\n","sub_path":"model-csv.py","file_name":"model-csv.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"408736046","text":"import numpy as np\n\n\n\n\ndef DH(alpha1, a1, theta2, d2):\n \"\"\"\n 正运动学求解,DH参数法\n ---------------------------\n Forward kinematics solution, DH parameter method\n \"\"\"\n T = np.array(\n [[np.cos(theta2), -np.sin(theta2), 0, a1],\n [np.sin(theta2)*np.cos(alpha1), np.cos(theta2)*np.cos(alpha1), -np.sin(alpha1), -np.sin(alpha1)*d2],\n [np.sin(theta2)*np.sin(alpha1), np.cos(theta2)*np.sin(alpha1), np.cos(alpha1), np.cos(alpha1)*d2],\n [0, 0, 0, 1]]\n )\n\n return T\n\n\n","sub_path":"control/kinematics/forward.py","file_name":"forward.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"507834618","text":"'''\n\n6.\tData Cleaning\na.\tExclude Null 
+{"seq_id":"507834618","text":"'''\n\n6.\tData Cleaning\na.\tExclude Null Islands\ni.\tFirst see % of total data set -> reflect % of test set\nii.\tThere could be a certain set of characteristics for null islanders\nb.\tCheck if all users exist in dataset – check if user id follows a counter - NO\nc.\tCheck each user has all valid features\ni.\tWhat to do with null island?\nii.\tWhat to do with 25?\n1.\tHow should we include it?\na.\tAdditional features indicating whether original features have valid inputs\nd.\tOutlier detection\ni.\tPca/clustering/general – centrally xxx\n1.\tGive me distance from each point; plot and get rid of obvious outliers\n2.\tCould just plot\ne.\tWhat to do with users with no connections\ni.\tCould give some indication\n\n1.\tFirst predict latitude, then predict longitude, and vice versa\n2.\tPredict latitude, then include latitude as a feature, then predict longitude\n3.\tReverse that\n\n\n'''\n\nimport numpy as np\nimport pickle\n\n\nclass cleaning(object):\n\n def __init__(self):\n\n self.users = []\n self.unique_ids = set()\n self.num_users = 0\n self.null_islanders = set()\n self.miss_features = set()\n self.locations = []\n\n #hours stuff\n self.hour1 = set()\n self.hour2 = set()\n self.hour3 = set()\n\n def start(self):\n\n f = open(\"./data/posts_train.txt\", \"r\")\n\n first_line = True\n for line in f:\n\n if first_line:\n first_line = False\n else:\n user_info = line.split(\",\")\n self.unique_ids.add(int(user_info[0]))\n self.all_features(user_info)\n self.null_islander(user_info)\n self.invalid_hours(user_info)\n self.users.append(user_info)\n self.locations.append(float(user_info[4]))\n self.locations.append(float(user_info[5]))\n\n print(f\"There are {len(self.unique_ids)} unique IDs.\")\n print(f\"There are {len(self.users)} users.\")\n print(f\"There are {len(self.null_islanders)} null islanders.\")\n print(f\"There are {len(self.hour1)} invalid hour1s, {len(self.hour2)} invalid hour2s \"\n f\"and {len(self.hour3)} invalid hour3s.\")\n print(f\"There are {len(self.miss_features)} users with missing features.\")\n\n print(\"Intersections\")\n print(len(self.null_islanders.intersection(self.hour1)))\n print(len(self.hour1.intersection(self.hour2)))\n print(len(self.hour2.intersection(self.hour3)))\n print(len(self.hour1.intersection(self.hour3)))\n print(len(self.hour1.intersection(self.hour2).intersection(self.hour3)))\n print(len(self.hour1.intersection(self.hour2).intersection(self.hour3).intersection(self.null_islanders)))\n\n # self.null_islanders = set()\n # self.miss_features = set()\n #\n # # hours stuff\n # self.hour1 = set()\n # self.hour2 = set()\n # self.hour3 = set()\n\n print(\"standard deviation\")\n print(np.std(self.locations))\n\n print(np.random.choice(np.array(list(self.unique_ids)), 100))\n\n pickle_out = open(\"./data/id_by_group.pkl\", 'wb')\n desc = \"list of arrays: 100 random user ids, null_islanders, hour1, hour2, hour3\"\n pickle.dump(((np.random.choice(np.array(list(self.unique_ids)), 100), list(self.null_islanders), list(self.hour1), list(self.hour2), list(self.hour3)), desc), pickle_out)\n pickle_out.close()\n\n def all_features(self, user_array):\n\n if len(user_array) != 7:\n print(f\"Missing features for {user_array[0]}\")\n self.miss_features.add(user_array[0])\n\n def null_islander(self, user_array):\n\n # Id,Hour1,Hour2,Hour3,Lat,Lon,Posts\n if float(user_array[4]) == 0.0 and float(user_array[5]) == 0.0:\n self.null_islanders.add(user_array[0])\n\n def invalid_hours(self, user_array):\n\n if int(user_array[1]) == 25:\n 
self.hour1.add(user_array[0])\n if int(user_array[2]) == 25:\n self.hour2.add(user_array[0])\n if int(user_array[3]) == 25:\n self.hour3.add(user_array[0])\n\n for i in range(1,4):\n # print(int(user_array[i]))\n # print(np.arange(1, 26))\n # print(int(user_array[i]) not in np.arange(1, 26))\n\n if int(user_array[i]) not in np.arange(0, 24) and int(user_array[i]) != 25:\n print(f\"{user_array[0]} has an hour out of range\")\n print(int(user_array[i]))\n\n\nif __name__ == \"__main__\":\n\n myClean = cleaning()\n myClean.start()\n","sub_path":"src/cleaning.py","file_name":"cleaning.py","file_ext":"py","file_size_in_byte":4585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"363409883","text":"import torch\n\nclass ConvEncoder(torch.nn.Module):\n # reconstruct an image, but using the inductive bias of passing through a pattern completion module\n def __init__(self, convolutional=False,h_size=512):\n super(ConvEncoder, self).__init__()\n self.convolutional = convolutional\n self.bn0 = torch.nn.BatchNorm1d(100)\n self.h_size=h_size\n if convolutional:\n self.seq1 = torch.nn.Sequential(*[torch.nn.Conv1d(1, out_channels=64, kernel_size=5, stride=2),\n torch.nn.MaxPool1d(2), torch.nn.LeakyReLU(),\n torch.nn.BatchNorm1d(64),\n torch.nn.Conv1d(64, out_channels=64, kernel_size=5, stride=1),\n torch.nn.MaxPool1d(2), torch.nn.LeakyReLU(),\n torch.nn.BatchNorm1d(64),\n torch.nn.Conv1d(64, out_channels=64, kernel_size=3, stride=1),\n torch.nn.Flatten(),\n torch.nn.LeakyReLU(),\n torch.nn.Linear(512,h_size)\n ])\n\n else:\n self.seq1 = torch.nn.Sequential(*[torch.nn.Linear(100, 32), torch.nn.LeakyReLU(),\n torch.nn.Linear(32, 32), torch.nn.LeakyReLU(),\n torch.nn.Linear(32, 512)])\n\n def forward(self, x):\n x=self.bn0(x)\n x=x.unsqueeze(1)\n return self.seq1(x).squeeze(1)\nclass VAE(torch.nn.Module):\n def __init__(self,h_size=512):\n super(VAE, self).__init__()\n self.h_size=h_size\n self.bn0 = torch.nn.BatchNorm1d(100)\n self.conv1=torch.nn.Conv1d(1, out_channels=64, kernel_size=5, stride=2)\n self.mp1=torch.nn.MaxPool1d(2,return_indices=True)\n self.relu1=torch.nn.LeakyReLU()\n self.bn1=torch.nn.BatchNorm1d(64)\n self.conv2=torch.nn.Conv1d(64, out_channels=64, kernel_size=5, stride=1)\n self.mp2=torch.nn.MaxPool1d(2,return_indices=True)\n self.relu2=torch.nn.LeakyReLU()\n self.bn2=torch.nn.BatchNorm1d(64)\n self.conv3=torch.nn.Conv1d(64, out_channels=64, kernel_size=3, stride=1)\n self.to_h=torch.nn.Linear(512,h_size)\n self.to_logsigma=torch.nn.Linear(512,h_size)\n self.from_h=torch.nn.Linear(h_size,512)\n self.deconv3=torch.nn.ConvTranspose1d(64,out_channels=64,kernel_size=3,stride=1)\n self.relu1t=torch.nn.LeakyReLU()\n self.mpt2=torch.nn.MaxUnpool1d(2)\n self.deconv2=torch.nn.ConvTranspose1d(64,out_channels=64,kernel_size=5,stride=1)\n self.relu2t=torch.nn.LeakyReLU()\n self.mpt1=torch.nn.MaxUnpool1d(2)\n self.deconv1=torch.nn.ConvTranspose1d(64,out_channels=1,kernel_size=6,stride=2)\n\n def forward(self,x):\n eps=torch.randn((len(x),self.h_size)).to(x.device)\n x=self.bn0(x)\n x=x.unsqueeze(1)\n x=self.conv1(x)\n x,ids1=self.mp1(x)\n x=self.relu1(x)\n x=self.bn1(x)\n x=self.conv2(x)\n x,ids2=self.mp2(x)\n x=self.relu2(x)\n x=self.bn2(x)\n x=self.conv3(x).reshape((len(x),-1))\n h=self.to_h(x)\n logsigma=self.to_logsigma(x)\n x=h+eps*torch.exp(logsigma)\n x=self.from_h(x)\n x=x.reshape((len(x),64,8))\n x=self.deconv3(x)\n x=self.relu2t(x)\n x=self.mpt2(x,indices=ids2)\n x=self.deconv2(x)\n x=self.relu1t(x)\n x=self.mpt1(x,indices=ids1)\n 
x=self.deconv1(x)\n return x.squeeze(1),h,logsigma\n\n\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"386767136","text":"from math import sqrt\nn, m = map(int, input().split())\nans = 0\nfor i in range(1, int(m ** 0.5) + 1):\n if m % i == 0 and i <= m / n:\n if m // i <= m / n:\n ans = m // i\n break\n else:\n ans = i\n\nprint(ans)\n","sub_path":"boot/hard/partition.py","file_name":"partition.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"563117235","text":"#\n# Copyright (c) SAS Institute Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\njob command\n\"\"\"\nimport os\n\nfrom conary.lib import options\n\nfrom .. import errors\nfrom ..lib import command, jobs\n\n\nclass JobCommand(command.CommandWithSubCommands):\n \"\"\"\n Job command for creating/changing jenkins jobs\n \"\"\"\n help = 'Create/Edit jenkins jobs'\n commands = ['jobs']\n\n\nclass JobSubCommand(command.BaseCommand):\n paramHelp = '+'\n requireConfig = True\n\n def addLocalParameters(self, argDef):\n argDef['project'] = (options.OPT_PARAM, 'Path to project, defaults to'\n ' current working directory')\n\n def runCommand(self, cfg, argSet, args, **kwargs):\n _, self.jobList = self.requireParameters(\n args, expected='file', appendExtra=True)\n\n\nclass JobBuildCommand(JobSubCommand):\n help = \"Build a jenkins job\"\n command = [\"build\", \"run\"]\n docs = {\"no-watch\": \"Don't watch jobs as they run\",\n }\n\n def addLocalParameters(self, argDef):\n argDef['no-watch'] = options.NO_PARAM\n\n def runCommand(self, cfg, argSet, args, **kwargs):\n JobSubCommand.runCommand(self, cfg, argSet, args, **kwargs)\n watch = not argSet.pop(\"no-watch\", False)\n jobs.buildJobs(cfg, self.jobList, watch)\nJobCommand.registerSubCommand(\"build\", JobBuildCommand)\n\n\nclass JobCreateCommand(JobSubCommand):\n help = 'Create a jenkins job'\n command = ['create']\n\n def runCommand(self, cfg, argSet, args, **kwargs):\n JobSubCommand.runCommand(self, cfg, argSet, args, **kwargs)\n jobs.createJobs(cfg, self.jobList)\nJobCommand.registerSubCommand('create', JobCreateCommand)\n\n\nclass JobRetrieveCommand(JobSubCommand):\n help = 'Retrieve a jenkins job'\n command = ['retrieve']\n paramHelp = '[JOB]*'\n requireConfig = True\n\n def addLocalParameters(self, argDef):\n JobSubCommand.addLocalParameters(self, argDef)\n argDef['filter'] = (options.OPT_PARAM, 'Filter to apply to jobs')\n\n def runCommand(self, cfg, argSet, args, **kwargs):\n _, jobList = self.requireParameters(args, allowExtra=True)\n if not jobList:\n jobList = None\n\n projectDir = argSet.pop('project', os.getcwd())\n jobFilter = argSet.pop('filter', None)\n\n projectDir = os.path.abspath(projectDir)\n jobDir = os.path.join(projectDir, cfg.jobDir)\n\n # verify jobDir exist\n if not (os.path.exists(jobDir) and 
os.path.isdir(jobDir)):\n raise errors.CommandError(\n 'no jobs directory found in %s' % (projectDir)\n )\n\n jobs.retrieveJobs(cfg, jobList, jobDir, jobFilter)\nJobCommand.registerSubCommand('retrieve', JobRetrieveCommand)\n\n\nclass JobDisableCommand(JobSubCommand):\n help = 'Disable a jenkins job'\n commands = ['disable', 'off']\n\n def addLocalParameters(self, argDef):\n JobSubCommand.addLocalParameters(self, argDef)\n argDef['force'] = (options.NO_PARAM, 'Force update of local config')\n\n def runCommand(self, cfg, argSet, args, **kwargs):\n JobSubCommand.runCommand(self, cfg, argSet, args, **kwargs)\n\n force = argSet.pop('force', False)\n\n jobs.disableJobs(cfg, self.jobList, force)\nJobCommand.registerSubCommand('disable', JobDisableCommand)\n\n\nclass JobEnableCommand(JobSubCommand):\n help = 'Enable a jenkins job'\n commands = ['enable', 'on']\n\n def addLocalParameters(self, argDef):\n JobSubCommand.addLocalParameters(self, argDef)\n argDef['force'] = (options.NO_PARAM, 'Force update of local config')\n\n def runCommand(self, cfg, argSet, args, **kwargs):\n JobSubCommand.runCommand(self, cfg, argSet, args, **kwargs)\n force = argSet.pop('force', False)\n jobs.enableJobs(cfg, self.jobList, force)\nJobCommand.registerSubCommand('enable', JobEnableCommand)\n\n\nclass JobDeleteCommand(JobSubCommand):\n help = 'Delete a jenkins job'\n commands = ['delete']\n\n def addLocalParameters(self, argDef):\n JobSubCommand.addLocalParameters(self, argDef)\n argDef['force'] = (options.NO_PARAM, 'Also delete local config file')\n\n def runCommand(self, cfg, argSet, args, **kwargs):\n JobSubCommand.runCommand(self, cfg, argSet, args, **kwargs)\n force = argSet.pop('force', False)\n jobs.deleteJobs(cfg, self.jobList, force)\nJobCommand.registerSubCommand('delete', JobDeleteCommand)\n\n\nclass JobUpdateCommand(JobSubCommand):\n help = 'Update a jenkins job'\n commands = ['update']\n\n def runCommand(self, cfg, argSet, args, **kwargs):\n JobSubCommand.runCommand(self, cfg, argSet, args, **kwargs)\n jobs.updateJobs(cfg, self.jobList)\nJobCommand.registerSubCommand('update', JobUpdateCommand)\n","sub_path":"jbutler/commands/jobscommand.py","file_name":"jobscommand.py","file_ext":"py","file_size_in_byte":5237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"164020703","text":"\n\nfrom xai.brain.wordbase.adjectives._epicurean import _EPICUREAN\n\n#calss header\nclass _EPICUREANS(_EPICUREAN, ):\n\tdef __init__(self,): \n\t\t_EPICUREAN.__init__(self)\n\t\tself.name = \"EPICUREANS\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"epicurean\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_epicureans.py","file_name":"_epicureans.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"637032318","text":"from django.contrib import admin\n\nfrom . 
models import Artist\nfrom songs.models import Song\n\nclass SongInline(admin.StackedInline):\n model = Song\n can_delete = False\n\nclass ArtistAdmin(admin.ModelAdmin):\n inlines = (SongInline, )\n list_display = (\n \"name\",\n )\n\nadmin.site.register(Artist, ArtistAdmin)\n\n","sub_path":"artists/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
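exemple_4_OT.py below sorts a dictionary by key and by value in three increasingly manual ways; since Python 3.7 dicts preserve insertion order, so the whole exercise collapses to two lines. A minimal sketch (sample data made up):

data = {'bob': 3, 'alice': 1, 'carol': 2}
by_key = dict(sorted(data.items()))                          # sort on keys
by_value = dict(sorted(data.items(), key=lambda kv: kv[1]))  # sort on values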
+{"seq_id":"333126613","text":"#open the file data.txt\n#read the values\n#put the values into a dictionary\n\n\n# V1 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n#sort the dictionary by value\nimport re\n\n#opening input file\nfile = open('data.txt', 'r')\n#print(file.read())\n\n#reading input file\nList = file.read()\n\n#regex declaration\nregex = r\"(.*),(.*)\\n?\"\n\n#regex matches iterations\nmatches = re.finditer(regex, List)\n\n#dictionary declaration\nDickus = dict()\n\n#input values into dictionary\nfor matchNum, match in enumerate(matches):\n matchNum = matchNum + 1\n\n for groupNum in range(0, len(match.groups())):\n if groupNum == 1 :\n a = match.group(groupNum).rstrip().replace(\" \",\"\")\n\n groupNum = groupNum + 1\n\n #print(\"Group {groupNum} found: {group}\".format(groupNum=groupNum, group=match.group(groupNum)))\n\n if groupNum == 2:\n b = match.group(groupNum).rstrip().replace(\" \",\"\")\n print('a =',a,',b =',b)\n Dickus[a] = int(b)\n\n #print(Dickus)\n\n#output sorted dictionary\nprint('Dictionary:',Dickus,'\\nSorted Dictionary:',sorted(Dickus),'\\n\\n')\n\nfile.close()\n\n\n# V2 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n#opening input file\nfile = open('data.txt', 'r')\n\n#reading input file\nlines = file.readlines()\n\n#dictionary\nDickus = dict()\n\n#deleting \\n\nfor x in range(len(lines)):\n lines[x] = lines[x].rstrip().split(\",\")\n\n for y in range(len(lines[x])):\n lines[x][y] = lines[x][y].replace(\" \",\"\")\n\n#filling dictionary\nfor x in range(len(lines)):\n Dickus[lines[x][0]] = int(lines[x][1])\n\n\n\nsortval = []\nsortkey = []\n#retrieving dictionary values\nfor x in Dickus:\n sortval.append(Dickus[x])\n sortkey.append(x)\n\n\n#sort by value\nfor x in range(len(sortval)):\n\n for y in range(len(sortval)):\n\n if sortval[x] < sortval[y]:\n a = sortval[x]\n b = sortkey[x]\n sortval[x] = sortval[y]\n sortval[y] = a\n sortkey[x] = sortkey[y]\n sortkey[y] = b\n\n#output\nprint('Dictionary:', Dickus, '\\nSorted by keys:', sorted(Dickus),'\\nSorted by values',sortval)\n\nprint('\\nby keys:')\nfor x in sorted(Dickus):\n print(x,'=',Dickus[x])\n\nprint('\\nby values:')\nfor x in range(len(sortval)):\n print(sortval[x] , '=' , sortkey[x])\n\nfile.close()\n\n# V3 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n#opening input file\nfile = open('data.txt', 'r')\n\n#reading input file\nlines = file.readlines()\n\n#dictionary\nDickus = dict()\n\n#deleting \\n\nfor x in range(len(lines)):\n lines[x] = lines[x].rstrip().split(\",\")\n\n for y in range(len(lines[x])):\n lines[x][y] = lines[x][y].replace(\" \",\"\")\n\n#filling dictionary\nfor x in range(len(lines)):\n Dickus[lines[x][0]] = int(lines[x][1])\n\n#sort by key\nSKDickus = dict()\n\nfor x in sorted(Dickus):\n SKDickus[x] = Dickus[x]\n\n#sort by value\nsortval = [(v,k) for k,v in Dickus.items()]\nsortval.sort()\nsortval = [(k,v) for v,k in sortval]\n\nSVDickus = dict()\nfor x in range(len(sortval)):\n SVDickus[sortval[x][0]] = sortval[x][1]\n\n#output\nprint('\\nDictionary:', Dickus, '\\nSorted by keys:', SKDickus,'\\nSorted by values',SVDickus)","sub_path":"exemple_4_OT.py","file_name":"exemple_4_OT.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"91483422","text":"# Given a non-empty string s of length at most 10^4, consisting of lowercase\n# Latin letters, build an optimal prefix-free code. In the first line, output\n# the number of distinct letters k occurring in the string and the size of\n# the resulting encoded string. In the next k lines, write the letter codes\n# in the format \"letter: code\". In the last line, output the encoded string.\n\nstring = input()\ns = set(string)\nq=[]\nfreq = {}\nfor i in s:\n freq[i]=string.count(i)\n#print(freq)\nfor key in freq.keys():\n q.append([freq[key],key])\nks=list(freq.keys())\nprint(ks)\nwhile len(q)>1:\n q.sort()\n print(\"intermediate\",q)\n q[1]=[q[0][0]+q[1][0],q[0][1]+q[1][1]]\n del(q[0])\n print(q)","sub_path":"Algorythms/Haffman& cueue with priorities.py","file_name":"Haffman& cueue with priorities.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"171334288","text":"# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.types.artifact_utils.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n# Standard Imports\n\nimport tensorflow as tf\nfrom tfx.types import artifact_utils\nfrom tfx.types import standard_artifacts\n\n\nclass ArtifactUtilsTest(tf.test.TestCase):\n\n def testGetFromSingleList(self):\n \"\"\"Test various retrieval utilities on a single list of Artifact.\"\"\"\n artifacts = [standard_artifacts.Examples()]\n artifacts[0].uri = '/tmp/evaluri'\n artifacts[0].split_names = '[\"eval\"]'\n self.assertEqual(artifacts[0],\n artifact_utils.get_single_instance(artifacts))\n self.assertEqual('/tmp/evaluri', artifact_utils.get_single_uri(artifacts))\n self.assertEqual('/tmp/evaluri/eval',\n artifact_utils.get_split_uri(artifacts, 'eval'))\n with self.assertRaises(ValueError):\n artifact_utils.get_split_uri(artifacts, 'train')\n\n def testGetFromSplits(self):\n \"\"\"Test various retrieval utilities on a list of split Artifact.\"\"\"\n artifacts = [standard_artifacts.Examples()]\n artifacts[0].uri = '/tmp'\n artifacts[0].split_names = artifact_utils.encode_split_names(\n ['train', 'eval'])\n\n self.assertEqual(artifacts[0].split_names, '[\"train\", \"eval\"]')\n\n self.assertIs(artifact_utils.get_single_instance(artifacts), artifacts[0])\n self.assertEqual('/tmp', artifact_utils.get_single_uri(artifacts))\n 
self.assertEqual('/tmp/train',\n artifact_utils.get_split_uri(artifacts, 'train'))\n self.assertEqual('/tmp/eval',\n artifact_utils.get_split_uri(artifacts, 'eval'))\n\n\nif __name__ == '__main__':\n tf.test.main()\n","sub_path":"tfx/types/artifact_utils_test.py","file_name":"artifact_utils_test.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
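image_recognition.py below gives Point a standard 2-D rotation; a quick check of the convention (a quarter turn maps the x-axis onto the y-axis), assuming the Point class from the record is importable:

import math

p = Point(1.0, 0.0)
q = p.rotate(math.pi / 2)
print(round(q.x, 6), round(q.y, 6))  # -> 0.0 1.0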
+{"seq_id":"448086099","text":"\nimport cv2\nimport numpy as np\nimport math\nimport time\nimport logging\nfrom managed_threading import ManagedThread\nlogger = logging.getLogger(\"image_recognition\")\n\nclass Point(object):\n \"\"\"\n Cartesian coordinate (meters)\n \"\"\"\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.angle = self.angle_rad = math.atan2(x, y)\n self.angle_deg = math.degrees(self.angle_rad)\n self.dist = (x**2 + y**2)**0.5\n\n def __iter__(self):\n yield self.x\n yield self.y\n\n def __add__(self, other_point):\n return self.translate(other_point)\n\n def __sub__(self, other_point):\n return Point(self.x - other_point.x, self.y - other_point.y)\n\n def __lshift__(self, angle_rad):\n return self.rotate(-angle_rad)\n\n def __rshift__(self, angle_rad):\n return self.rotate(angle_rad)\n\n def rotate(self, angle_rad):\n return Point(\n self.x * math.cos(angle_rad) - self.y * math.sin(angle_rad),\n self.x * math.sin(angle_rad) + self.y * math.cos(angle_rad))\n\n def translate(self, other_point):\n return Point(self.x + other_point.x, self.y + other_point.y)\n\n\nclass PolarPoint(Point):\n \"\"\"\n Polar coordinate (radians, meters)\n \"\"\"\n\n def __init__(self, angle, dist):\n self.angle = self.angle_rad = angle # radians\n self.dist = dist # distance in meters\n self.angle_deg = math.degrees(self.angle)\n self.x = self.dist * math.cos(self.angle_rad)\n self.y = self.dist * math.sin(self.angle_rad)\n\n\nclass ImageRecognition(object):\n\n GOAL_FIELD_DILATION = 40\n GOAL_BOTTOM = 200\n\n # Y U Y V\n FIELD_LOWER = 32, 0, 32, 0\n FIELD_UPPER = 255, 140, 255, 150\n\n YELLOW_LOWER = 128, 50, 128, 150\n YELLOW_UPPER = 255, 100, 255, 200\n\n\n BLUE_LOWER = 0, 150, 0, 0\n BLUE_UPPER = 255, 255, 255, 128\n\n\n BALL_LOWER = 64, 0, 64, 220\n BALL_UPPER = 255, 96, 255, 255\n\n\n KICKER_OFFSET = 0 # -50\n # Ball search scope vertically\n BALLS_BOTTOM = 300\n\n def __init__(self, frame, copy=True, camera_height=0.22, camera_mount_radius=0.07, dist_goals=4.6, camera_vert_fov=72, camera_horiz_fov=54):\n \"\"\"\n Create image recognition object for 8-headed camera mount which internally\n tracks the state and corrects sensor readings\n\n Keyword arguments:\n frame -- 320x4320 YUYV frame\n dist_goals -- Goal to goal distance (m)\n camera_height -- Camera height from the floor (m)\n camera_mount_radius -- Camera distance from the center of the robot (m)\n camera_vert_fov -- Camera field of view vertically (deg)\n camera_horiz_fov -- Camera field of view horizontally (deg)\n \"\"\"\n\n self.ball_grabbed_green1 = 584>>1, 4320- (2200 + 32) #4320-2184,\n self.ball_grabbed_orange = 606>>1, 4320-(2200) # 4320-2216\n self.ball_grabbed_green2 = 584>>1, 4320-(2200 - 32) #4320-2248,\n\n# self.ball_grabbed_green1 = 564>>1, 4320-2184,\n# self.ball_grabbed_orange = 586>>1, 4320-2216\n# self.ball_grabbed_green2 = 564>>1, 4320-2248,\n\n\n\n\n self.dist_goals = dist_goals\n self.camera_height = camera_height\n self.camera_mount_radius = camera_mount_radius\n self.camera_horiz_fov_rad = math.radians(camera_horiz_fov)\n self.camera_vert_fov_rad = math.radians(camera_vert_fov)\n self.update(frame)\n\n assert abs(self.x_to_deg(self.deg_to_x(50)) - 50) < 0.1, self.x_to_deg(self.deg_to_x(50))\n assert abs(self.y_to_dist(self.dist_to_y(2.0)) - 2.0) < 0.1\n\n def update(self, frame):\n assert frame.shape == (4320, 320, 4)\n self.frame = frame\n self.field_mask, self.field_mask_dilated = self._recognize_field(self.FIELD_LOWER, self.FIELD_UPPER)\n\n self.goal_blue_mask, \\\n self.goal_blue, \\\n self.goal_blue_rect = self._recognize_goal(self.BLUE_LOWER, self.BLUE_UPPER)\n self.goal_yellow_mask, \\\n self.goal_yellow, \\\n self.goal_yellow_rect = self._recognize_goal(self.YELLOW_LOWER, self.YELLOW_UPPER)\n\n self.robot, self.orientation = self._position_robot() # Calculate x and y coords on the field and angle to grid\n self.balls_mask, self.balls = self._recognize_balls()\n self.ball_grabbed = self._recognize_ball_grabbed()\n\n\n def _recognize_ball_grabbed(self):\n if self.frame is None:\n return False\n orange_pixels = cv2.inRange(self.frame[\n self.ball_grabbed_orange[1]-4:self.ball_grabbed_orange[1]+4,\n self.ball_grabbed_orange[0]-4:self.ball_grabbed_orange[0]+4],\n self.BALL_LOWER, self.BALL_UPPER).sum()\n green1_pixels = cv2.inRange(self.frame[\n self.ball_grabbed_green1[1]-4:self.ball_grabbed_green1[1]+4,\n self.ball_grabbed_green1[0]-4:self.ball_grabbed_green1[0]+4],\n self.FIELD_LOWER, self.FIELD_UPPER).sum()\n green2_pixels = cv2.inRange(self.frame[\n self.ball_grabbed_green2[1]-4:self.ball_grabbed_green2[1]+4,\n self.ball_grabbed_green2[0]-4:self.ball_grabbed_green2[0]+4],\n self.FIELD_LOWER, self.FIELD_UPPER).sum()\n #print(\"orange:\", orange_pixels, \"green1_pixels:\", green1_pixels, \"green2_pixels:\", green2_pixels)\n return orange_pixels > 1000 and green1_pixels > 1000 and green2_pixels > 1000\n\n def _position_robot(self):\n if not self.goal_blue or not self.goal_yellow:\n logger.info(\"Both goals not detected!\")\n return None, None\n\n if self.goal_blue.dist + self.goal_yellow.dist > 7:\n logger.info(\"Both goals overlap!\")\n #print(\"GLITCH READING DISTANCES, got:\", self.goal_blue.dist, self.goal_yellow.dist)\n return None, None\n\n # Perceived angle between goals\n rad_diff = abs(self.goal_yellow.angle_rad - self.goal_blue.angle_rad)\n\n # Correct perceived distances based on the angle and known goal-goal distance\n derived_dist = math.sqrt(self.goal_yellow.dist ** 2 + self.goal_blue.dist ** 2 - 2 * self.goal_yellow.dist * self.goal_blue.dist * math.cos(rad_diff))\n\n if not derived_dist:\n # FAILBOX\n return None, None\n\n\n correction_factor = self.dist_goals / derived_dist # Divide goal-to-goal distance by the perceived distance\n# print(\"correction:\", correction_factor)\n\n self.goal_yellow.dist *= correction_factor\n self.goal_blue.dist *= correction_factor\n\n #assert self.goal_blue.dist + self.goal_yellow.dist > self.dist_goals, \"%.1fm\" % (self.goal_blue.dist + self.goal_yellow.dist)\n assert self.dist_goals ** 2 - (self.goal_blue.dist ** 2 + self.goal_yellow.dist ** 2 - 2 * self.goal_blue.dist * self.goal_yellow.dist * math.cos(rad_diff)) < 0.00001, \\\n \"%.1f %.1f\" % (self.dist_goals ** 2, self.goal_blue.dist ** 2 + self.goal_yellow.dist ** 2 - 2 * self.goal_blue.dist * self.goal_yellow.dist * math.cos(rad_diff))\n\n # Calculate distance projection along the line from goal to goal\n robot_x = (self.dist_goals**2-self.goal_blue.dist**2+self.goal_yellow.dist**2)/(2*self.dist_goals)\n try:\n robot_y = -math.sqrt(self.goal_yellow.dist**2-robot_x**2)\n except ValueError:\n logger.info(\"Triangulation 
failed\")\n return None, None\n\n # thx Fred, Lauri's too st00pid for this shit\n if rad_diff > math.pi:\n robot_y = -robot_y\n if self.goal_yellow.angle_rad > self.goal_blue.angle_rad:\n robot_y = -robot_y\n\n # Knowing distance of goals and the angle between goals\n # we can derive the other angles\n sine = math.sin(rad_diff)\n if sine == 0:\n logger.info(\"Both goals overlap!\")\n return None, None\n circumcircle_diameter = 4.6 / sine\n rad_blue = math.asin(self.goal_blue.dist / circumcircle_diameter)\n rad_yellow = math.asin(self.goal_yellow.dist / circumcircle_diameter)\n\n # TODO: Check if we got sensible triangle here\n\n orientation_rad = (-rad_blue -self.goal_blue.angle_rad) % (2*math.pi)\n\n return Point(robot_x, robot_y), orientation_rad\n\n\n def _recognize_field(self, lower, upper):\n mask = cv2.inRange(self.frame, lower, upper)\n assert mask.shape == (4320, 320)\n mask = cv2.erode(mask, None, iterations=4)\n\n slices = [mask[:480,:]]\n\n # iterate over cameras because otherwise convex hull wraps around distorted field edges\n for j in range(1,9):\n sliced = mask[j*480-20:(j+1)*480,:]\n _, contours, hierarchy = cv2.findContours(sliced, 1, 5)\n sliced = mask[j*480-20:(j+1)*480,:]\n contours = [c for c in contours if cv2.contourArea(c) > 30]\n if contours:\n merged = np.vstack(contours) # merge contours\n hull = cv2.convexHull(merged) # get convex hull poly\n cv2.drawContours(sliced, [hull],0, 9, -1) # Fill in mask with convex hull\n slices.append(sliced[20:,:])\n\n mask = np.vstack(slices)\n assert mask.shape == (4320, 320), \"got instead %s\" % repr(mask.shape)\n\n return mask, cv2.dilate(mask, None, iterations=10)\n\n def _recognize_goal(self, lower, upper):\n # Recognize yellow goal\n\n goal_mask = cv2.inRange(self.frame[:,:self.BALLS_BOTTOM-self.GOAL_FIELD_DILATION], lower, upper)\n goal_mask = cv2.erode(goal_mask, None, iterations=4)\n mask = cv2.bitwise_and(goal_mask, self.field_mask_dilated[:,self.GOAL_FIELD_DILATION:self.BALLS_BOTTOM])\n\n goal_mask = mask\n\n rect = None\n maxwidth = 0\n step = 1 * 480\n scope = 3 * 480\n rects = []\n for j in range(0,9):\n sliced = mask[j*step:j*step+scope,:]\n\n _, contours, hierarchy = cv2.findContours(sliced, 1, 2)\n contours = sorted(contours, key=cv2.contourArea)[-1:]\n\n if contours:\n merged = np.vstack(contours)\n hull = cv2.convexHull(merged) # get convex hull poly\n y,x,h,w = cv2.boundingRect(hull)\n\n if w < scope and w > maxwidth:\n maxwidth = w\n rect = 4320-(x+j*step)-w,2*y,w,h\n rects.append(rect)\n\n if maxwidth:\n x,y,w,h = rect # done\n return goal_mask, PolarPoint(self.x_to_rad(x+w/2.0), self.y_to_dist(y+h+self.GOAL_FIELD_DILATION)), rects\n return goal_mask, None, []\n\n def _recognize_balls(self):\n mask = cv2.inRange(self.frame[:,:self.BALLS_BOTTOM], self.BALL_LOWER, self.BALL_UPPER)\n mask = cv2.bitwise_and(mask, self.field_mask[:,:self.BALLS_BOTTOM])\n mask = cv2.dilate(mask, None, iterations=2)\n cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n balls = set()\n for c in cnts:\n (y, x), radius = cv2.minEnclosingCircle(c) # non-swapped\n x = 4320 - x\n y = y*2 # self.BALLS_BOTTOM - y*2\n if radius < 5:\n continue\n M = cv2.moments(c)\n if M[\"m00\"] == 0:\n continue\n relative = PolarPoint(self.x_to_rad(x), self.y_to_dist(y+radius))\n if self.robot and self.orientation:\n absolute = relative.rotate(-self.orientation).translate(self.robot)\n if absolute.x > 4.0 and absolute.y < 0.5 and absolute.y > -0.5:\n continue # skip balls in goal, not really working right now\n else:\n absolute 
= None\n                ball_coords = relative, absolute, int(x), int(y), int(radius)\n                balls.add(ball_coords)\n        return mask, sorted(balls, key=lambda b:b[0].dist * abs( b[0].angle))\n\n\n    def dist_to_y(self, d):\n        \"\"\"\n        Convert object distance to panorama image y coordinate\n        \"\"\"\n        return int(640*math.atan2(self.camera_height, d - self.camera_mount_radius)/(0.2*2*math.pi))\n\n    def y_to_dist(self, y):\n        \"\"\"\n        Convert panorama image y coordinate to distance\n        \"\"\"\n        return self.camera_height / math.tan(y * self.camera_vert_fov_rad / 640) + self.camera_mount_radius\n\n    def deg_to_x(self, d):\n        \"\"\"\n        Convert degrees from the kicker to panorama image x coordinate\n        \"\"\"\n        d = d % 360\n        if d > 180: d -= 360\n        return int(d*3840/360+240+1920)-self.KICKER_OFFSET\n\n\n    def x_to_deg(self, x):\n        x += self.KICKER_OFFSET\n        d = (x-2160.0)*360/(3840)\n        return d % 360\n\n    def x_to_rad(self, x):\n        \"\"\"\n        Convert panorama image x coordinate to angle in radians from the center of the image\n        (angle from the kicker)\n        \"\"\"\n        x += self.KICKER_OFFSET\n        d = (x-2160.0)*(math.pi*2)/(3840)\n        if d > math.pi:\n            d -= math.pi * 2\n        if d < -math.pi:\n            d += math.pi * 2\n        return d\n\nimport json\n\nclass ImageRecognizer(ManagedThread):\n    def step(self, frame):\n        assert frame.shape == (4320, 320, 4), \"Got %s instead\" % repr(frame.shape)\n        r = ImageRecognition(frame)\n        self.produce(r, self.grabber)\n\n        # TODO: Put this in another sensible place, probably as an ImageRecognizer consumer\n        if r.robot:\n            for websocket in self.websockets:\n                websocket.send(json.dumps(dict(\n                    action=\"position-robot\",\n                    x=r.robot.x,\n                    y=r.robot.y)))\n\n","sub_path":"robovision/image_recognition.py","file_name":"image_recognition.py","file_ext":"py","file_size_in_byte":13450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"},"seq_id":"591204625","text":"# -*- coding: utf-8 -*-\n\"\"\"Feedback push notifications.\"\"\"\nimport ujson\nfrom sisyphus.configure import PUSH_TYPE_TEMPLATE\nfrom sisyphus.service import (\n    PushType,\n    PushStatus,\n)\nfrom sisyphus.service.base import BaseHandler\nfrom sisyphus.models.template import MessageTemplate\nfrom sisyphus.models.history import PushHistory\n\n\nclass FeedbackHandler(BaseHandler):\n    push_type = PushType.feedback_notice\n\n    def __init__(self):\n        super(FeedbackHandler, self).__init__()\n        self.template_id = PUSH_TYPE_TEMPLATE[str(self.push_type)]\n\n    def gen_payload(self, **kwargs):\n        template = MessageTemplate.get(self.template_id)\n        payload_data = ujson.loads(template.data)\n        title = template.title\n        body = template.body.format(arg1=kwargs['arg1'])\n\n        payload_data['broadcast'] = '0'\n        payload_data['title'] = title\n        payload_data['body'] = body\n        payload_data['type'] = '2'\n\n        self.payload = {\n            \"app\": 39,\n            \"device\": kwargs['device'],\n            \"title\": title,\n            \"body\": body,\n            \"ttl\": 86400,\n            \"data\": payload_data,\n        }\n        history = PushHistory.add(\n            self.push_id, self.payload, 0, 0, '-1', 'system', PushStatus.Begin)\n        self.push_id = history.id\n","sub_path":"sisyphus/service/feedback.py","file_name":"feedback.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"},"seq_id":"651548374","text":"import requests\nimport os\n\napi = os.environ['DHIS_BASEURL'] + '/api/'\ncredentials = (os.environ['DHIS_USERNAME'], os.environ['DHIS_PASSWORD'])\n\nprint('Connecting to DHIS2: ' + api)\n\nres = requests.get(api + 'resources.json', auth=credentials)\nif res.json()['resources'][0]:\n    
print('Connected to DHIS 2 using ' + api)\n\ndef get(url, params):\n res = requests.get(api + url, auth=credentials, params=params)\n process_status_code(res, 'get')\n return res.json()\n\ndef delete(url):\n res = requests.delete(api + url, auth=credentials)\n process_status_code(res, 'delete')\n\n\ndef post(url, body):\n res = requests.post(api + url, auth=credentials, json=body)\n process_status_code(res, 'post')\n\ndef put(url, body):\n res = requests.put(api + url, auth=credentials, json=body)\n process_status_code(res, 'put')\n\ndef process_status_code(response, method):\n if response.status_code in range(200,210):\n print(method + ' to url ' + response.url + ' successful')\n else:\n print(method + ' to url ' + response.url + ' failed')\n print(response.json())","sub_path":"testData/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"546047840","text":"# Copyright 2021 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Utilities for constructing structured surrogate posteriors.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# [internal] enable type annotations\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import util as tfp_util\nfrom tensorflow_probability.python.bijectors import identity\nfrom tensorflow_probability.python.bijectors import restructure\nfrom tensorflow_probability.python.bijectors import scale as scale_lib\nfrom tensorflow_probability.python.bijectors import shift\nfrom tensorflow_probability.python.bijectors import sigmoid\nfrom tensorflow_probability.python.distributions import beta\nfrom tensorflow_probability.python.distributions import half_normal\nfrom tensorflow_probability.python.distributions import independent\nfrom tensorflow_probability.python.distributions import joint_distribution\nfrom tensorflow_probability.python.distributions import joint_distribution_auto_batched\nfrom tensorflow_probability.python.distributions import joint_distribution_coroutine\nfrom tensorflow_probability.python.distributions import sample\nfrom tensorflow_probability.python.distributions import transformed_distribution\nfrom tensorflow_probability.python.distributions import truncated_normal\nfrom tensorflow_probability.python.distributions import uniform\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import prefer_static as ps\n\nfrom tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import\n\n\nRoot = joint_distribution_coroutine.JointDistributionCoroutine.Root\n\n_NON_STATISTICAL_PARAMS = [\n 'name', 'validate_args', 'allow_nan_stats', 'experimental_use_kahan_sum',\n 'reinterpreted_batch_ndims', 
'dtype'\n]\n_NON_TRAINABLE_PARAMS = ['low', 'high']\n\nASVIParameters = collections.namedtuple(\n 'ASVIParameters', ['prior_weight', 'mean_field_parameter'])\n\n\ndef _as_trainable_family(distribution):\n \"\"\"Substitutes prior distributions with more easily trainable ones.\"\"\"\n with tf.name_scope('as_trainable_family'):\n\n if isinstance(distribution, half_normal.HalfNormal):\n return truncated_normal.TruncatedNormal(\n loc=0.,\n scale=distribution.scale,\n low=0.,\n high=distribution.scale * 10.)\n elif isinstance(distribution, uniform.Uniform):\n return shift.Shift(distribution.low)(\n scale_lib.Scale(distribution.high - distribution.low)(beta.Beta(\n concentration0=tf.ones(\n distribution.event_shape_tensor(), dtype=distribution.dtype),\n concentration1=1.)))\n else:\n return distribution\n\n\ndef _make_asvi_trainable_variables(prior,\n mean_field=False,\n initial_prior_weight=0.5):\n \"\"\"Generates parameter dictionaries given a prior distribution and list.\"\"\"\n with tf.name_scope('make_asvi_trainable_variables'):\n param_dicts = []\n prior_dists = prior._get_single_sample_distributions() # pylint: disable=protected-access\n for dist in prior_dists:\n original_dist = dist.distribution if isinstance(dist, Root) else dist\n\n substituted_dist = _as_trainable_family(original_dist)\n\n # Grab the base distribution if it exists\n try:\n actual_dist = substituted_dist.distribution\n except AttributeError:\n actual_dist = substituted_dist\n\n new_params_dict = {}\n\n # Build trainable ASVI representation for each distribution's parameters.\n parameter_properties = actual_dist.parameter_properties(\n dtype=actual_dist.dtype)\n\n if isinstance(original_dist, sample.Sample):\n posterior_batch_shape = ps.concat([\n actual_dist.batch_shape_tensor(),\n distribution_util.expand_to_vector(original_dist.sample_shape)\n ], axis=0)\n else:\n posterior_batch_shape = actual_dist.batch_shape_tensor()\n\n for param, value in actual_dist.parameters.items():\n\n if param in (_NON_STATISTICAL_PARAMS +\n _NON_TRAINABLE_PARAMS) or value is None:\n continue\n\n actual_event_shape = parameter_properties[param].shape_fn(\n actual_dist.event_shape_tensor())\n try:\n bijector = parameter_properties[\n param].default_constraining_bijector_fn()\n except NotImplementedError:\n bijector = identity.Identity()\n\n if mean_field:\n prior_weight = None\n else:\n unconstrained_ones = tf.ones(\n shape=ps.concat([\n posterior_batch_shape,\n bijector.inverse_event_shape_tensor(\n actual_event_shape)\n ], axis=0),\n dtype=tf.convert_to_tensor(value).dtype)\n\n prior_weight = tfp_util.TransformedVariable(\n initial_prior_weight * unconstrained_ones,\n bijector=sigmoid.Sigmoid(),\n name='prior_weight/{}/{}'.format(dist.name, param))\n\n # If the prior distribution was a tfd.Sample wrapping a base\n # distribution, we want to give every single sample in the prior its\n # own lambda and alpha value (rather than having a single lambda and\n # alpha).\n if isinstance(original_dist, sample.Sample):\n value = tf.reshape(\n value,\n ps.concat([\n actual_dist.batch_shape_tensor(),\n ps.ones(ps.rank_from_shape(original_dist.sample_shape)),\n actual_event_shape\n ],\n axis=0))\n value = tf.broadcast_to(\n value,\n ps.concat([posterior_batch_shape, actual_event_shape], axis=0))\n new_params_dict[param] = ASVIParameters(\n prior_weight=prior_weight,\n mean_field_parameter=tfp_util.TransformedVariable(\n value,\n bijector=bijector,\n name='mean_field_parameter/{}/{}'.format(dist.name, param)))\n\n param_dicts.append(new_params_dict)\n 
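    # Descriptive note on the structure built above: each entry of `param_dicts`
    # maps a trainable parameter name to an ASVIParameters(prior_weight,
    # mean_field_parameter) pair, and the surrogate generator later recovers each
    # parameter as `prior_weight * prior_value + (1 - prior_weight) * mean_field_parameter`.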
return param_dicts\n\n\n# TODO(kateslin): Add support for models with prior+likelihood written as\n# a single JointDistribution.\ndef build_asvi_surrogate_posterior(prior,\n mean_field=False,\n initial_prior_weight=0.5,\n name=None):\n \"\"\"Builds a structured surrogate posterior inspired by conjugate updating.\n\n ASVI, or Automatic Structured Variational Inference, was proposed by\n Ambrogioni et al. (2020) [1] as a method of automatically constructing a\n surrogate posterior with the same structure as the prior. It does this by\n reparameterizing the variational family of the surrogate posterior by\n structuring each parameter according to the equation\n ```none\n prior_weight * prior_parameter + (1 - prior_weight) * mean_field_parameter\n ```\n In this equation, `prior_parameter` is a vector of prior parameters and\n `mean_field_parameter` is a vector of trainable parameters with the same\n domain as `prior_parameter`. `prior_weight` is a vector of learnable\n parameters where `0. <= prior_weight <= 1.`. When `prior_weight =\n 0`, the surrogate posterior will be a mean-field surrogate, and when\n `prior_weight = 1.`, the surrogate posterior will be the prior. This convex\n combination equation, inspired by conjugacy in exponential families, thus\n allows the surrogate posterior to balance between the structure of the prior\n and the structure of a mean-field approximation.\n\n Args:\n prior: tfd.JointDistribution instance of the prior.\n mean_field: Optional Python boolean. If `True`, creates a degenerate\n surrogate distribution in which all variables are independent,\n ignoring the prior dependence structure. Default value: `False`.\n initial_prior_weight: Optional float value (either static or tensor value)\n on the interval [0, 1]. A larger value creates an initial surrogate\n distribution with more dependence on the prior structure. Default value:\n `0.5`.\n name: Optional string. Default value: `build_asvi_surrogate_posterior`.\n\n Returns:\n surrogate_posterior: A `tfd.JointDistributionCoroutineAutoBatched` instance\n whose samples have shape and structure matching that of `prior`.\n\n Raises:\n TypeError: The `prior` argument cannot be a nested `JointDistribution`.\n\n ### Examples\n\n Consider a Brownian motion model expressed as a JointDistribution:\n\n ```python\n prior_loc = 0.\n innovation_noise = .1\n\n def model_fn():\n new = yield tfd.Normal(loc=prior_loc, scale=innovation_noise)\n for i in range(4):\n new = yield tfd.Normal(loc=new, scale=innovation_noise)\n\n prior = tfd.JointDistributionCoroutineAutoBatched(model_fn)\n ```\n\n Let's use variational inference to approximate the posterior. We'll build a\n surrogate posterior distribution by feeding in the prior distribution.\n\n ```python\n surrogate_posterior =\n tfp.experimental.vi.build_asvi_surrogate_posterior(prior)\n ```\n\n This creates a trainable joint distribution, defined by variables in\n `surrogate_posterior.trainable_variables`. 
We use `fit_surrogate_posterior`\n to fit this distribution by minimizing a divergence to the true posterior.\n\n ```python\n losses = tfp.vi.fit_surrogate_posterior(\n target_log_prob_fn,\n surrogate_posterior=surrogate_posterior,\n num_steps=100,\n optimizer=tf.optimizers.Adam(0.1),\n sample_size=10)\n\n # After optimization, samples from the surrogate will approximate\n # samples from the true posterior.\n samples = surrogate_posterior.sample(100)\n posterior_mean = [tf.reduce_mean(x) for x in samples]\n posterior_std = [tf.math.reduce_std(x) for x in samples]\n ```\n\n #### References\n [1]: Luca Ambrogioni, Max Hinne, Marcel van Gerven. Automatic structured\n variational inference. _arXiv preprint arXiv:2002.00643_, 2020\n https://arxiv.org/abs/2002.00643\n\n \"\"\"\n\n with tf.name_scope(name or 'build_asvi_surrogate_posterior'):\n param_dicts = _make_asvi_trainable_variables(\n prior=prior,\n mean_field=mean_field,\n initial_prior_weight=initial_prior_weight)\n def posterior_generator():\n\n prior_gen = prior._model_coroutine() # pylint: disable=protected-access\n dist = next(prior_gen)\n\n i = 0\n try:\n while True:\n original_dist = dist.distribution if isinstance(dist, Root) else dist\n\n if isinstance(original_dist, joint_distribution.JointDistribution):\n # TODO(kateslin): Build inner JD surrogate in\n # _make_asvi_trainable_variables to avoid rebuilding variables.\n raise TypeError(\n 'Argument `prior` cannot be a nested `JointDistribution`.')\n\n else:\n\n original_dist = _as_trainable_family(original_dist)\n\n try:\n actual_dist = original_dist.distribution\n except AttributeError:\n actual_dist = original_dist\n\n dist_params = actual_dist.parameters\n temp_params_dict = {}\n\n for param, value in dist_params.items():\n if param in (_NON_STATISTICAL_PARAMS +\n _NON_TRAINABLE_PARAMS) or value is None:\n temp_params_dict[param] = value\n else:\n prior_weight = param_dicts[i][param].prior_weight\n mean_field_parameter = param_dicts[i][\n param].mean_field_parameter\n if mean_field:\n temp_params_dict[param] = mean_field_parameter\n else:\n temp_params_dict[param] = prior_weight * value + (\n 1. 
- prior_weight) * mean_field_parameter\n\n if isinstance(original_dist, sample.Sample):\n inner_dist = type(actual_dist)(**temp_params_dict)\n\n surrogate_dist = independent.Independent(\n inner_dist,\n reinterpreted_batch_ndims=ps.rank_from_shape(\n original_dist.sample_shape))\n else:\n surrogate_dist = type(actual_dist)(**temp_params_dict)\n\n if isinstance(original_dist,\n transformed_distribution.TransformedDistribution):\n surrogate_dist = transformed_distribution.TransformedDistribution(\n surrogate_dist, bijector=original_dist.bijector)\n\n if isinstance(original_dist, independent.Independent):\n surrogate_dist = independent.Independent(\n surrogate_dist,\n reinterpreted_batch_ndims=original_dist\n .reinterpreted_batch_ndims)\n\n if isinstance(dist, Root):\n value_out = yield Root(surrogate_dist)\n else:\n value_out = yield surrogate_dist\n\n dist = prior_gen.send(value_out)\n i += 1\n except StopIteration:\n pass\n\n surrogate_posterior = (\n joint_distribution_auto_batched.JointDistributionCoroutineAutoBatched(\n posterior_generator))\n\n # Ensure that the surrogate posterior structure matches that of the prior\n try:\n nest.assert_same_structure(prior.dtype, surrogate_posterior.dtype)\n except TypeError:\n tokenize = lambda jd: jd._model_unflatten( # pylint: disable=protected-access, g-long-lambda\n range(len(jd._model_flatten(jd.dtype))) # pylint: disable=protected-access\n )\n surrogate_posterior = restructure.Restructure(\n output_structure=tokenize(prior),\n input_structure=tokenize(surrogate_posterior))(\n surrogate_posterior)\n\n surrogate_posterior.also_track = param_dicts\n return surrogate_posterior\n","sub_path":"tensorflow_probability/python/experimental/vi/automatic_structured_vi.py","file_name":"automatic_structured_vi.py","file_ext":"py","file_size_in_byte":14432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"502764635","text":"# coding: utf-8\n\n\"\"\"\n Aqua Security Test Api Definition Document Authered By - Shaharuk Shaikh\n\n This document is the api def document api's given to test by Aqua Security \n\n The version of the OpenAPI document: 0.1\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nfrom setuptools import setup, find_packages # noqa: H301\n\nNAME = \"openapi-client\"\nVERSION = \"1.0.0\"\n# To install the library, run the following\n#\n# python setup.py install\n#\n# prerequisite: setuptools\n# http://pypi.python.org/pypi/setuptools\n\nREQUIRES = [\"urllib3 >= 1.15\", \"six >= 1.10\", \"certifi\", \"python-dateutil\", \"pytest\", \"pytest-html\"]\n\nsetup(\n name=NAME,\n version=VERSION,\n description=\"Aqua Security Test Api Definition Document Authered By - Shaharuk Shaikh\",\n author_email=\"\",\n url=\"\",\n keywords=[\"OpenAPI\", \"OpenAPI-Generator\", \"Aqua Security Test Api Definition Document Authered By - Shaharuk Shaikh\"],\n install_requires=REQUIRES,\n packages=find_packages(),\n include_package_data=True,\n long_description=\"\"\"\\\n This document is the api def document api's given to test by Aqua Security \n \"\"\"\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"495224078","text":"import sys\nimport math\nimport re\nfrom collections import defaultdict\n\nr_1 = 0.95\nr_2 = 0.95\nV = 1000000\nW = 0\nH = 0\n\nprobs = defaultdict(lambda: 0)\n\nwith open(sys.argv[1], 'r') as model_file:\n\tfor line in model_file:\n\t\tline = 
line.strip()\n\t\tline_list = line.split(\"\\t\")\n\t\tprobs[line_list[0]] = float(line_list[1])\n\nwith open(sys.argv[2], 'r') as test_file:\n\tfor line in test_file:\n\t\tline = line.strip().lower()\n\t\tline = re.sub(r' ,', '', line)\n\t\twords = line.split(\" \")\n\t\twords.insert(0, \"\")\n\t\twords.append(\"\")\n\t\tfor i in range(1,len(words)):\n\t\t\tP1 = r_1 * probs[words[i]] + (1 - r_1) / V\n\t\t\tP2 = r_1 * probs[\"{} {}\".format(words[i-1], words[i])] + (1 - r_2) * P1\n\t\t\tH += -math.log(P2, 2)\n\t\t\tW += 1\n\nprint(\"entropy = {}\".format( H / W ))","sub_path":"lifan/tutorial02/test-bigram.py","file_name":"test-bigram.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"170146790","text":"from abc import ABC, abstractmethod\nfrom datetime import timedelta, timezone\nfrom enum import Enum\nfrom functools import lru_cache\nimport logging\n\nimport numpy as np\n\nfrom pyschism.domain import ModelDomain\nfrom pyschism.forcing.tides.tides import Tides\nfrom pyschism.param.param import Param\n\n_logger = logging.getLogger(__name__)\n\n\nclass NullWritter:\n\n def __init__(self, *argv):\n pass\n\n def __str__(self):\n return \"\"\n\n\nclass NotImplementedWritter:\n\n def __init__(self, *argv):\n raise NotImplementedError('Writter for variable is not implemented.')\n\n\nclass TidalVariableWritter(ABC):\n\n def __init__(self, boundary, bctides):\n self.forcing = boundary['forcing']\n self.indexes = boundary['indexes']\n self.active_constituents = bctides.get_active_forcing_constituents()\n self._model_domain = bctides._model_domain\n\n @abstractmethod\n def __str__(self):\n raise NotImplementedError(f'str({self.__name__})')\n\n\nclass TidalElevationWritter(TidalVariableWritter):\n\n def __str__(self):\n f = \"\"\n for constituent in self.active_constituents:\n f += f'{constituent}\\n'\n vertices = self._model_domain.hgrid.get_xy(\n crs='EPSG:4326')[self.indexes, :]\n amp, phase = self.forcing.get_elevation(constituent, vertices)\n for i in range(len(vertices)):\n f += f'{amp[i]:.8e} {phase[i]:.8e}\\n'\n return f\n\n\nclass TidalVelocityWritter(TidalVariableWritter):\n\n def __str__(self):\n f = ''\n for constituent in self.active_constituents:\n f += f'{constituent}\\n'\n vertices = self._model_domain.hgrid.get_xy(\n crs='EPSG:4326')[self.indexes, :]\n uamp, uphase, vamp, vphase = self.forcing.get_velocity(\n constituent, vertices)\n for i in range(len(vertices)):\n f += f'{uamp[i]:.8e} {uphase[i]:.8e} ' \\\n f'{vamp[i]:.8e} {vphase[i]:.8e}\\n'\n return f\n\n\nclass iettypeWritter(Enum):\n NONE = NullWritter\n TIME_VARYING = NotImplementedWritter\n CONSTANT = NotImplementedWritter\n TIDAL = TidalElevationWritter\n SPACE_TIME_VARYING = NotImplementedWritter\n TIDAL_AND_SPACE_TIME_VARYING = NotImplementedWritter\n\n\nclass ifltypeWritter(Enum):\n NONE = NullWritter\n TIME_VARYING = NotImplementedWritter\n CONSTANT = NotImplementedWritter\n TIDAL = TidalVelocityWritter\n SPACE_TIME_VARYING = NotImplementedWritter\n TIDAL_AND_SPACE_TIME_VARYING = NotImplementedWritter\n\n\nclass itetypeWritter(Enum):\n NONE = NullWritter\n TIME_VARYING = NotImplementedWritter\n CONSTANT = NotImplementedWritter\n INITIAL_PROFILE_FOR_INFLOW = NotImplementedWritter\n INPUT_3D = NotImplementedWritter\n\n\nclass isatypeWritter(Enum):\n NONE = NullWritter\n TIME_VARYING = NotImplementedWritter\n CONSTANT = NotImplementedWritter\n INITIAL_PROFILE_FOR_INFLOW = NotImplementedWritter\n INPUT_3D = 
NotImplementedWritter\n\n\nclass itrtypeWritter(Enum):\n NONE = NullWritter\n TIME_VARYING = NotImplementedWritter\n CONSTANT = NotImplementedWritter\n INITIAL_PROFILE_FOR_INFLOW = NotImplementedWritter\n INPUT_3D = NotImplementedWritter\n\n\nclass Bctides:\n\n def __init__(self, model_domain: ModelDomain, param: Param,\n cutoff_depth: float = 50.):\n \"\"\"Provides an interface to write bctides.in to file. \"\"\"\n _logger.info('Initializing Bctides.')\n # check if start_date was given in case tidal forcings are requested.\n # Note: This is done twice so that this class can be used independently\n # from Param to just write bctides files\n afc = model_domain.get_active_forcing_constituents()\n if len(afc) > 0 and param.opt.start_date is None:\n raise Exception('start_date argument is required for simulating '\n 'tidal forcing.')\n\n self._model_domain = model_domain\n self._param = param\n self._cutoff_depth = cutoff_depth\n\n # init the main tidal forcing object\n tides = Tides()\n for const in tides.all_constituents:\n tides.use_constituent(\n const,\n potential=True if const in\n self.get_active_potential_constituents() else False,\n forcing=True if const in\n self.get_active_forcing_constituents() else False\n )\n self.__tidal_forcing = tides\n\n def __str__(self):\n f = f\"{self.start_date}\\n\" \\\n f\"{self.ntip} {self._cutoff_depth}\\n\"\n if self.ntip > 0:\n for constituent in self.get_active_potential_constituents():\n forcing = self.tidal_forcing(\n self.start_date, self.rnday, constituent)\n f += f'{constituent} \\n' \\\n f'{forcing[0]:G} ' \\\n f\"{forcing[1]:G} \" \\\n f'{forcing[2]:G} ' \\\n f'{forcing[3]:G} ' \\\n f'{forcing[4]:G}\\n'\n afc = self._model_domain.get_active_forcing_constituents()\n f += f'{len(afc):d}\\n'\n for constituent in afc:\n forcing = self.tidal_forcing(\n self.start_date, self.rnday, constituent)\n f += f'{constituent} \\n' \\\n f\"{forcing[2]:G} \" \\\n f'{forcing[3]:G} ' \\\n f'{forcing[4]:G}\\n'\n f += f\"{len(self._model_domain.open_boundaries)}\\n\" # nope\n for id, data in self._model_domain.open_boundaries:\n f += f\"{len(data['indexes'])} \" \\\n f'{str(data[\"forcing\"])}\\n' \\\n f'{iettypeWritter[data[\"forcing\"].iettype.name].value(data, self)}' \\\n f'{ifltypeWritter[data[\"forcing\"].ifltype.name].value(data, self)}' \\\n f'{itetypeWritter[data[\"forcing\"].itetype.name].value(data, self)}' \\\n f'{isatypeWritter[data[\"forcing\"].isatype.name].value(data, self)}' \\\n f'{itrtypeWritter[data[\"forcing\"].itrtype.name].value(data, self)}'\n return f\n\n @lru_cache(maxsize=1)\n def get_active_potential_constituents(self):\n # PySCHISM allows the user to input the tidal potentials and forcings\n # individually at each boundary, however, SCHISM supports only a global\n # specification. 
Here, we collect all the activated tidal potentials\n        # on each boundary and activate them all globally\n        # set active tidal potential constituents\n        const = dict()\n        for id, data in self._model_domain.open_boundaries:\n            forcing = data['forcing']\n            if isinstance(forcing, Tides):\n                for active in forcing.get_active_potential_constituents():\n                    const[active] = True\n        return tuple(const.keys())\n\n    @lru_cache(maxsize=1)\n    def get_active_forcing_constituents(self):\n        # set active tidal forcing constituents\n        const = dict()\n        for id, data in self._model_domain.open_boundaries:\n            forcing = data['forcing']\n            if isinstance(forcing, Tides):\n                for active in forcing.get_active_forcing_constituents():\n                    const[active] = True\n        return tuple(const.keys())\n\n    def write(self, path, overwrite=False):\n        with open(path, 'w') as f:\n            f.write(str(self))\n\n    @property\n    def start_date(self):\n        return self._param.opt.start_date\n\n    @property\n    def rnday(self):\n        return self._param.core.rnday\n\n    @property\n    def ntip(self):\n        return len(self.get_active_potential_constituents())\n\n    @property\n    def tidal_forcing(self):\n        return self.__tidal_forcing\n\n    @property\n    def start_date_utc(self):\n        if self.start_date.tzinfo is not None and \\\n                self.start_date.tzinfo.utcoffset(self.start_date) is not None:\n            return self.start_date.astimezone(timezone(timedelta(0)))\n        else:\n            return self.start_date\n","sub_path":"pyschism/forcing/tides/bctides.py","file_name":"bctides.py","file_ext":"py","file_size_in_byte":8005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"},"seq_id":"214954323","text":"import logging\nfrom decimal import Decimal\n\nfrom bs4 import BeautifulSoup\n\nfrom storescraper.categories import GAMING_CHAIR, NOTEBOOK, MONITOR, \\\n    HEADPHONES, KEYBOARD\nfrom storescraper.product import Product\nfrom storescraper.store import Store\nfrom storescraper.utils import session_with_proxy, remove_words\n\n\nclass EliteGamers(Store):\n    @classmethod\n    def categories(cls):\n        return [\n            GAMING_CHAIR,\n            NOTEBOOK,\n            MONITOR,\n            HEADPHONES,\n            KEYBOARD\n        ]\n\n    @classmethod\n    def discover_urls_for_category(cls, category, extra_args=None):\n        url_extensions = [\n            ['sillas', GAMING_CHAIR],\n            ['notebook', NOTEBOOK],\n            ['monitores', MONITOR],\n            ['audifonos', HEADPHONES],\n            ['teclados-y-mouse', KEYBOARD]\n        ]\n        session = session_with_proxy(extra_args)\n        product_urls = []\n        for url_extension, local_category in url_extensions:\n            if local_category != category:\n                continue\n            page = 1\n            while True:\n                if page > 10:\n                    raise Exception('page overflow: ' + url_extension)\n\n                url_webpage = 'https://elitegamers.cl/{}/page/{}/'.format(\n                    url_extension, page)\n                print(url_webpage)\n                response = session.get(url_webpage)\n                soup = BeautifulSoup(response.text, 'html.parser')\n                product_containers = soup.findAll('div', 'product-grid-item')\n                if not product_containers:\n                    if page == 1:\n                        logging.warning('Empty category: ' + url_extension)\n                    break\n                for container in product_containers:\n                    product_url = container.find('a')['href']\n                    product_urls.append(product_url)\n                page += 1\n        return product_urls\n\n    @classmethod\n    def products_for_url(cls, url, category=None, extra_args=None):\n        print(url)\n        session = session_with_proxy(extra_args)\n        response = session.get(url)\n        soup = BeautifulSoup(response.text, 'html.parser')\n        name = soup.find('h1', 'product_title').text\n        sku = soup.find('button', {'name': 'add-to-cart'})['value']\n        if soup.find('p', 'stock in-stock'):\n            stock = int(soup.find('p', 'stock').text.split()[0])\n        
else:\n stock = 0\n if soup.find('p', 'price').find('ins'):\n price = Decimal(\n remove_words(soup.find('p', 'price').find('ins').text))\n else:\n price = Decimal(\n remove_words(soup.find('p', 'price').text))\n\n picture_urls = [tag['src'] for tag in soup.find('div', 'woocommerce'\n '-product'\n '-gallery').\n findAll('img')]\n p = Product(\n name,\n cls.__name__,\n category,\n url,\n url,\n sku,\n stock,\n price,\n price,\n 'CLP',\n sku=sku,\n picture_urls=picture_urls\n )\n return [p]\n","sub_path":"storescraper/stores/elite_gamers.py","file_name":"elite_gamers.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"216807491","text":"import os\nimport sys\nimport ycm_core\nfrom clang_helpers import PrepareClangFlags\n\n# if no database is found, use this\ndefault_flags = [\n '-Wall',\n]\n\n# to be added to _all_ returned flags, always\nmandatory_flags = [\n '-Wall',\n '-Wextra',\n '-Werror',\n '-Wpedantic',\n '-Wno-gnu-statement-expression',\n '-Wno-variadic-macros',\n '-fexceptions',\n '-DUSE_CLANG_COMPLETER',\n # THIS IS IMPORTANT! Without a \"-std=\" flag, clang won't know which\n # language to use when compiling headers. So it will guess. Badly. So C++\n # headers will be compiled as C headers. You don't want that so ALWAYS specify\n # a \"-std=\".\n # For a C project, you would set this to something like 'c99' instead of\n # 'c++11'.\n '-std=c++11',\n # ...and the same thing goes for the magic -x option which specifies the\n # language that the files to be compiled are written in. This is mostly\n # relevant for c++ headers.\n # For a C project, you would set this to 'c' instead of 'c++'.\n '-x', 'c++',\n # This path will only work on OS X, but extra paths that don't exist are not\n # harmful\n '-isystem', '/System/Library/Frameworks/Python.framework/Headers',\n '-isystem', '/usr/local/include',\n '-isystem', '/usr/local/include/eigen3',\n '-I', 'include',\n '-I.',\n]\n\n# flags which should never be forwarded to clang (if they are not supported for\n# example)\nto_be_removed_flags = [\n '-Wno-maybe-uninitialized',\n '-Wno-unused-local-typedefs',\n]\n\n# a default\ncompilation_database_folder = '.'\n\nif os.path.isfile( compilation_database_folder+'/compile_commands.json' ):\n database = ycm_core.CompilationDatabase( compilation_database_folder )\nelse:\n database = None\n\nSOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', ]\n\n# thats it. 
now some functions\n\ndef MakeRelativePathsInFlagsAbsolute( flags, working_directory ):\n if not working_directory:\n return list( flags )\n new_flags = []\n make_next_absolute = False\n path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]\n for flag in flags:\n new_flag = flag\n\n if make_next_absolute:\n make_next_absolute = False\n if not flag.startswith( '/' ):\n new_flag = os.path.join( working_directory, flag )\n\n for path_flag in path_flags:\n if flag == path_flag:\n make_next_absolute = True\n break\n\n if flag.startswith( path_flag ):\n path = flag[ len( path_flag ): ]\n new_flag = path_flag + os.path.join( working_directory, path )\n break\n\n if new_flag:\n new_flags.append( new_flag )\n return new_flags\n\n\ndef IsHeaderFile( filename ):\n extension = os.path.splitext( filename )[ 1 ]\n return extension in [ '.h', '.hxx', '.hpp', '.hh' ]\n\ndef find(name, path):\n #sys.stderr.write(\"looking for \" + name + \" in \" + path + \"\\n\") \n for root, dirs, files in os.walk(path):\n if name in files:\n return os.path.join(root, name)\n return None\n\ndef GetCompilationInfoForFile( filename ):\n # The compilation_commands.json file generated by CMake does not have entries\n # for header files. So we do our best by asking the db for flags for a\n # corresponding source file with the same basename, if any. If one exists,\n # the flags for that file should be good enough.\n if IsHeaderFile( filename ):\n path = os.path.split( filename )[ 0 ] + '/../../../' # forgive me, a very crude heuristic...\n basename = os.path.basename(os.path.splitext( filename )[ 0 ])\n for extension in SOURCE_EXTENSIONS:\n # looking up a file in the filesystem\n replacement_file = find(basename + extension, path)\n if type(replacement_file) is str:\n replacement_file = os.path.abspath(replacement_file)\n if os.path.exists( replacement_file ):\n compilation_info = database.GetCompilationInfoForFile( replacement_file )\n if compilation_info.compiler_flags_:\n return compilation_info\n return None\n return database.GetCompilationInfoForFile( filename )\n\n\ndef FlagsForFile( filename, **kwargs ):\n final_flags = default_flags \n if database:\n # Bear in mind that compilation_info.compiler_flags_ does NOT return a\n # python list, but a \"list-like\" StringVec object\n compilation_info = GetCompilationInfoForFile( filename )\n if compilation_info:\n\n final_flags = PrepareClangFlags(\n MakeRelativePathsInFlagsAbsolute(\n compilation_info.compiler_flags_,\n compilation_info.compiler_working_dir_ ),\n filename)\n\n # python is a little bit special... 
;-) \n # see http://stackoverflow.com/a/2104348\n clean_flags = [x for x in final_flags if x not in to_be_removed_flags]\n\n return {\n 'flags': mandatory_flags + clean_flags,\n 'do_cache': True\n }\n","sub_path":".ycm_extra_conf.py","file_name":".ycm_extra_conf.py","file_ext":"py","file_size_in_byte":4719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"78282852","text":"#coding=utf8\n__author__ = 'alex'\n\nimport logging as log\nfrom flask import Blueprint, render_template, abort, g, request, redirect, url_for, session, flash\nfrom jinja2 import TemplateNotFound\n\nadmin = Blueprint('admin', __name__,template_folder='templates',url_prefix='/admin')\n\n\n@admin.route(\"/\")\ndef index():\n return render_template(\"admin/index.html\", **locals())\n\n@admin.route(\"/login\",methods=[\"POST\",\"GET\"])\ndef login():\n if request.method == \"POST\":\n return redirect(url_for(\"admin.index\"))\n return render_template(\"admin/admin_login.html\", **locals())","sub_path":"admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"548436194","text":"#\n# Lead acid base model class\n#\n\nimport pybamm\n\n\nclass BaseModel(pybamm.BaseBatteryModel):\n \"\"\"\n Overwrites default parameters from Base Model with default parameters for\n lead-acid models\n\n\n **Extends:** :class:`pybamm.BaseBatteryModel`\n\n \"\"\"\n\n def __init__(self, options=None, name=\"Unnamed lead-acid model\", build=False):\n options = options or {}\n # Specify that there are no particles in lead-acid\n options[\"particle shape\"] = \"no particles\"\n super().__init__(options, name)\n self.param = pybamm.LeadAcidParameters()\n\n # Default timescale is discharge timescale\n self.timescale = self.param.tau_discharge\n\n # Set default length scales\n self.length_scales = {\n \"negative electrode\": self.param.L_x,\n \"separator\": self.param.L_x,\n \"positive electrode\": self.param.L_x,\n \"current collector y\": self.param.L_z,\n \"current collector z\": self.param.L_z,\n }\n\n self.set_standard_output_variables()\n\n @property\n def default_parameter_values(self):\n return pybamm.ParameterValues(chemistry=pybamm.parameter_sets.Sulzer2019)\n\n @property\n def default_geometry(self):\n return pybamm.battery_geometry(\n include_particles=False,\n current_collector_dimension=self.options[\"dimensionality\"],\n )\n\n @property\n def default_var_pts(self):\n # Choose points that give uniform grid for the standard parameter values\n var = pybamm.standard_spatial_vars\n return {var.x_n: 25, var.x_s: 41, var.x_p: 34, var.y: 10, var.z: 10}\n\n def set_soc_variables(self):\n \"\"\"Set variables relating to the state of charge.\"\"\"\n # State of Charge defined as function of dimensionless electrolyte concentration\n z = pybamm.standard_spatial_vars.z\n soc = (\n pybamm.Integral(self.variables[\"X-averaged electrolyte concentration\"], z)\n * 100\n )\n self.variables.update({\"State of Charge\": soc, \"Depth of Discharge\": 100 - soc})\n\n # Fractional charge input\n if \"Fractional Charge Input\" not in self.variables:\n fci = pybamm.Variable(\"Fractional Charge Input\", domain=\"current collector\")\n self.variables[\"Fractional Charge Input\"] = fci\n self.rhs[fci] = -self.variables[\"Total current density\"] * 100\n self.initial_conditions[fci] = self.param.q_init * 100\n\n def set_active_material_submodel(self):\n self.submodels[\"negative active 
material\"] = pybamm.active_material.Constant(\n self.param, \"Negative\", self.options\n )\n self.submodels[\"positive active material\"] = pybamm.active_material.Constant(\n self.param, \"Positive\", self.options\n )\n\n def set_sei_submodel(self):\n\n self.submodels[\"negative sei\"] = pybamm.sei.NoSEI(self.param, \"Negative\")\n self.submodels[\"positive sei\"] = pybamm.sei.NoSEI(self.param, \"Positive\")\n\n def set_lithium_plating_submodel(self):\n\n self.submodels[\"negative lithium plating\"] = pybamm.lithium_plating.NoPlating(\n self.param, \"Negative\"\n )\n self.submodels[\"positive lithium plating\"] = pybamm.lithium_plating.NoPlating(\n self.param, \"Positive\"\n )\n","sub_path":"pybamm/models/full_battery_models/lead_acid/base_lead_acid_model.py","file_name":"base_lead_acid_model.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"111431128","text":"# WHY NOT SHUFFLE FOR THE TRAIN LOADERS?\n# WHAT IS THE DIFFERENCE BETWEEN TEST SET 1 and TEST SET 2?\nimport sys \nimport torch\nimport numpy as np\nimport nibabel as nib\nimport os\nimport os.path as osp\nimport pandas as pd \n\nsys.path.append('../../')\nfrom data.dataset import give_mri_data\n\n\n\ndef give_oasis_data(data_type,batch_size=1,num_workers=1,shuffle=True,debug=False,preprocessing='full', task='age',share=None,balance=False):\n \n #Get the directory of the data_type:\n DIR = '/gpfs3/well/win-fmrib-analysis/users/lhw539/oasis3/'\n DIR_IDs=osp.join(DIR, 'oasis3_info/') \n \n if balance and task=='sex':\n balance='_balanced_'+task\n else: \n balance=''\n \n if task=='progmci':\n if data_type=='train':\n fp_ = DIR_IDs+'mci_train.csv'\n elif data_type=='val':\n fp_ = DIR_IDs+'mci_valid.csv'\n elif data_type=='test':\n fp_ = DIR_IDs+'mci_test.csv'\n else: \n sys.exit(\"Unknown data type.\")\n else:\n default_name=DIR_IDs\n if debug:\n default_name+='debug_'\n # Load files:\n if data_type=='train':\n fp_ = default_name+'session_train'+balance+'.csv'\n elif data_type=='val':\n fp_ = default_name+'session_val'+balance+'.csv'\n elif data_type=='test':\n fp_ = default_name+'session_test0'+balance+'.csv'\n elif data_type=='test1':\n fp_ = default_name+'session_test1'+balance+'.csv'\n else: \n sys.exit(\"Unknown data type.\")\n \n df_session = pd.read_csv(fp_)\n print(list(df_session.columns))\n #Load T1 file path values. In the special ase of test1 (dementia group),\n #we use only the scans with highest CDR score.\n if data_type=='test1':\n fp_ = osp.join(DIR_IDs, 'subject_test1.csv')\n if debug:\n fp_=osp.join('debug_',fp_)\n df_subject_test1 = pd.read_csv(fp_)\n fp_list = list(df_subject_test1.max_cdr_mri_T1_path.values)\n else: \n fp_list = list(df_session.T1_path.values)\n \n if task=='age':\n label_list = list([age_, ] for age_ in df_session.AgeBasedOnClinicalData.values)\n \n elif task=='sex':\n \n #Get subject info:\n subject_df=pd.read_csv(DIR_IDs+'subject_info.csv')\n \n #Extract subject and sex info. 
Set subject as index:\n subject_sex=subject_df[[\"Subject\",\"Sex\"]].set_index(\"Subject\")\n \n #Extract labels:\n label_list=list([sex_, ] for sex_ in subject_sex.loc[df_session.Subject.values].values) \n \n elif task=='progmci':\n label_list = list([progmci_, ] for progmci_ in df_session.ProgMCI.values)\n else: \n sys.exit(\"Unknown task.\")\n\n if share is not None: \n n_total=len(fp_list)\n n_samples=int(np.round(share*n_total))\n inds=torch.randperm(n=n_total)[:n_samples].tolist()\n fp_list=[fp_list[ind] for ind in inds]\n label_list=[label_list[ind] for ind in inds]\n \n if debug: \n if share is None: \n print(\"Loading OASIS %5s debug data.\"%data_type)\n else: \n print(\"Loading share %.2f OASIS %5s debug data.\"%(share,data_type))\n else: \n if share is None: \n print(\"Loading OASIS %5s data.\"%data_type)\n else: \n print(\"Loading share %.2f %5s data.\"%(share,data_type))\n \n if balance:\n print(\"The data was balanced for \", task, \".\")\n \n return(give_mri_data(fp_list=fp_list,label_list=label_list,data_type=data_type,batch_size=batch_size,num_workers=num_workers,shuffle=shuffle,preprocessing=preprocessing))\n\n\n\n","sub_path":"data/oasis/load_oasis3.py","file_name":"load_oasis3.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"226266319","text":"import functools\n\nfrom abc import ABC, abstractmethod\nimport numpy as np\nimport os\nfrom typing import Tuple\nimport tensorflow as tf\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\nfrom ray.rllib.agents.ppo.ppo import PPOTrainer\nfrom ray.rllib.agents.ppo.ppo_tf_policy import KLCoeffMixin, PPOLoss, PPOTFPolicy\nfrom ray.rllib.evaluation.postprocessing import Postprocessing, compute_advantages\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.policy.tf_policy import EntropyCoeffSchedule, LearningRateSchedule\nfrom ray.rllib.utils.explained_variance import explained_variance\nfrom ray.tune import register_trainable\n\nOTHER_AGENT = \"other_agent\"\n\n\nclass CentralizedCriticModel(ABC, TFModelV2):\n \"\"\"Multi-agent model that implements a centralized VF.\"\"\"\n\n def __init__(self, obs_space, action_space, num_outputs, model_config, name):\n super(CentralizedCriticModel, self).__init__(\n obs_space, action_space, num_outputs, model_config, name\n ) # The Method Resolution Order (MRO) will manage the dependencies.\n\n # env parameters\n self.obs_space_shape = obs_space.shape[0]\n self.act_space_shape = action_space.n\n self.centralized = model_config[\"custom_options\"][\"critic\"][\"centralized\"]\n self.max_num_agents = model_config[\"custom_options\"][\"max_num_agents\"]\n self.max_num_opponents = self.max_num_agents - 1\n self.debug_mode = True\n\n # Build the actor network\n self.actor = self._build_actor(**model_config[\"custom_options\"][\"actor\"])\n self.register_variables(self.actor.variables)\n\n # Central Value Network\n self.critic = self._build_critic(**model_config[\"custom_options\"][\"critic\"])\n self.register_variables(self.critic.variables)\n\n # summaries\n if self.debug_mode:\n print(\"Actor Model:\\n\", self.actor.summary())\n print(\"Critic Model:\\n\", self.critic.summary())\n\n @abstractmethod\n def _build_actor(self, **kwargs) -> tf.keras.Model:\n pass\n\n @abstractmethod\n def _build_critic(self, **kwargs) -> tf.keras.Model:\n pass\n\n def forward(self, input_dict, state, seq_lens):\n policy = self.actor(input_dict[\"obs_flat\"])\n self._value_out = 
tf.reduce_mean(input_tensor=policy, axis=-1) # not used\n return policy, state\n\n def central_value_function(self, obs, other_agent):\n if self.centralized:\n return tf.reshape(self.critic([obs, other_agent]), [-1])\n return tf.reshape(self.critic(obs), [-1])\n\n def value_function(self):\n return tf.reshape(self._value_out, [1]) # not used\n\n\nclass CcTransformer(CentralizedCriticModel):\n \"\"\"Multi-agent model that implements a centralized VF.\"\"\"\n\n def _build_actor(\n self, activation_fn=\"relu\", hidden_layers=[512, 512, 512], **kwargs\n ):\n inputs = tf.keras.layers.Input(shape=(self.obs_space_shape,), name=\"obs\")\n output = build_fullyConnected(\n inputs=inputs,\n hidden_layers=hidden_layers,\n num_outputs=self.act_space_shape,\n activation_fn=activation_fn,\n name=\"actor\",\n )\n\n return tf.keras.Model(inputs, output)\n\n def _build_critic(\n self,\n activation_fn=\"relu\",\n hidden_layers=[512, 512, 512],\n centralized=True,\n embedding_size=128,\n num_heads=8,\n d_model=256,\n use_scale=True,\n **kwargs,\n ):\n # agent's input\n agent_obs = tf.keras.layers.Input(shape=(self.obs_space_shape,), name=\"obs\")\n agent_embedding = build_fullyConnected(\n inputs=agent_obs,\n hidden_layers=[2 * embedding_size, embedding_size],\n num_outputs=embedding_size,\n activation_fn=activation_fn,\n name=\"agent_embedding\",\n ) # `[batch_size, embedding_size]`\n\n # opponents' input\n opponent_shape = (\n (self.obs_space_shape + self.act_space_shape) * self.max_num_opponents,\n )\n opponent_obs = tf.keras.layers.Input(shape=opponent_shape, name=\"other_agent\")\n opponent_input = tf.reshape(\n opponent_obs,\n [-1, self.max_num_opponents, self.obs_space_shape + self.act_space_shape],\n )\n\n # opponents' embedding\n # `[batch_size, self.max_num_opponents, embedding_size]`\n opponent_embedding = build_fullyConnected(\n inputs=opponent_input,\n hidden_layers=[2 * embedding_size, embedding_size],\n num_outputs=embedding_size,\n activation_fn=activation_fn,\n name=\"opponent_embedding\",\n )\n\n # multi-head attention\n queries = tf.expand_dims(agent_embedding, axis=1) # number of queries = 1\n\n # output shape: `[batch_size, 1, d_model]`\n opponents_embedding = MultiHeadAttentionLayer(\n num_heads=num_heads, d_model=d_model, use_scale=use_scale\n )(\n [queries, opponent_embedding, opponent_embedding]\n ) # `[q, k, v]`\n\n # remove the addtional dimension\n opponents_embedding = tf.squeeze(opponents_embedding, axis=1)\n\n # `[batch_size, embedding_size + d_model]`\n embeddings = tf.concat([agent_embedding, opponents_embedding], axis=-1)\n\n output = build_fullyConnected(\n inputs=embeddings,\n hidden_layers=hidden_layers,\n num_outputs=1,\n activation_fn=activation_fn,\n name=\"critic\",\n ) # `[batch_size, ]`\n return tf.keras.Model([agent_obs, opponent_obs], output)\n\n\ndef build_fullyConnected(\n inputs, hidden_layers, num_outputs, activation_fn=\"relu\", name=None\n):\n name = name or \"fc_network\" # default_name\n\n # Fully connected hidden layers\n x = inputs\n for k, layer_size in enumerate(hidden_layers):\n x = tf.keras.layers.Dense(\n layer_size,\n name=\"{}/fc_{}\".format(name, k),\n activation=activation_fn,\n kernel_initializer=tf.keras.initializers.glorot_normal(),\n bias_initializer=tf.keras.initializers.constant(0.1),\n )(x)\n\n # output layer\n output = tf.keras.layers.Dense(\n num_outputs,\n name=\"{}/fc_out\".format(name),\n activation=None,\n kernel_initializer=tf.keras.initializers.glorot_normal(),\n bias_initializer=tf.keras.initializers.constant(0.1),\n )(x)\n\n 
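    # Descriptive note: only the hidden layers above apply `activation_fn`; this
    # final Dense layer is linear, so `output` holds raw scores of shape
    # `[batch_size, num_outputs]` (e.g. action logits for the actor, or a scalar
    # value estimate for the critic).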
return output\n\n\nclass MultiHeadAttentionLayer(tf.keras.layers.Layer):\n \"\"\"Multi-Head Attention layer as described in https://arxiv.org/abs/1910.06764.\n\n The Multi-Head Attention (MHA) submodule computes in parallel H soft-attention\n operations for every element, producing an output tensor Y of shape [N, D].\n MHA operates by first calculating the query Q, keys K, and values V through\n trainable linear projections and then using the combined Q, K, V, tensors\n to compute the soft attention. A residual connection to the resulting embedding\n is then applied and finally layer normalization.\n args:\n d_model: `int`, output dimension.\n num_heads: `int`, the number of heads.\n use_scale: 'bool', learnable scaling factor (default=True).\n use_residual_connection: `bool`, add a residual connection (default=False).\n If three inputs are provided during `call`, this value is overwritten to `False`.\n use_layer_norm: `bool`, use layer norm at the end (default=True).\n \"\"\"\n\n def __init__(\n self,\n num_heads: int,\n d_model: int,\n use_scale: bool = True,\n use_residual_connection: bool = False,\n use_layer_norm: bool = True,\n **kwargs,\n ):\n super(MultiHeadAttentionLayer, self).__init__(**kwargs)\n self.num_heads = num_heads\n self.d_model = d_model\n self.use_scale = use_scale\n self.use_residual_connection = use_residual_connection\n self.use_layer_norm = use_layer_norm\n\n if d_model % self.num_heads != 0:\n raise ValueError(\n \"the model dimension (got {}) must be a multiple \"\n \"of the number of heads, got {} heads\".format(d_model, num_heads)\n )\n\n self.depth = d_model // self.num_heads\n\n self.wq = tf.keras.layers.Dense(d_model, use_bias=True)\n self.wk = tf.keras.layers.Dense(d_model, use_bias=True)\n self.wv = tf.keras.layers.Dense(d_model, use_bias=True)\n\n self.attention_layer = AttentionLayer(use_scale=self.use_scale)\n self.transition_layer = tf.keras.layers.Dense(d_model)\n\n if self.use_layer_norm:\n self.layer_norm = tf.keras.layers.LayerNormalization()\n\n def _split_heads(self, inputs):\n \"\"\"Split the last dimension into (num_heads, depth).\n Concatenate the result such that the shape becomes `[batch_size * num_heads, T, depth]`\n \"\"\"\n inputs = tf.concat(tf.split(inputs, self.num_heads, axis=-1), axis=0)\n return inputs\n # inputs = tf.reshape(inputs, (batch_size, -1, self.num_heads, self.depth))\n # return tf.transpose(x, perm=[0, 2, 1, 3])\n\n def call(self, inputs):\n \"\"\" Execute a multi-headed attention mechanism.\n\n Args:\n inputs: `list` with\n query: Optional, Query `Tensor` of shape `[batch_size, Tq, dim]`.\n If the query is not provided, it assumes to be equal to value\n , and a self-attention mechanism is applied. 
Consequently,\n `dim` must equal `d_model`.\n key: Optional, Key `Tensor` of shape `[batch_size, Tv, dim]`.\n If the key is not provided, it assumes to be equal to value.\n value: Value `Tensor` of shape `[batch_size, Tv, dim]`.\n\n Returns:\n Attention outputs of shape `[batch_size, Tq, dim]`.\n \"\"\"\n self._validate_call_args(inputs)\n\n # decompose the inputs\n if len(inputs) == 1:\n q = k = v = inputs[0]\n else:\n q, k, v = inputs[0], inputs[1], inputs[2]\n self.use_residual_connection = False\n\n q = self.wq(q) # `[batch_size, Tq, d_model]`\n k = self.wk(k) # `[batch_size, Tk, d_model]`\n v = self.wv(v) # `[batch_size, Tv, d_model]`\n\n # `[batch_size * num_heads, T, d_model/h]`\n q = self._split_heads(q)\n k = self._split_heads(k)\n v = self._split_heads(v)\n\n # TODO - check the shape because I think it will be num_heads * dim_value\n scaled_attention = self.attention_layer([q, k, v])\n\n # Restore the shapes to `[batch_size, Tq, d_model]`\n outputs = tf.concat(tf.split(scaled_attention, self.num_heads, axis=0), axis=-1)\n\n # element-wise transition function\n outputs = self.transition_layer(outputs)\n\n # residual connection\n if self.use_residual_connection:\n outputs = outputs + inputs[0]\n\n # normalize the activations of each element individually.\n if self.use_layer_norm:\n outputs = self.layer_norm(outputs)\n\n return outputs\n\n def _validate_call_args(self, inputs):\n \"\"\"Validates arguments of the call method.\"\"\"\n class_name = self.__class__.__name__\n if not isinstance(inputs, list):\n raise ValueError(\n \"{} layer must be called on a list of inputs, namely \"\n \"[value], or [query, value], or [query, key, value].\".format(class_name)\n )\n if not (len(inputs) == 1 or len(inputs) == 3):\n raise ValueError(\n \"{} layer accepts inputs list of length 1 or 3, \"\n \"namely [value] or [query, key, value]. 
\"\n                \"Given length: {}\".format(class_name, len(inputs))\n            )\n        if len(inputs) == 1 and self.use_residual_connection:\n            if inputs[0].shape[-1] != self.d_model:\n                raise ValueError(\n                    \"When providing only one input, its last \"\n                    \"dimension must equal `d_model`\"\n                )\n\n    def get_config(self):\n        config = super(MultiHeadAttentionLayer, self).get_config()\n        config.update(\n            num_heads=self.num_heads,\n            d_model=self.d_model,\n            use_scale=self.use_scale,\n            use_residual_connection=self.use_residual_connection,\n            use_layer_norm=self.use_layer_norm,\n        )\n        return config\n\n    @classmethod\n    def from_config(cls, config):\n        return cls(**config)\n\n\nclass AttentionLayer(tf.keras.layers.Layer):\n    def __init__(self, use_scale=True, **kwargs):\n        super(AttentionLayer, self).__init__(**kwargs)\n        self.use_scale = use_scale\n\n    def build(self, input_shape):\n        \"\"\"Additional scaling factor: sqrt(dk) or learnable\"\"\"\n        if self.use_scale:\n            self.scale = self.add_weight(\n                name=\"scale\",\n                shape=(),\n                initializer=tf.constant_initializer(1.0),\n                trainable=True,\n            )\n        else:\n            self.scale = 1.0 / tf.sqrt(tf.cast(input_shape[0][-1], tf.float32))\n\n    def call(self, inputs):\n        \"\"\"Applies the attention mechanism.\n\n        Args:\n            inputs: `list` containing,\n                query: Query `Tensor` of shape `[batch_size, Tq, dim]`.\n                key: Key `Tensor` of shape `[batch_size, Tv, dim]`.\n                value: Value `Tensor` of shape `[batch_size, Tv, dim]`.\n\n        Returns:\n            Tensor of shape `[batch_size, Tq, dim]`.\n        \"\"\"\n        self._validate_call_args(inputs=inputs)\n\n        q, k, v = inputs[0], inputs[1], inputs[2]\n\n        scores = self._calculate_scores(query=q, key=k)\n        result = self._apply_scores(scores=scores, value=v)\n        return result\n\n    def _calculate_scores(self, query, key):\n        \"\"\"Calculates attention scores as a query-key dot product.\n        Args:\n            query: Query `Tensor` of shape `[batch_size, Tq, dim]`.\n            key: Key `Tensor` of shape `[batch_size, Tv, dim]`.\n        Returns:\n            Tensor of shape `[batch_size, Tq, Tv]`.\n        \"\"\"\n        return self.scale * tf.matmul(query, key, transpose_b=True, name=\"scores\")\n\n    def _apply_scores(self, scores, value):\n        \"\"\"Applies attention scores to the given value tensor.\n\n        Args:\n            scores: Scores `Tensor` of shape `[batch_size, Tq, Tv]`.\n            value: Value `Tensor` of shape `[batch_size, Tv, dim]`.\n\n        Returns:\n            Tensor of shape `[batch_size, Tq, dim]`.\n        \"\"\"\n        attention_weights = tf.nn.softmax(scores, axis=-1, name=\"attention_weights\")\n        output = tf.matmul(attention_weights, value)\n        return output\n\n    def get_config(self):\n        config = super(AttentionLayer, self).get_config()\n        config[\"use_scale\"] = self.use_scale\n        return config\n\n    @classmethod\n    def from_config(cls, config):\n        return cls(**config)\n\n    def _validate_call_args(self, inputs):\n        \"\"\"Validates arguments of the call method.\"\"\"\n        class_name = self.__class__.__name__\n        if not isinstance(inputs, list):\n            raise ValueError(\n                \"{} layer must be called on a list of inputs, namely \"\n                \"[query, value, key].\".format(class_name)\n            )\n        if len(inputs) != 3:\n            raise ValueError(\n                \"{} layer accepts inputs list of length 3, \"\n                \"namely [query, value, key]. 
\"\n \"Given length: {}\".format(class_name, len(inputs))\n )\n\n\nclass CentralizedValueMixin(object):\n \"\"\"Add methods to evaluate the central value function from the model.\"\"\"\n\n # the sample batch need to be put in a placeholder before\n # being feed to the network, otherwise it will redefine the tensor dimensions\n def __init__(self):\n self.central_value_function = self.model.central_value_function(\n self.get_placeholder(SampleBatch.CUR_OBS), self.get_placeholder(OTHER_AGENT)\n )\n\n def compute_central_value_function(\n self, obs, other_agent\n ): # opponent_obs, opponent_actions):\n feed_dict = {\n self.get_placeholder(SampleBatch.CUR_OBS): obs,\n self.get_placeholder(OTHER_AGENT): other_agent,\n }\n return self.get_session().run(self.central_value_function, feed_dict)\n\n\n# Grabs the other obs/policy and includes it in the experience train_batch,\n# and computes GAE using the central vf predictions.\ndef centralized_critic_postprocessing(\n policy, sample_batch, other_agent_batches=None, episode=None\n):\n # one hot encoding parser\n one_hot_enc = functools.partial(one_hot_encoding, n_classes=policy.action_space.n)\n max_num_opponents = policy.model.max_num_opponents\n\n if policy.loss_initialized():\n assert other_agent_batches is not None\n\n if len(other_agent_batches) > max_num_opponents:\n raise ValueError(\n \"The number of opponents is too large, got {} (max at {})\".format(\n len(other_agent_batches), max_num_opponents\n )\n )\n\n # lifespan of the agents\n time_span = (sample_batch[\"t\"][0], sample_batch[\"t\"][-1])\n\n # agents whose time overlaps with the current agent time_span\n # returns agent_id: agent_time_span, opp_sample_batch\n opponents = [\n Opponent(\n (opp_batch[\"t\"][0], opp_batch[\"t\"][-1]),\n opp_batch[SampleBatch.CUR_OBS],\n one_hot_enc(opp_batch[SampleBatch.ACTIONS]),\n )\n for agent_id, (_, opp_batch) in other_agent_batches.items()\n if time_overlap(time_span, (opp_batch[\"t\"][0], opp_batch[\"t\"][-1]))\n ]\n\n # apply the adequate cropping or padding compared to time_span\n for opp in opponents:\n opp.crop_or_pad(time_span)\n\n # add a padding for the missing opponents\n missing_opponent = Opponent(\n None,\n np.zeros_like(sample_batch[SampleBatch.CUR_OBS]),\n one_hot_enc(np.zeros_like(sample_batch[SampleBatch.ACTIONS])),\n )\n opponents = opponents + (\n [missing_opponent] * (max_num_opponents - len(opponents))\n )\n\n # add random permutation of the opponents\n perm = np.random.permutation(np.arange(max_num_opponents))\n opponents = [opponents[p] for p in perm]\n\n # add the oppponents' information into sample_batch\n sample_batch[OTHER_AGENT] = np.concatenate(\n [opp.observation for opp in opponents] + [opp.action for opp in opponents],\n axis=-1,\n )\n # overwrite default VF prediction with the central VF\n sample_batch[SampleBatch.VF_PREDS] = policy.compute_central_value_function(\n sample_batch[SampleBatch.CUR_OBS], sample_batch[OTHER_AGENT]\n )\n\n else:\n\n # opponents' observation placeholder\n missing_obs = np.zeros_like(sample_batch[SampleBatch.CUR_OBS])\n missing_act = one_hot_enc(np.zeros_like(sample_batch[SampleBatch.ACTIONS]))\n sample_batch[OTHER_AGENT] = np.concatenate(\n [missing_obs for _ in range(max_num_opponents)]\n + [missing_act for _ in range(max_num_opponents)],\n axis=-1,\n )\n\n # value prediction\n sample_batch[SampleBatch.VF_PREDS] = np.zeros_like(\n sample_batch[SampleBatch.ACTIONS], dtype=np.float32\n )\n\n train_batch = compute_advantages(\n sample_batch,\n 0.0,\n policy.config[\"gamma\"],\n 
policy.config[\"lambda\"],\n use_gae=policy.config[\"use_gae\"],\n )\n return train_batch\n\n\n# Copied from PPO but optimizing the central value function\ndef loss_with_central_critic(policy, model, dist_class, train_batch):\n CentralizedValueMixin.__init__(policy)\n\n logits, state = model.from_batch(train_batch)\n action_dist = dist_class(logits, model)\n policy.central_value_out = policy.model.central_value_function(\n train_batch[SampleBatch.CUR_OBS], train_batch[OTHER_AGENT]\n )\n\n policy.loss_obj = PPOLoss(\n dist_class,\n model,\n train_batch[Postprocessing.VALUE_TARGETS],\n train_batch[Postprocessing.ADVANTAGES],\n train_batch[SampleBatch.ACTIONS],\n train_batch[SampleBatch.ACTION_DIST_INPUTS],\n train_batch[SampleBatch.ACTION_LOGP],\n train_batch[SampleBatch.VF_PREDS],\n action_dist,\n policy.central_value_out,\n policy.kl_coeff,\n tf.ones_like(train_batch[Postprocessing.ADVANTAGES], dtype=tf.bool),\n entropy_coeff=policy.entropy_coeff,\n clip_param=policy.config[\"clip_param\"],\n vf_clip_param=policy.config[\"vf_clip_param\"],\n vf_loss_coeff=policy.config[\"vf_loss_coeff\"],\n use_gae=policy.config[\"use_gae\"],\n )\n\n return policy.loss_obj.loss\n\n\ndef setup_mixins(policy, obs_space, action_space, config):\n # copied from PPO\n KLCoeffMixin.__init__(policy, config)\n EntropyCoeffSchedule.__init__(\n policy, config[\"entropy_coeff\"], config[\"entropy_coeff_schedule\"]\n )\n LearningRateSchedule.__init__(policy, config[\"lr\"], config[\"lr_schedule\"])\n\n\ndef central_vf_stats(policy, train_batch, grads):\n # Report the explained variance of the central value function.\n return {\n \"vf_explained_var\": explained_variance(\n train_batch[Postprocessing.VALUE_TARGETS], policy.central_value_out\n )\n }\n\n\ndef one_hot_encoding(values, n_classes):\n return np.eye(n_classes)[values]\n\n\ndef time_overlap(time_span, agent_time):\n \"\"\"Check if agent_time overlaps with time_span\"\"\"\n return agent_time[0] <= time_span[1] and agent_time[1] >= time_span[0]\n\n\nclass Opponent(object):\n def __init__(\n self, time_span: Tuple[int, int], observation: np.ndarray, action: np.ndarray\n ):\n self.time_span = time_span\n self.observation = observation\n self.action = action\n\n def crop_or_pad(self, reference_time_span):\n time_difference = self._get_time_difference(reference_time_span)\n for key in self.__dict__:\n if key == \"time_span\":\n continue\n setattr(\n self, key, Opponent._crop_or_pad(getattr(self, key), *time_difference)\n )\n\n def _get_time_difference(self, reference):\n lower = reference[0] - self.time_span[0]\n upper = self.time_span[1] - reference[1]\n return lower, upper\n\n @staticmethod\n def _crop_or_pad(values, lower, upper):\n values = values[max(lower, 0) :]\n values = values[: len(values) - max(upper, 0)]\n values = np.pad(\n values,\n pad_width=[\n (-min(lower, 0), -min(0, upper)),\n *[(0, 0) for k in range(values.ndim - 1)],\n ],\n mode=\"constant\",\n )\n return values\n\n\nCCPPOPolicy = PPOTFPolicy.with_updates(\n name=\"CCPPOPolicy\",\n postprocess_fn=centralized_critic_postprocessing,\n loss_fn=loss_with_central_critic,\n before_loss_init=setup_mixins,\n grad_stats_fn=central_vf_stats,\n mixins=[\n LearningRateSchedule,\n EntropyCoeffSchedule,\n KLCoeffMixin,\n CentralizedValueMixin,\n ],\n)\nregister_trainable(\n \"CcTransformer\",\n PPOTrainer.with_updates(\n name=\"CCPPOTrainer\", get_policy_class=lambda c: CCPPOPolicy\n 
),\n)\n","sub_path":"models/cc_transformer.py","file_name":"cc_transformer.py","file_ext":"py","file_size_in_byte":23350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"188470972","text":"from tkinter import *\nimport webbrowser\nclass App:\n    def __init__(self, master):\n        # structure\n        self.searchbox = Entry(master)\n        self.searchbox.grid(row=1, column=1)\n\n        self.searchbutton = Button(master, text=\"search\", bg=\"red\", fg=\"white\", command=self.search)\n        self.searchbutton.grid(row=1, column=2, sticky=W)\n\n    def search(self):\n        self.query = self.searchbox.get()\n        webbrowser.open(\"https://plus.google.com/u/0/s/\" + self.query)\n\n\nroot = Tk()\napp = App(root)\nroot.mainloop()\n","sub_path":"googleplus.py","file_name":"googleplus.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"379279323","text":"import pandas as pd\nfrom sklearn import preprocessing\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Plot Figure 1A\n\n# prefilename = 'Fig1_A'\n# plotting the modified version of Figure 1A\nprefilename = 'Fig1_A'\nfilename = '%s.csv' % prefilename\ndata = pd.read_csv(filename)\ntime = data['time']\nvm = data['Vm']\nca_y = data['y']\np_ip3 = data['ip3']\nca_z = data['z']\n# Data preparation\nvm = 100 + vm\n# Normalize the data\nvm_norm = preprocessing.normalize(np.array(vm).reshape(1, -1))\nca_z_norm = preprocessing.normalize(np.array(ca_z).reshape(1, -1))\np_ip3_norm = preprocessing.normalize(np.array(p_ip3).reshape(1, -1))\nca_y_norm = preprocessing.normalize(np.array(ca_y).reshape(1, -1))\n\n\nfig, axs = plt.subplots()\nlabelfontsize = 12\naxs.plot(time, vm_norm[0], 'k')\naxs.plot(time, p_ip3_norm[0], '#8f8f8f')\naxs.plot(time, ca_z_norm[0], '--k')\naxs.set_ylabel('Normalized Parameters', fontsize=labelfontsize)\naxs.set_xlabel('Time (min)', fontsize=labelfontsize)\naxs.legend([\"Vm\", \"IP3\", \"Ca-c\"], loc=\"upper right\")\naxs.set_yticklabels([])\naxs.set_title('Phase Plot')\naxs.set_xlim([0, 8])\naxs.set_ylim([0, 0.1120])\n# save before calling show(): once show() returns, the figure may already be released\nplt.savefig('Figure_1A')\nplt.show()","sub_path":"Simulation/Fig1_plot.py","file_name":"Fig1_plot.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"378152165","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport cv2\nfrom collections import Counter\n\n'''\nExtract the dominant colors of an image; this works quite well.\n'''\n\ndef dict2list(dic:dict):\n    ''' Convert a dict into a list of (key, value) tuples. '''\n    keys = dic.keys()\n    vals = dic.values()\n    lst = [(key, val) for key, val in zip(keys, vals)]\n    return lst\n\ndef get_img_col_feature(img_name, k=10, top=5, alpha=10):\n    \n    img = cv2.imread(img_name)\n    # img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n    # img = cv2.cvtColor(img, cv2.COLOR_BGR2Luv)\n    img = cv2.resize(img, (img.shape[1]//alpha, img.shape[0]//alpha))\n    Z = img.reshape((-1,3))\n    # convert to np.float32\n    Z = np.float32(Z)\n    # define criteria, number of clusters(K) and apply kmeans()\n    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n    ret,label,center=cv2.kmeans(Z,k,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)\n    # Now convert back into uint8, and make original image\n    # cluster centers, i.e. the representative color values\n    center = np.uint8(center)\n    # after clustering, sort the clusters by the number of members in each\n    k_muns = dict(Counter(label.flatten()))\n    dc = sorted(dict2list(k_muns), key=lambda d:d[1], reverse=True)\n    # keep the top clusters after sorting\n    dc_top = dc[:top]\n    # assemble the feature vector\n    clo_features = []\n    for dc_i in 
dc_top:\n fea = center[dc_i[0]]\n clo_features.append(fea)\n features = np.array(clo_features).flatten()\n print ('>>>>', features)\n res = center[label.flatten()]\n res2 = res.reshape((img.shape))\n return features\n\n\nif __name__ == '__main__':\n img_name = 'lp.jpg'\n res2 = get_img_col_feature(img_name)\n print (res2)\n","sub_path":"Clustering/kmeans_img_color_cluster.py","file_name":"kmeans_img_color_cluster.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"469728565","text":"#!/usr/bin/env python2\n\n# transpiled with BefunCompile v1.3.0 (c) 2017\nimport sys\nimport zlib, base64\n_g = (\"AR+LCAAAAAAABADt2slqwzAQgOFXcb1cIi8jxzpEBNEHCUkPBV110skPnwmmgdCWlDaB0PzfRaPRgpDxnJQLPJIKAAAAAAAAAAAAAAAAAIB/4OqDuZA/gvPcfIq2d3qg\"\n + \"9+RC+V5OK28lObdaiSSRGGzn/ajhbhz8HDSMu6baH/xaklkv49qt8/XtcQOz/zKdjB2i2ChjszT8IreX6/qin6zIxT0H++3a/X2O9NSC1ifbnYrTRlLQymQlNkszWBMn\"\n + \"SdOSm845J6nQGU5iV+WDRjr09rKEfVl0Rdm2reZ650xLTfuDOcnYWa+l6Kcrtp9D/bLGRJl0lygbatrvvPZc3iM4ApHVms+QMwAA\")\ng = base64.b64decode(_g)[1:]\nfor i in range(ord(base64.b64decode(_g)[0])):\n g = zlib.decompress(g, 16+zlib.MAX_WBITS)\ng=list(map(ord, g))\ndef gr(x,y):\n if(x>=0 and y>=0 and x<400 and y<33):\n return g[y*400 + x];\n return 0;\ndef gw(x,y,v):\n if(x>=0 and y>=0 and x<400 and y<33):\n g[y*400 + x]=v;\ndef td(a,b):\n return ((0)if(b==0)else(a//b))\ndef tm(a,b):\n return ((0)if(b==0)else(a%b))\ns=[]\ndef sp():\n global s\n if (len(s) == 0):\n return 0\n return s.pop()\ndef sa(v):\n global s\n s.append(v)\ndef sr():\n global s\n if (len(s) == 0):\n return 0\n return s[-1]\ndef _0():\n gw(1,0,400)\n gw(0,0,10000)\n sa(gr(0,0)-1)\n sa(0)\n sa((gr(0,0)-1)/2)\n sa((gr(0,0)-1)/2)\n gw(2,0,gr(0,0)-1)\n return 1\ndef _1():\n return (2)if(sp()!=0)else(15)\ndef _2():\n global t0\n global t1\n t0=gr(2,0)\n sa(sr());\n sa(t0)\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n v0=sp()\n sa(tm(sp(),v0))\n\n t1=sp()\n\n return (14)if((t1)!=0)else(3)\ndef _3():\n sa(sr());\n gw(3,0,sp())\n sa(sp()+sp());\n\n sa(gr(3,0)-1)\n sa(gr(3,0)-1)\n return 4\ndef _4():\n return (2)if(sp()!=0)else(5)\ndef _5():\n sp();\n gw(tm(gr(2,0),gr(1,0)),(td(gr(2,0),gr(1,0)))+1,sp())\n return 6\ndef _6():\n sa(sr());\n\n return (13)if(sp()!=0)else(7)\ndef _7():\n gw(0,1,0)\n gw(2,0,gr(0,0)-1)\n gw(9,0,0)\n sp();\n sp();\n return 8\ndef _8():\n gw(4,0,gr(tm(gr(2,0),gr(1,0)),(td(gr(2,0),gr(1,0)))+1))\n gw(5,0,gr(tm(gr(4,0),gr(1,0)),(td(gr(4,0),gr(1,0)))+1))\n\n return (9)if((gr(2,0)-gr(5,0))!=0)else(11)\ndef _9():\n global t2\n t2=gr(2,0)\n gw(2,0,gr(2,0)-1)\n\n return (8)if((t2)!=0)else(10)\ndef _10():\n sys.stdout.write(str(gr(9,0))+\" \")\n sys.stdout.flush()\n return 16\ndef _11():\n return (12)if(gr(2,0)>gr(4,0))else(9)\ndef _12():\n sys.stdout.write(str(gr(2,0))+\" \")\n sys.stdout.flush()\n\n sys.stdout.write(\" - \")\n sys.stdout.flush()\n\n sys.stdout.write(str(gr(4,0))+\" \")\n sys.stdout.flush()\n\n sys.stdout.write(chr(10))\n sys.stdout.flush()\n\n gw(9,0,gr(9,0)+gr(2,0)+gr(4,0))\n return 9\ndef _13():\n sa(sp()-1)\n\n sa(sr());\n sa(sr());\n gw(2,0,sp())\n sa(0)\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n sa(sp()/2);\n\n sa(sr());\n return 1\ndef _14():\n sa(sp()-1)\n\n sa(sr());\n return 4\ndef _15():\n gw(tm(gr(2,0),gr(1,0)),(td(gr(2,0),gr(1,0)))+1,1)\n return 6\nm=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15]\nc=0\nwhile c<16:\n 
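# each _k() executes one transpiled basic block and returns the index of the next block\n    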
c=m[c]()\n","sub_path":"compiled/Python2/Euler_Problem-021.py","file_name":"Euler_Problem-021.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"325163636","text":"import unittest\nfrom typing import cast\n\nfrom ywh2bt.core.configuration.headers import Headers\nfrom ywh2bt.core.configuration.yeswehack import Bugtrackers, OAuthSettings, Program, Programs, YesWeHackConfiguration\n\n\nclass TestYesWeHack(unittest.TestCase):\n\n def test_constructor(self) -> None:\n ywh = YesWeHackConfiguration(\n api_url='http://example.com',\n apps_headers=Headers(\n foo='bar',\n ),\n login='michel@example.com',\n password='my-password',\n oauth_args=OAuthSettings(\n client_id='client-id',\n client_secret='client-secret',\n redirect_uri='http://example.com/oauth/redirect',\n ),\n verify=True,\n programs=Programs(\n [\n Program(\n slug='1-pgm',\n bugtrackers_name=Bugtrackers(\n [\n 'bt1',\n 'bt2',\n ],\n ),\n ),\n ],\n ),\n )\n\n self.assertEqual('http://example.com', ywh.api_url)\n self.assertEqual(\n dict(\n foo='bar',\n ),\n ywh.apps_headers,\n )\n self.assertEqual('michel@example.com', ywh.login)\n self.assertEqual('my-password', ywh.password)\n self.assertEqual('client-id', cast(OAuthSettings, ywh.oauth_args).client_id)\n self.assertEqual('client-secret', cast(OAuthSettings, ywh.oauth_args).client_secret)\n self.assertEqual('http://example.com/oauth/redirect', cast(OAuthSettings, ywh.oauth_args).redirect_uri)\n self.assertEqual(first=True, second=ywh.verify)\n self.assertEqual('1-pgm', cast(Programs, ywh.programs)[0].slug)\n self.assertEqual(\n [\n 'bt1',\n 'bt2',\n ],\n cast(Programs, ywh.programs)[0].bugtrackers_name,\n )\n","sub_path":"ywh2bt/tests/core/configuration/test_yeswehack.py","file_name":"test_yeswehack.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"49427747","text":"import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\norbslam2_data_path = '../data/orbslam2_testing_path_xyz.csv'\ncolmap_data_path = '../data/colmap_testing_path_xyz.csv'\n\n\ndef read_dataset():\n points = {'orb': [], 'col': []}\n matches = {}\n col_match_ps_idx = []\n orb_match_ps_idx = []\n\n with open(orbslam2_data_path, newline='') as csvfile:\n orbslam2_data = list(csv.reader(csvfile))\n with open(colmap_data_path, newline='') as csvfile:\n colmap_data = list(csv.reader(csvfile))\n\n for i, row in enumerate(orbslam2_data):\n points['orb'].append(\n [float(val) for val in row[1:]])\n abosolute_i = int(row[0])\n if abosolute_i in matches:\n matches[abosolute_i]['orb'] = i\n else:\n matches[abosolute_i] = {'orb': i}\n\n for i, row in enumerate(colmap_data):\n points['col'].append(\n [float(val) for val in row[1:]])\n abosolute_i = int(row[0])\n if abosolute_i in matches:\n matches[abosolute_i]['col'] = i\n else:\n matches[abosolute_i] = {'col': i}\n\n for method in points:\n points[method] = np.array(points[method])\n\n for row in matches:\n match = matches[row]\n if 'orb' in match and 'col' in match:\n orb_match_ps_idx.append(match['orb'])\n col_match_ps_idx.append(match['col'])\n orb_match_ps_idx.sort()\n col_match_ps_idx.sort()\n \n return points, orb_match_ps_idx, col_match_ps_idx\n\n\ndef rotation_mtx(axis, angle):\n axis = np.array(axis)\n u = axis / np.linalg.norm(axis)\n cos = np.cos(angle)\n sin = np.sin(angle)\n c = 1 - cos\n ux, uy, uz = u[0], u[1], u[2]\n\n R = np.array(\n [[cos + ux**2 * c, ux * uy 
* c - uz * sin, ux * uz * c + uy * sin],\n         [uy * ux * c + uz * sin, cos + uy**2 * c, uy * uz * c - ux * sin],\n         [uz * ux * c - uy * sin, uz * uy * c + ux * sin, cos + uz**2 * c]]\n    )\n\n    return R\n\n\ndef get_angle(vec1, vec2):\n    cos_val = np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))\n    return np.arccos(cos_val)\n\n\ndef plot(pts_ORB, pts_COL):\n    fig = plt.figure()\n    ax = plt.axes(projection='3d')\n\n    ax.scatter3D(pts_ORB[:, 0], pts_ORB[:, 1], pts_ORB[:, 2],\n                 c=[i for i in range(len(pts_ORB))], cmap='Reds')\n    # ax.plot3D(pts_ORB[:, 0], pts_ORB[:, 1], pts_ORB[:, 2], color='green', label='ORB_SLAM2')\n    ax.plot3D(pts_COL[:, 0], pts_COL[:, 1], pts_COL[:, 2], color='gray', label='COLMAP')\n    ax.legend()\n    plt.show()\n\n\nif __name__ == \"__main__\":\n    # Read datasets and find matches\n    points, orb_match_ps_idx, col_match_ps_idx = read_dataset()\n    print('ORB0 COL0', points['orb'][orb_match_ps_idx[0]], points['col'][col_match_ps_idx[0]])\n\n    # Move each center of models to the origin\n    origin_col_mean = np.mean(points['col'], axis=0)\n    points['orb'] -= np.mean(points['orb'], axis=0)\n    points['col'] -= origin_col_mean\n\n    # Scale ORB_SLAM2 model to the size of COLMAP model\n    scale = (np.linalg.norm(points['col'][col_match_ps_idx[0]]) /\n             np.linalg.norm(points['orb'][orb_match_ps_idx[0]]))\n    points['orb'] *= scale\n\n    print(f'Center both models at the origin and scale ORB_SLAM2 with {scale}')\n    print('ORB0 COL0', points['orb'][orb_match_ps_idx[0]], points['col'][col_match_ps_idx[0]])\n\n    # Rotate around z-axis\n    # Make the projection of ORB_SLAM2 p0 to XY-plane the same as the projection of COLMAP p0 to XY-plane\n    axis = [0, 0, 1]\n    ang = get_angle(points['orb'][orb_match_ps_idx[0]][:2], points['col'][col_match_ps_idx[0]][:2])\n    rot_mtx = rotation_mtx(axis, ang)\n    points['orb'] = np.matmul(rot_mtx, points['orb'].T).T\n\n    print(f'Rotate ORB_SLAM2 with an angle of {ang} RAD around axis {axis}')\n    print('ORB0 COL0', points['orb'][orb_match_ps_idx[0]], points['col'][col_match_ps_idx[0]])\n\n    # Rotate around a vector on XY-plane orthogonal to the projection vector of ORB_SLAM2 p0 to XY-plane\n    # Make ORB_SLAM2 p0 the same as COLMAP p0\n    axis = [points['orb'][orb_match_ps_idx[0]][1], -points['orb'][orb_match_ps_idx[0]][0], 0]\n    ang = get_angle(points['orb'][orb_match_ps_idx[0]], points['col'][col_match_ps_idx[0]])\n    rot_mtx = rotation_mtx(axis, ang)\n    points['orb'] = np.matmul(rot_mtx, points['orb'].T).T\n\n    print(f'Rotate ORB_SLAM2 with an angle of {ang} RAD around axis {axis}')\n    print('ORB0 COL0', points['orb'][orb_match_ps_idx[0]], points['col'][col_match_ps_idx[0]])\n    print('ORB1 COL1', points['orb'][orb_match_ps_idx[1]], points['col'][col_match_ps_idx[1]])\n\n    # Rotate around ORB_SLAM2 p0 vector\n    # Make ORB_SLAM2 p1 close to COLMAP p1\n    # Rotate center is the intersection of the axis and the plane which is orthogonal to the axis and has ORB_SLAM2 p1 on it\n    axis = points['orb'][orb_match_ps_idx[0]]\n    rotate_center = axis * (np.dot(axis, points['orb'][orb_match_ps_idx[1]]) / np.dot(axis, axis))\n    ang = get_angle(\n        points['orb'][orb_match_ps_idx[1]] - rotate_center,\n        points['col'][col_match_ps_idx[1]] - rotate_center\n    )\n    rot_mtx = rotation_mtx(axis, ang)\n    points['orb'] = np.matmul(rot_mtx, points['orb'].T).T\n\n    print(f'Rotate ORB_SLAM2 with an angle of {ang} RAD around axis {axis}')\n    print(f'Rotate center {rotate_center}')\n    print('ORB0 COL0', points['orb'][orb_match_ps_idx[0]], points['col'][col_match_ps_idx[0]])\n    print('ORB1 COL1', 
points['orb'][orb_match_ps_idx[1]], points['col'][col_match_ps_idx[1]])\n\n    # Move back to original COLMAP model coordinates\n    points['orb'] += origin_col_mean\n    points['col'] += origin_col_mean\n\n    print('Move both models back to the original COLMAP model coordinates')\n    print('ORB0 COL0', points['orb'][orb_match_ps_idx[0]], points['col'][col_match_ps_idx[0]])\n\n    # Calculate errors and statistics\n    error_vecs = np.subtract(\n        [points['orb'][i] for i in orb_match_ps_idx],\n        [points['col'][i] for i in col_match_ps_idx]\n    )\n    error_dists = np.linalg.norm(error_vecs, axis=1)\n    mean_error = np.mean(error_dists)  # mean Euclidean distance between matched points\n    print('Mean error:', mean_error)\n\n    # Plot both models\n    plot(points['orb'], points['col'])\n","sub_path":"FinalProject/src/final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"614846153","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef w(x):\n    \"\"\" target weight: w(x) = exp(-x**2)/sqrt(pi) \"\"\"\n\n    w = np.exp(-x**2)/np.sqrt(np.pi)\n    return w\n\n\ndef next_chain_link(x, y):\n    \"\"\" checks whether y is accepted as next chain link \"\"\"\n\n    gamma = np.random.rand()\n    alpha = w(y)/w(x)\n\n    return alpha >= gamma\n\n\ndef metro_alg(N):\n    \"\"\" Metropolis algorithm that creates a Markov chain of length N \"\"\"\n\n    chain = []\n    chain_removed = []\n    chain.append(0)\n    chain_removed.append(0)\n\n    for i in range(N):\n        j = 0\n        y = (np.random.rand()-0.5)*10\n        if next_chain_link(chain[i], y):\n            chain.append(y)\n        else:\n            chain.append(chain[i])\n\n        if next_chain_link(chain_removed[j], y):\n            chain_removed.append(y)\n            j += 1\n\n    return chain, chain_removed\n\n\n# N = 100000\n# chain, chain_removed = metro_alg(N)\n#\n# x_values = np.linspace(-3, 3, N) # x values to plot w(x)\n# sns.distplot(chain, label=\"chain\")\n# sns.distplot(chain_removed, label=\"chain removed\")\n# plt.plot(x_values, w(x_values), label=\"weight\")\n# plt.legend()\n# plt.show()\n\n# a) the little bump at the peak probably comes from random.rand, which creates random numbers in [0, 1)?\n# b) chain-removed has a slightly lower peak, but only very little\n\n\n#######################################################################################################################\n\n# 2 a)\n\nN = 64\nkb = 1  # Boltzmann constant\nindex = np.arange(1, N+1)  # used to create random indices\n\n\n# def H(lattice, h):\n#     \"\"\" calculates the energy H({s_l}) \"\"\"\n#\n#     H = 0\n#     for i in range(1, N+1):\n#         for j in range(1, N+1):\n#             H -= lattice[i, j]*(lattice[i, j-1] + lattice[i-1, j]) + h*lattice[i, j]\n#             H -= 2*lattice[i, j] * (lattice[i, j - 1] + lattice[i - 1, j] + lattice[i, j + 1] + lattice[i + 1, j]) + 2*h * lattice[i, j]\n#\n#     return H\n\n\n\n# def next_chain_link_ising(x, y, T, h):\n#     \"\"\" checks whether y is accepted as next chain link \"\"\"\n#\n#     gamma = np.random.rand()\n#     alpha = np.exp(-(H(y, h) - H(x, h))/(kb * T))\n#\n#     return alpha >= gamma\n\n\ndef transform_lattice(lattice):\n    \"\"\" transforms a random lattice into a lattice of +1/2 and -1/2 and sets periodic bounds \"\"\"\n\n    for i in range(N+1):\n        for j in range(N+1):\n            if lattice[i, j] >= 0.5:\n                lattice[i, j] = 1/2\n            else:\n                lattice[i, j] = -1/2\n\n    for i in range(N+1):\n        lattice[0, i] = lattice[N, i]\n        lattice[N+1, i] = lattice[1, i]\n        lattice[i, 0] = lattice[i, N]\n        lattice[i, N + 1] = lattice[i, 1]\n\n    lattice[0, 0] = lattice[N, N]\n    lattice[0, N+1] = lattice[N, 1]\n    lattice[N+1, 0] = lattice[1, N]\n    lattice[N+1, N+1] = 
lattice[1, 1]\n\n    return lattice\n\n\ndef H(lattice, i, j, h, T):\n    \"\"\" checks whether a spin flip at (i, j) is accepted \"\"\"\n\n    gamma = np.random.rand()\n    delta_E = -2*lattice[i, j] * (lattice[i, j - 1] + lattice[i - 1, j] + lattice[i, j + 1] + lattice[i + 1, j]) - 2*h * lattice[i, j]\n\n    # Metropolis criterion: always accept energy-lowering flips, otherwise accept with probability exp(-delta_E/(kb*T))\n    return delta_E <= 0 or np.exp(-(delta_E)/(kb * T)) > gamma\n\n\ndef metro_ising(L, T, h):\n    \"\"\" creates a Markov chain of length L and calculates the magnetization \"\"\"\n\n    lattice = transform_lattice(np.random.rand(N + 2, N + 2))  # +2 because of periodic bounds\n    ising_chain = [lattice]\n    m = 0\n\n    for i in range(L):\n        rand_row = np.random.choice(index)\n        rand_col = np.random.choice(index)\n\n        if H(ising_chain[i], rand_row, rand_col, h, T):\n            new_lattice = ising_chain[i].copy()\n            new_lattice[rand_row][rand_col] *= -1\n            ising_chain.append(transform_lattice(new_lattice))\n        else:\n            ising_chain.append(ising_chain[i])\n\n        m += np.sum(ising_chain[i][1:N + 1, 1:N + 1])  # magnetization\n\n    return m\n\n\n\nchain_length = 100  # 10000 is too big\nh_arr = [0.1, 0.5, 1, 5]\nT = np.linspace(0.1, 30, 10)\n\n# a)\n\n# chain, _ = metro_ising(chain_length, T[0], h[0])\n# sns.heatmap(chain[chain_length-1][1:N, 1:N], xticklabels=False, yticklabels=False, cbar=False)\n# plt.title(\"T = \" + str(T[0]))\n# plt.legend()\n# plt.show()\n\n\n# b)\n\nm_val = []\nfor temp in T:\n    m = metro_ising(chain_length, temp, h_arr[0])\n    m_val.append(m/chain_length)\n\nplt.plot(T, m_val, label=\"h = \" + str(h_arr[0]))\nplt.ylabel(\"magnetization m\")\nplt.xlabel(\"Temperature T\")\nplt.legend()\nplt.show()\n\n\n","sub_path":"ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"150655424","text":"import urllib, urllib2\n\nfrom django.core.management.base import CommandError\nfrom django.utils import simplejson as json\n\nclass Forum(object):\n    def __init__(self, id, shortname, name, created_at):\n        self.id = int(id)\n        self.shortname = shortname\n        self.name = name\n        self.created_at = created_at\n\n    def __repr__(self):\n        return \"<Forum: id=%s>\" % self.id\n\n    def __eq__(self, other):\n        if self.id == other.id:\n            return True\n        return False\n\nclass Thread(object):\n    def __init__(self, id, forum, slug, title, created_at, allow_comments, \n                 url, hidden=False, identifier=None):\n        self.id = int(id)\n        self.forum = int(forum)\n        self.slug = slug\n        self.title = title\n        self.created_at = created_at\n        self.allow_comments = bool(allow_comments)\n        self.url = url\n        self.hidden = bool(hidden)\n        self.identifier = identifier\n\n    def __repr__(self):\n        return \"<Thread: id=%s>\" % self.id\n\n    def __eq__(self, other):\n        if self.id == other.id:\n            return True\n        return False\n\nclass Post(object):\n    def __init__(self, id, forum, thread, created_at, message, shown, points,\n                 is_anonymous, anonymous_author=None, author=None, \n                 parent_post=None):\n        self.id = int(id)\n        self.forum = int(forum)\n        self.thread = int(thread)\n        self.created_at = created_at\n        self.message = message\n        self.parent_post = parent_post\n        self.shown = shown\n        self.points = points\n        self.is_anonymous = bool(is_anonymous)\n        self.anonymous_author = anonymous_author\n        self.author = author\n\n    def __repr__(self):\n        return \"<Post: id=%s>\" % self.id\n\n    def __eq__(self, other):\n        if self.id == other.id:\n            return True\n        return False\n\nclass AnonymousAuthor(object):\n    def __init__(self, name, url, email_hash):\n        self.name = name\n        self.url = url\n        self.email_hash = email_hash\n\n    def __eq__(self, other):\n        if self.email_hash == other.email_hash:\n            
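# anonymous commenters have no account id, so the email hash is the only stable identity\n            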
return True\n        return False\n\nclass Author(object):\n    def __init__(self, id, username, display_name, url, email_hash, has_avatar):\n        self.id = int(id)\n        self.username = username\n        self.display_name = display_name\n        self.url = url\n        self.email_hash = email_hash\n        self.has_avatar = has_avatar\n\n    def __repr__(self):\n        return \"<Author: id=%s, username=%s>\" % (self.id, self.username)\n\n    def __eq__(self, other):\n        if self.id == other.id:\n            return True\n        return False\n\nclass DisqusApi(object):\n    API_URL = 'http://disqus.com/api/'\n\n    def __init__(self, api_key):\n        self.api_key = api_key\n\n    def call(self, method, data, post=False):\n        \"\"\"\n        Calls `method` from the DISQUS API with data either in POST or GET.\n        Returns deserialized JSON response.\n        \"\"\"\n        url = \"%s%s\" % (self.API_URL, method)\n        if post:\n            # POST request\n            url += \"/\"\n            data = urllib.urlencode(data)\n        else:\n            # GET request\n            url += \"?%s\" % urllib.urlencode(data)\n            data = ''\n        res = json.load(urllib2.urlopen(url, data))\n        if not res['succeeded']:\n            raise CommandError(\"'%s' failed: %s\\nData: %s\" % (method, \n                                                              res['code'],\n                                                              data))\n        return res['message']\n\n    def get_forum_list(self):\n        \"\"\"Returns a list of `Forum` objects associated with an API key\"\"\"\n        return [Forum(id=f['id'],\n                      shortname=f['shortname'], \n                      name=f['name'], \n                      created_at=f['created_at']) \n                for f in self.call('get_forum_list', \n                                   {'user_api_key': self.api_key})]\n\n    def get_forum_api_key(self, forum):\n        \"\"\"Returns the forum API key for a `Forum`\"\"\"\n        return self.call('get_forum_api_key', {'user_api_key': self.api_key, \n                                               'forum_id': forum.id})\n\n    def get_thread_list(self, forum_api_key):\n        \"\"\"Returns a list of `Thread` objects associated with a forum API key\"\"\"\n        return [Thread(id=t['id'], \n                       allow_comments=t['allow_comments'], \n                       created_at=t['created_at'],\n                       forum=t['forum'],\n                       hidden=t['hidden'], \n                       identifier=t['identifier'], \n                       slug=t['slug'], \n                       title=t['title'], \n                       url=t['url']) \n                for t in self.call('get_thread_list', \n                                   {'forum_api_key': forum_api_key})]\n\n    def get_thread_posts(self, forum_api_key, thread):\n        \"\"\"Returns a list of `Post` objects belonging to the given thread\"\"\"\n        posts = []\n        for post in self.call('get_thread_posts', {'forum_api_key': forum_api_key,\n                                                   'thread_id': thread.id}):\n            if post['is_anonymous']:\n                anonymous_author = AnonymousAuthor(name=post['anonymous_author']['name'],\n                                                   url=post['anonymous_author']['url'],\n                                                   email_hash=post['anonymous_author']['email_hash'])\n                author = None\n            else:\n                author = Author(id=post['author']['id'],\n                                username=post['author']['username'],\n                                display_name=post['author']['display_name'],\n                                url=post['author']['url'],\n                                email_hash=post['author']['email_hash'],\n                                has_avatar=post['author']['has_avatar'])\n                anonymous_author = None\n\n            posts.append(Post(id=post['id'], \n                              forum=post['forum'], \n                              thread=post['thread'],\n                              created_at=post['created_at'],\n                              message=post['message'], \n                              shown=post['shown'], \n                              points=post['points'], \n                              is_anonymous=post['is_anonymous'], \n                              anonymous_author=anonymous_author,\n                              author=author,\n                              parent_post=post['parent_post'],))\n        return posts\n","sub_path":"disqus/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"161081519","text":"'''\nCreated on Apr 06, 2012\n\n@author: Michael Kraus (michael.kraus@ipp.mpg.de)\n'''\n\nimport argparse\n\nimport numpy as np\nfrom numpy.fft import *\n\nimport 
matplotlib\n#matplotlib.use('Cairo')\nmatplotlib.use('AGG')\n#matplotlib.use('PDF')\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm, colors, gridspec\nfrom matplotlib.colors import BoundaryNorm\nfrom matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter, ScalarFormatter\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n\nfrom imhd.diagnostics import Diagnostics \n\n\nclass PlotMHD2D(object):\n '''\n classdocs\n '''\n\n def __init__(self, diagnostics, filename, ntMax=0, nPlot=1, write=False):\n '''\n Constructor\n '''\n \n# matplotlib.rc('text', usetex=True)\n matplotlib.rc('font', family='sans-serif', size='24')\n matplotlib.rcParams['contour.negative_linestyle'] = 'solid'\n matplotlib.rcParams['grid.linestyle'] = \"dotted\"\n \n self.prefix = filename\n \n self.ntMax = diagnostics.nt\n \n if self.ntMax > ntMax and ntMax > 0:\n self.ntMax = ntMax\n \n self.nPlot = nPlot\n self.iTime = -1\n \n self.diagnostics = diagnostics\n \n self.kx = fftshift(fftfreq(diagnostics.nx+1, diagnostics.hx))\n self.ky = fftshift(fftfreq(diagnostics.ny+1, diagnostics.hy))\n \n self.Bx = np.zeros((diagnostics.nx, diagnostics.ny))\n self.By = np.zeros((diagnostics.nx, diagnostics.ny))\n self.Vx = np.zeros((diagnostics.nx, diagnostics.ny))\n self.Vy = np.zeros((diagnostics.nx, diagnostics.ny))\n \n self.BxSpectrum = np.zeros((diagnostics.nx, diagnostics.ny))\n self.BySpectrum = np.zeros((diagnostics.nx, diagnostics.ny))\n self.VxSpectrum = np.zeros((diagnostics.nx, diagnostics.ny))\n self.VySpectrum = np.zeros((diagnostics.nx, diagnostics.ny))\n \n self.BxPhase = np.zeros((diagnostics.nx, diagnostics.ny))\n self.ByPhase = np.zeros((diagnostics.nx, diagnostics.ny))\n self.VxPhase = np.zeros((diagnostics.nx, diagnostics.ny))\n self.VyPhase = np.zeros((diagnostics.nx, diagnostics.ny))\n\n\n # compute initial phase\n \n self.Bx[:,:] = self.diagnostics.Bx\n self.By[:,:] = self.diagnostics.By\n self.Vx[:,:] = self.diagnostics.Vx\n self.Vy[:,:] = self.diagnostics.Vy\n \n BxFft = fftshift(fft2(self.Bx))\n ByFft = fftshift(fft2(self.By))\n VxFft = fftshift(fft2(self.Vx))\n VyFft = fftshift(fft2(self.Vy))\n \n self.BxPhase0 = np.angle(BxFft)\n self.ByPhase0 = np.angle(ByFft)\n self.VxPhase0 = np.angle(VxFft)\n self.VyPhase0 = np.angle(VyFft)\n\n \n self.read_data()\n \n \n # set up figure/window for Bx\n self.figure_Bx = plt.figure(num=1, figsize=(12,10))\n plt.subplots_adjust(left=0.15, right=0.85, top=0.9, bottom=0.12)\n \n # set up plot title\n self.title_Bx = self.figure_Bx.text(0.5, 0.93, 't = 0.0', horizontalalignment='center', fontsize=30)\n\n # create axes\n self.axes_Bx = plt.subplot(1,1,1)\n self.axes_Bx.set_xlabel('$k_x$', labelpad=15, fontsize=24)\n self.axes_Bx.set_ylabel('$k_y$', labelpad=15, fontsize=24)\n self.axes_Bx.set_xlim(self.kx[0], self.kx[-1])\n self.axes_Bx.set_ylim(self.ky[0], self.ky[-1])\n\n # create plot\n self.plot_Bx = self.axes_Bx.pcolormesh(self.kx, self.ky, self.BxSpectrum.T, cmap=plt.get_cmap('viridis'), vmin=self.BxSpectrum.min(), vmax=self.BxSpectrum.max())\n \n # add colorbar\n divider = make_axes_locatable(self.axes_Bx)\n cax_Bx = divider.append_axes('right', size='5%', pad=0.1)\n self.figure_Bx.colorbar(self.plot_Bx, cax=cax_Bx, orientation='vertical') \n\n \n # set up figure/window for By\n self.figure_By = plt.figure(num=2, figsize=(12,10))\n plt.subplots_adjust(left=0.15, right=0.85, top=0.9, bottom=0.12)\n \n # set up plot title\n self.title_By = self.figure_By.text(0.5, 0.93, 't = 0.0', horizontalalignment='center', fontsize=30)\n\n # 
create axes\n self.axes_By = plt.subplot(1,1,1)\n self.axes_By.set_xlabel('$k_x$', labelpad=15, fontsize=24)\n self.axes_By.set_ylabel('$k_y$', labelpad=15, fontsize=24)\n self.axes_By.set_xlim(self.kx[0], self.kx[-1])\n self.axes_By.set_ylim(self.ky[0], self.ky[-1])\n\n # create plot\n self.plot_By = self.axes_By.pcolormesh(self.kx, self.ky, self.BySpectrum.T, cmap=plt.get_cmap('viridis'), vmin=self.BySpectrum.min(), vmax=self.BySpectrum.max())\n \n # add colorbar\n divider = make_axes_locatable(self.axes_By)\n cax_By = divider.append_axes('right', size='5%', pad=0.1)\n self.figure_By.colorbar(self.plot_By, cax=cax_By, orientation='vertical') \n\n \n # set up figure/window for Vx\n self.figure_Vx = plt.figure(num=3, figsize=(12,10))\n plt.subplots_adjust(left=0.15, right=0.85, top=0.9, bottom=0.12)\n \n # set up plot title\n self.title_Vx = self.figure_Vx.text(0.5, 0.93, 't = 0.0', horizontalalignment='center', fontsize=30)\n\n # create axes\n self.axes_Vx = plt.subplot(1,1,1)\n self.axes_Vx.set_xlabel('$k_x$', labelpad=15, fontsize=24)\n self.axes_Vx.set_ylabel('$k_y$', labelpad=15, fontsize=24)\n self.axes_Vx.set_xlim(self.kx[0], self.kx[-1])\n self.axes_Vx.set_ylim(self.ky[0], self.ky[-1])\n\n # create plot\n self.plot_Vx = self.axes_Vx.pcolormesh(self.kx, self.ky, self.VxSpectrum.T, cmap=plt.get_cmap('viridis'), vmin=self.VxSpectrum.min(), vmax=self.VxSpectrum.max())\n \n # add colorbar\n divider = make_axes_locatable(self.axes_Vx)\n cax_Vx = divider.append_axes('right', size='5%', pad=0.1)\n self.figure_Vx.colorbar(self.plot_Vx, cax=cax_Vx, orientation='vertical') \n\n \n # set up figure/window for Vy\n self.figure_Vy = plt.figure(num=4, figsize=(12,10))\n plt.subplots_adjust(left=0.15, right=0.85, top=0.9, bottom=0.12)\n \n # set up plot title\n self.title_Vy = self.figure_Vy.text(0.5, 0.93, 't = 0.0', horizontalalignment='center', fontsize=30)\n\n # create axes\n self.axes_Vy = plt.subplot(1,1,1)\n self.axes_Vy.set_xlabel('$k_x$', labelpad=15, fontsize=24)\n self.axes_Vy.set_ylabel('$k_y$', labelpad=15, fontsize=24)\n self.axes_Vy.set_xlim(self.kx[0], self.kx[-1])\n self.axes_Vy.set_ylim(self.ky[0], self.ky[-1])\n\n # create plot\n self.plot_Vy = self.axes_Vy.pcolormesh(self.kx, self.ky, self.VySpectrum.T, cmap=plt.get_cmap('viridis'), vmin=self.VySpectrum.min(), vmax=self.VySpectrum.max())\n \n # add colorbar\n divider = make_axes_locatable(self.axes_Vy)\n cax_Vy = divider.append_axes('right', size='5%', pad=0.1)\n self.figure_Vy.colorbar(self.plot_Vy, cax=cax_Vy, orientation='vertical') \n\n \n \n # set up figure/window for Bx\n self.figure_phase_Bx = plt.figure(num=5, figsize=(12,10))\n plt.subplots_adjust(left=0.15, right=0.85, top=0.9, bottom=0.12)\n \n # set up plot title\n self.title_phase_Bx = self.figure_phase_Bx.text(0.5, 0.93, 't = 0.0', horizontalalignment='center', fontsize=30)\n\n # create axes\n self.axes_phase_Bx = plt.subplot(1,1,1)\n self.axes_phase_Bx.set_xlabel('$k_x$', labelpad=15, fontsize=24)\n self.axes_phase_Bx.set_ylabel('$k_y$', labelpad=15, fontsize=24)\n self.axes_phase_Bx.set_xlim(self.kx[0], self.kx[-1])\n self.axes_phase_Bx.set_ylim(self.ky[0], self.ky[-1])\n\n # create plot\n self.plot_phase_Bx = self.axes_phase_Bx.pcolormesh(self.kx, self.ky, self.BxPhase.T, cmap=plt.get_cmap('viridis'), vmin=-2.*np.pi, vmax=+2.*np.pi)\n \n # add colorbar\n divider = make_axes_locatable(self.axes_phase_Bx)\n cax_phase_Bx = divider.append_axes('right', size='5%', pad=0.1)\n self.figure_phase_Bx.colorbar(self.plot_phase_Bx, cax=cax_phase_Bx, orientation='vertical') 
\n\n \n # set up figure/window for By\n self.figure_phase_By = plt.figure(num=6, figsize=(12,10))\n plt.subplots_adjust(left=0.15, right=0.85, top=0.9, bottom=0.12)\n \n # set up plot title\n self.title_phase_By = self.figure_phase_By.text(0.5, 0.93, 't = 0.0', horizontalalignment='center', fontsize=30)\n\n # create axes\n self.axes_phase_By = plt.subplot(1,1,1)\n self.axes_phase_By.set_xlabel('$k_x$', labelpad=15, fontsize=24)\n self.axes_phase_By.set_ylabel('$k_y$', labelpad=15, fontsize=24)\n self.axes_phase_By.set_xlim(self.kx[0], self.kx[-1])\n self.axes_phase_By.set_ylim(self.ky[0], self.ky[-1])\n\n # create plot\n self.plot_phase_By = self.axes_phase_By.pcolormesh(self.kx, self.ky, self.ByPhase.T, cmap=plt.get_cmap('viridis'), vmin=-2.*np.pi, vmax=+2.*np.pi)\n \n # add colorbar\n divider = make_axes_locatable(self.axes_phase_By)\n cax_phase_By = divider.append_axes('right', size='5%', pad=0.1)\n self.figure_phase_By.colorbar(self.plot_phase_By, cax=cax_phase_By, orientation='vertical') \n\n \n # set up figure/window for Vx\n self.figure_phase_Vx = plt.figure(num=7, figsize=(12,10))\n plt.subplots_adjust(left=0.15, right=0.85, top=0.9, bottom=0.12)\n \n # set up plot title\n self.title_phase_Vx = self.figure_phase_Vx.text(0.5, 0.93, 't = 0.0', horizontalalignment='center', fontsize=30)\n\n # create axes\n self.axes_phase_Vx = plt.subplot(1,1,1)\n self.axes_phase_Vx.set_xlabel('$k_x$', labelpad=15, fontsize=24)\n self.axes_phase_Vx.set_ylabel('$k_y$', labelpad=15, fontsize=24)\n self.axes_phase_Vx.set_xlim(self.kx[0], self.kx[-1])\n self.axes_phase_Vx.set_ylim(self.ky[0], self.ky[-1])\n\n # create plot\n self.plot_phase_Vx = self.axes_phase_Vx.pcolormesh(self.kx, self.ky, self.VxPhase.T, cmap=plt.get_cmap('viridis'), vmin=-2.*np.pi, vmax=+2.*np.pi)\n \n # add colorbar\n divider = make_axes_locatable(self.axes_phase_Vx)\n cax_phase_Vx = divider.append_axes('right', size='5%', pad=0.1)\n self.figure_phase_Vx.colorbar(self.plot_phase_Vx, cax=cax_phase_Vx, orientation='vertical') \n\n \n # set up figure/window for Vy\n self.figure_phase_Vy = plt.figure(num=8, figsize=(12,10))\n plt.subplots_adjust(left=0.15, right=0.85, top=0.9, bottom=0.12)\n \n # set up plot title\n self.title_phase_Vy = self.figure_phase_Vy.text(0.5, 0.93, 't = 0.0', horizontalalignment='center', fontsize=30)\n\n # create axes\n self.axes_phase_Vy = plt.subplot(1,1,1)\n self.axes_phase_Vy.set_xlabel('$k_x$', labelpad=15, fontsize=24)\n self.axes_phase_Vy.set_ylabel('$k_y$', labelpad=15, fontsize=24)\n self.axes_phase_Vy.set_xlim(self.kx[0], self.kx[-1])\n self.axes_phase_Vy.set_ylim(self.ky[0], self.ky[-1])\n\n # create plot\n self.plot_phase_Vy = self.axes_phase_Vy.pcolormesh(self.kx, self.ky, self.VyPhase.T, cmap=plt.get_cmap('viridis'), vmin=-2.*np.pi, vmax=+2.*np.pi)\n \n # add colorbar\n divider = make_axes_locatable(self.axes_phase_Vy)\n cax_phase_Vy = divider.append_axes('right', size='5%', pad=0.1)\n self.figure_phase_Vy.colorbar(self.plot_phase_Vy, cax=cax_phase_Vy, orientation='vertical') \n\n \n # add data for zero timepoint and compute boundaries\n self.add_timepoint()\n \n \n # plot\n self.update()\n \n \n def read_data(self):\n \n self.Bx[:,:] = self.diagnostics.Bx\n self.By[:,:] = self.diagnostics.By\n self.Vx[:,:] = self.diagnostics.Vx\n self.Vy[:,:] = self.diagnostics.Vy\n \n BxFft = fftshift(fft2(self.Bx))\n ByFft = fftshift(fft2(self.By))\n VxFft = fftshift(fft2(self.Vx))\n VyFft = fftshift(fft2(self.Vy))\n \n self.BxSpectrum[:,:] = np.abs(BxFft)\n self.BySpectrum[:,:] = np.abs(ByFft)\n 
self.VxSpectrum[:,:] = np.abs(VxFft)\n self.VySpectrum[:,:] = np.abs(VyFft)\n \n self.BxPhase[:,:] = np.angle(BxFft) - self.BxPhase0\n self.ByPhase[:,:] = np.angle(ByFft) - self.ByPhase0\n self.VxPhase[:,:] = np.angle(VxFft) - self.VxPhase0\n self.VyPhase[:,:] = np.angle(VyFft) - self.VyPhase0\n \n \n \n def update(self):\n \n if not (self.iTime == 0 or (self.iTime) % self.nPlot == 0 or self.iTime == self.ntMax):\n return\n \n self.read_data()\n\n self.plot_Bx.set_array(self.BxSpectrum.T.ravel())\n self.plot_By.set_array(self.BySpectrum.T.ravel())\n self.plot_Vx.set_array(self.VxSpectrum.T.ravel())\n self.plot_Vy.set_array(self.VySpectrum.T.ravel())\n \n self.plot_phase_Bx.set_array(self.BxPhase.T.ravel())\n self.plot_phase_By.set_array(self.ByPhase.T.ravel())\n self.plot_phase_Vx.set_array(self.VxPhase.T.ravel())\n self.plot_phase_Vy.set_array(self.VyPhase.T.ravel())\n \n self.figure_Bx.savefig(self.prefix + str('_spectrum_Bx_%06d' % self.iTime) + '.png', dpi=100)\n self.figure_By.savefig(self.prefix + str('_spectrum_By_%06d' % self.iTime) + '.png', dpi=100)\n self.figure_Vx.savefig(self.prefix + str('_spectrum_Vx_%06d' % self.iTime) + '.png', dpi=100)\n self.figure_Vy.savefig(self.prefix + str('_spectrum_Vy_%06d' % self.iTime) + '.png', dpi=100)\n \n self.figure_phase_Bx.savefig(self.prefix + str('_phase_Bx_%06d' % self.iTime) + '.png', dpi=100)\n self.figure_phase_By.savefig(self.prefix + str('_phase_By_%06d' % self.iTime) + '.png', dpi=100)\n self.figure_phase_Vx.savefig(self.prefix + str('_phase_Vx_%06d' % self.iTime) + '.png', dpi=100)\n self.figure_phase_Vy.savefig(self.prefix + str('_phase_Vy_%06d' % self.iTime) + '.png', dpi=100)\n \n\n \n \n def add_timepoint(self):\n self.iTime += 1\n self.title_Bx.set_text('t = %1.2f' % (self.diagnostics.tGrid[self.iTime]))\n self.title_By.set_text('t = %1.2f' % (self.diagnostics.tGrid[self.iTime]))\n self.title_Vx.set_text('t = %1.2f' % (self.diagnostics.tGrid[self.iTime]))\n self.title_Vy.set_text('t = %1.2f' % (self.diagnostics.tGrid[self.iTime]))\n self.title_phase_Bx.set_text('t = %1.2f' % (self.diagnostics.tGrid[self.iTime]))\n self.title_phase_By.set_text('t = %1.2f' % (self.diagnostics.tGrid[self.iTime]))\n self.title_phase_Vx.set_text('t = %1.2f' % (self.diagnostics.tGrid[self.iTime]))\n self.title_phase_Vy.set_text('t = %1.2f' % (self.diagnostics.tGrid[self.iTime]))\n \n\n\nclass Plot(object):\n '''\n \n '''\n\n\n def __init__(self, hdf5_file, nPlot=1, ntMax=0):\n '''\n Constructor\n '''\n \n self.diagnostics = Diagnostics(hdf5_file)\n \n if ntMax > 0 and ntMax < self.diagnostics.nt:\n self.nt = ntMax\n else:\n self.nt = self.diagnostics.nt\n \n self.plot = PlotMHD2D(self.diagnostics, args.hdf5_file.replace(\".hdf5\", \"\"), self.nt, nPlot)\n \n \n def update(self, itime):\n self.diagnostics.read_from_hdf5(itime)\n self.diagnostics.update_invariants(itime)\n \n if itime > 0:\n self.plot.add_timepoint()\n \n self.plot.update()\n \n \n def run(self):\n for itime in range(1, self.nt+1):\n print(\"it = %4i\" % (itime))\n self.update(itime)\n \n \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Ideal MHD Solver in 2D')\n \n parser.add_argument('hdf5_file', metavar='', type=str,\n help='Run HDF5 File')\n parser.add_argument('-np', metavar='i', type=int, default=1,\n help='plot every i\\'th frame')\n parser.add_argument('-ntmax', metavar='i', type=int, default=0,\n help='limit to i points in time')\n \n args = parser.parse_args()\n \n \n print\n print(\"Replay run with \" + args.hdf5_file)\n print\n \n pyvp = 
Plot(args.hdf5_file, ntMax=args.ntmax, nPlot=args.np)\n pyvp.run()\n \n print\n print(\"Replay finished.\")\n print\n \n","sub_path":"diag_spectrum.py","file_name":"diag_spectrum.py","file_ext":"py","file_size_in_byte":16493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"150900025","text":"\"\"\"Copyright 2013 Google Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\n\nimport types\n\nfrom . import arg_binding_keys\nfrom . import decorators\nfrom . import errors\n\n\nclass ObjectProvider(object):\n\n def __init__(self, binding_mapping, bindable_scopes, allow_injecting_none):\n self._binding_mapping = binding_mapping\n self._bindable_scopes = bindable_scopes\n self._allow_injecting_none = allow_injecting_none\n\n def provide_from_arg_binding_key(self, arg_binding_key, injection_context):\n binding_key = arg_binding_key.binding_key\n binding = self._binding_mapping.get(binding_key)\n scope = self._bindable_scopes.get_sub_scope(binding)\n def Provide():\n provided = scope.provide(\n binding_key,\n lambda: binding.proviser_fn(injection_context.get_child(binding), self))\n if (provided is None) and not self._allow_injecting_none:\n raise errors.InjectingNoneDisallowedError(\n binding.proviser_fn._pinject_desc)\n return provided\n provider_indirection = arg_binding_key.provider_indirection\n provided = provider_indirection.StripIndirectionIfNeeded(Provide)\n return provided\n\n def provide_class(self, cls, injection_context):\n if type(cls.__init__) is types.MethodType:\n init_kwargs = self.get_injection_kwargs(\n cls.__init__, injection_context)\n else:\n init_kwargs = {}\n return cls(**init_kwargs)\n\n def call_with_injection(self, provider_fn, injection_context):\n kwargs = self.get_injection_kwargs(provider_fn, injection_context)\n return provider_fn(**kwargs)\n\n def get_injection_kwargs(self, fn, injection_context):\n return arg_binding_keys.create_kwargs(\n decorators.get_injectable_arg_binding_keys(fn),\n lambda abk: self.provide_from_arg_binding_key(abk, injection_context))\n","sub_path":"pinject/object_providers.py","file_name":"object_providers.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"272760811","text":"import numpy as np\n\nfrom .experience import Experience\n\n\nclass AgentBase:\n\n def __init__(self,\n action_space,\n memory: Experience=None,\n discount_factor_gamma=0.99,\n state_preprocessor=None):\n\n if memory is None:\n memory = Experience(max_length=1000)\n if isinstance(action_space, int):\n action_space = np.arange(action_space)\n if hasattr(action_space, \"n\"):\n action_space = np.arange(action_space.n)\n\n self.memory = memory\n self.possible_actions = action_space\n self.states = []\n self.rewards = []\n self.actions = []\n self.dones = []\n self.gamma = discount_factor_gamma\n self.learning = True\n self.preprocess = self._preprocess_noop if state_preprocessor is None else 
state_preprocessor\n\n def set_learning_mode(self, switch: bool):\n self.learning = switch\n\n @staticmethod\n def _preprocess_noop(state):\n return state\n\n def sample(self, state, reward, done):\n raise NotImplementedError\n\n def push_experience(self, state, reward, done):\n raise NotImplementedError\n\n def fit(self, batch_size=32, verbose=1):\n raise NotImplementedError\n\n def _reset_direct_memory(self):\n self.states = []\n self.rewards = []\n self.actions = []\n self.dones = []\n","sub_path":"trickster/abstract.py","file_name":"abstract.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"632936783","text":"from pyinaturalist.client import iNatClient\nfrom pyinaturalist.constants import API_V1\nfrom pyinaturalist.models import User\nfrom test.sample_data import SAMPLE_DATA\n\n\ndef test_from_id(requests_mock):\n requests_mock.get(\n f'{API_V1}/users/1',\n json=SAMPLE_DATA['get_user_by_id'],\n status_code=200,\n )\n\n result = iNatClient().users(1)\n assert isinstance(result, User)\n assert result.id == 1\n\n\ndef test_from_ids(requests_mock):\n requests_mock.get(\n f'{API_V1}/users/1',\n json=SAMPLE_DATA['get_user_by_id'],\n status_code=200,\n )\n requests_mock.get(\n f'{API_V1}/users/2',\n json=SAMPLE_DATA['get_user_by_id'],\n status_code=200,\n )\n\n results = iNatClient().users.from_ids(1, 2).all()\n assert len(results) == 2 and isinstance(results[0], User)\n assert results[0].id == 1\n\n\ndef test_autocomplete(requests_mock):\n requests_mock.get(\n f'{API_V1}/users/autocomplete',\n json=SAMPLE_DATA['get_users_autocomplete'],\n status_code=200,\n )\n\n results = iNatClient().users.autocomplete(q='nico')\n assert len(results) == 3 and isinstance(results[0], User)\n assert results[0].id == 886482\n","sub_path":"test/controllers/test_user_controller.py","file_name":"test_user_controller.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"255372902","text":"#!/usr/bin/env python3\n\n\"\"\" Setup script to install PyGeom package. 
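Install with the usual distutils invocation, e.g.: python3 setup.py install\n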
\"\"\"\n\n# This is the PyGeom version, not the setup script version.\n__version__ = \"0.9\"\n\nimport distutils.core\n\npackages = ['PyGeom']\n\npackage_data = []\n\nscripts = []\n\nsetup_kwargs = {\n 'name': 'PyGeom',\n 'version': __version__,\n 'description': 'HEP Geometry Visualization Library',\n 'long_description': 'PyROOT Visualization Toolkit for GEANT/GEMC and'\n ' ROOT geometries.',\n 'author': 'Maurik Holtrop',\n 'author_email': 'Mauirk.Holtrop@unh.edu',\n 'maintainer': 'Maurik Holtrop',\n 'maintainer_email': 'Mauirk.Holtrop@unh.edu',\n 'packages': packages,\n 'package_data': {'PyGeom': package_data},\n 'scripts': scripts,\n 'license': 'open-source',\n}\n\ndistutils.core.setup(**setup_kwargs)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"479837908","text":"def get_distance(p, q):\n result = abs(p[0] - q[0]) + abs(p[1] - q[1])\n\n return result\n\n\ndef get_head(head, side):\n CARDINAL_POINTS = [\"north\", \"east\", \"south\", \"west\"]\n cardinal_point = CARDINAL_POINTS.index(head)\n\n if side == \"L\":\n cardinal_point -= 1\n elif side == \"R\":\n cardinal_point += 1\n\n if cardinal_point < 0:\n cardinal_point = len(CARDINAL_POINTS)-1\n\n if cardinal_point == len(CARDINAL_POINTS):\n cardinal_point = 0\n\n return CARDINAL_POINTS[cardinal_point]\n\n\ndef get_new_position(old_position, head, steps):\n result = old_position\n\n if head == \"north\":\n result[1] += steps # Add to y\n elif head == \"east\":\n result[0] += steps # Add to x\n elif head == \"south\":\n result[1] -= steps # Subtract from y\n elif head == \"west\":\n result[0] -= steps # Subtract from x\n\n return result\n\n\ndef solve(puzzle_input):\n position = [0, 0]\n puzzle_input = puzzle_input.split(\", \")\n\n head = \"north\"\n history = [[0, 0]]\n\n for instruction in puzzle_input:\n side, steps = (instruction[0], instruction[1:])\n steps = int(steps)\n head = get_head(head, side)\n # position = get_new_position(position, head, steps)\n current_position = position[:]\n for i in range(1, steps+1):\n position = get_new_position(current_position[:], head, i)\n if position not in history:\n history.append(position[:])\n else:\n\n # If I learned anything while working on AoC, then it's that you should always explicitly copy lists\n # (`new_list = old_list[:]`). 
You will not see that you're using a reference until it's too late.\n\n                # Part 2 asks for the first location visited twice, so stop as soon as one is found.\n                return get_distance([0, 0], position)\n\n    return None  # only reached if no location is ever visited twice\n\n\ndef main():\n    f = open(\"puzzle_input\")\n    puzzle_input = f.read()\n    f.close()\n\n    solution = solve(puzzle_input)\n    print(solution)\n\nif __name__ == \"__main__\":\n    main()\n\n","sub_path":"2016/1/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"389192244","text":"import win32api,win32con,win32gui\r\nfrom ctypes import *\r\nimport time\r\nfrom PIL import ImageGrab\r\nimport smtplib\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.header import Header\r\n\r\ndef mouse_move(x,y):\r\n\twindll.user32.SetCursorPos(x,y)\r\n\t\r\ndef mouse_click(x=None,y=None):\r\n\tif x is not None and y is not None:\r\n\t\tmouse_move(x,y)\r\n\t\ttime.sleep(0.05)\r\n\t\twin32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0,0,0)\r\n\t\ttime.sleep(0.05)\r\n\t\twin32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0,0,0)\r\n\t\t\r\ndef mouse_double_click(x=None,y=None):\r\n\tif x is not None and y is not None:\r\n\t\tmouse_move(x,y)\r\n\t\ttime.sleep(0.05)\r\n\t\twin32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0,0,0)\r\n\t\twin32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0,0,0)\r\n\t\ttime.sleep(0.01)\r\n\t\twin32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0,0,0)\r\n\t\twin32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0,0,0)\r\n\r\ndef get_pixel_colour(i_x, i_y):\r\n    i_desktop_window_id = win32gui.GetDesktopWindow()\r\n    i_desktop_window_dc = win32gui.GetWindowDC(i_desktop_window_id)\r\n    long_colour = win32gui.GetPixel(i_desktop_window_dc, i_x, i_y)\r\n    i_colour = int(long_colour)\r\n    return (i_colour & 0xff), ((i_colour >> 8) & 0xff), ((i_colour >> 16) & 0xff)\r\n\t\t\r\nmouse_double_click(20,9)\t# double-click the icon in the top-left corner\r\ntime.sleep(1)\r\nmouse_click(758,340)\t# connect\r\ntime.sleep(40)\r\nmouse_click(1014, 104)\t# dismiss the activation prompt\r\ntime.sleep(10)\r\nmouse_click(1013, 82)\t# dismiss the verification prompt\r\ntime.sleep(2)\r\nmouse_click(212,747)\t# open the browser\r\ntime.sleep(10)\r\nmouse_click(134, 78)\t# click the bookmarks bar\r\ntime.sleep(15)\r\nmouse_click(487, 534)\t# log in\r\ntime.sleep(5)\r\nmouse_click(1077, 73)\t# daily check-in\r\ntime.sleep(5)\r\nmouse_click(557, 332)\t# submit\r\ntime.sleep(5)\r\nmouse_click(805, 165)\t# confirm\r\ntime.sleep(1)\r\nim = ImageGrab.grab()\r\nim.save('D:/aaa.jpeg','jpeg')\r\ntime.sleep(2)\r\nmouse_click(1351, 8)\t# close the browser\r\ntime.sleep(1)\r\nmouse_move(715, 0)\t# move the mouse to the top edge\r\ntime.sleep(3)\r\nmouse_click(949, 12)\t# close the remote connection\r\ntime.sleep(2)\r\nmouse_click(766, 444)\t# confirm\r\n\r\n# third-party SMTP service\r\nmail_host=\"smtp.qq.com\"  # SMTP server\r\nmail_user=\"hohahe@qq.com\"  # username\r\nmail_pass=\"qkrnqwtapfvdbgff\"  # password\r\n\r\nsender = 'hohahe@qq.com'\r\nreceivers = ['hohahe@qq.com']  # recipients; can be your QQ mailbox or any other mailbox\r\n\r\n# create a message instance with an attachment\r\nmessage = MIMEMultipart()\r\nmessage['From'] = Header(\"李冬冬\", 'utf-8')\r\nmessage['To'] = Header(\"王大皮\", 'utf-8')\r\nmessage['Subject'] = Header('开会通知', 'utf-8')  # subject: meeting notice\r\n# email body (the string says: come to B504 for a meeting)\r\nmessage.attach(MIMEText('来B504开会EWFEFEFWFWEFWEFWFWEFWEF', 'plain', 'utf-8'))\r\n# construct attachment 1 (here: the screenshot saved above)\r\natt1 = MIMEText(open('D:/aaa.jpeg', 'rb').read(), 'base64', 'utf-8')\r\natt1[\"Content-Type\"] = 'application/octet-stream'\r\n# the filename here can be anything; whatever name you write is shown in the email\r\natt1[\"Content-Disposition\"] = 'attachment; filename=\"aaa.jpeg\"'\r\nmessage.attach(att1)\r\n\r\ntry:\r\n\tsmtpObj = smtplib.SMTP_SSL()\r\n\tsmtpObj.connect(mail_host, 465) # 465 is the SMTP-over-
\tsmtpObj.connect(mail_host, 465) # 465 is the SMTPS (implicit SSL) port\r\n\tsmtpObj.login(mail_user,mail_pass)\r\n\tsmtpObj.sendmail(sender, receivers, message.as_string())\r\n\tprint(\"邮件发送成功\")\r\nexcept Exception as err:\r\n\tprint(err)","sub_path":"sign.pyw","file_name":"sign.pyw","file_ext":"pyw","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"347401869","text":"\"\"\" Code common to fireweather bots\n\"\"\"\nfrom abc import ABC, abstractmethod\nimport logging\nimport re\nimport os\nfrom tempfile import TemporaryDirectory\nfrom urllib.parse import urljoin\nfrom pathlib import PurePath\nfrom requests import Session, HTTPError\nfrom requests_ntlm import HttpNtlmAuth\nfrom app import config\nfrom app.stations import get_stations_synchronously\n\n\nBC_FIRE_WEATHER_BASE_URL = 'https://bcfireweatherp1.nrs.gov.bc.ca'\nBC_FIRE_WEATHER_ENDPOINT = 'Scripts/Public/Common/Results_Report.asp'\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CSVNotFoundException(Exception):\n \"\"\" Exception thrown if CSV is not found \"\"\"\n\n\nclass AuthenticationException(Exception):\n \"\"\" Exception thrown if there's any issue authenticating \"\"\"\n\n\ndef _authenticate_session(session: Session) -> Session:\n \"\"\" Authenticate the session using NTLM auth\n \"\"\"\n password = config.get('BC_FIRE_WEATHER_SECRET')\n user = config.get('BC_FIRE_WEATHER_USER')\n logger.info('Authenticating user %s at %s', user, BC_FIRE_WEATHER_BASE_URL)\n resp = session.get(BC_FIRE_WEATHER_BASE_URL,\n auth=HttpNtlmAuth('idir\\\\'+user, password))\n\n if resp and re.search(r\"server error\", resp.text, re.IGNORECASE):\n raise AuthenticationException(\n \"Server Error occurred while authenticating user. \\n {}\".format(resp.text))\n\n return session\n\n\ndef _infer_csv_url(content: str):\n \"\"\" Infer the CSV url from the request response and the base url \"\"\"\n search_result = re.search(r\"fire_weather\\/csv\\/.+\\.csv\", content)\n if not search_result:\n raise CSVNotFoundException(\"Couldn't find the csv url. 
Content: {}\".format(content))\n logger.info('CSV file identified as %s', search_result.group(0))\n file_path = search_result.group(0)\n return urljoin(BC_FIRE_WEATHER_BASE_URL, file_path)\n\n\ndef _request_csv_url(session: Session, request_body: dict):\n \"\"\" Submit the POST request to query hourlies for the station, and infer\n the url of the CSV from the response.\n \"\"\"\n # Construct the url.\n url = urljoin(BC_FIRE_WEATHER_BASE_URL, BC_FIRE_WEATHER_ENDPOINT)\n # Do the post.\n response = session.post(url, data=request_body)\n if response.status_code != 200:\n # Raise an exception if we don't get a 200 response.\n error_message = 'Received status code: {} (expecting 200)'.format(response.status_code)\n raise HTTPError(error_message, response=response)\n # Extract csv url.\n return _infer_csv_url(response.text)\n\n\ndef _get_csv_response(session: Session, url: str):\n return session.get(\n url,\n auth=HttpNtlmAuth('idir\\\\'+config.get('BC_FIRE_WEATHER_USER'),\n config.get('BC_FIRE_WEATHER_SECRET'))\n )\n\n\ndef get_station_names_to_codes() -> dict:\n \"\"\" Helper function to create dictionary of (station_name: station_code) key-value pairs\n Is used when replacing station names with station IDs in dataframe\n \"\"\"\n stations = get_stations_synchronously()\n station_codes = {\n station.name: station.code for station in stations\n }\n # have to hack this, because BC FireWeather API spells a certain station 'DARCY'\n # while our weather_stations.json spells the station 'D'ARCY'\n station_codes['DARCY'] = station_codes.pop('D\\'ARCY')\n return station_codes\n\n\ndef _download_csv(\n session: Session,\n url: str,\n target_path: str) -> str:\n \"\"\" Fetch CSV of hourly actual weather for a station.\n \"\"\"\n response = _get_csv_response(session, url)\n\n csv_filename = PurePath(url).name\n target_filename = os.path.join(target_path, csv_filename)\n\n # Need to write response content to a CSV file - once the CSV file has been read, it will be deleted\n with open(target_filename, 'wb') as csv_file:\n csv_file.write(response.content)\n return target_filename\n\n\nclass BaseBot(ABC):\n \"\"\" Base class for the fire weather bots. The hourly and noon bots are essentially identical, except\n for what the request looks like, and how to process the data. 
\"\"\"\n\n @abstractmethod\n def construct_request_body(self):\n \"\"\" Code for constructing the request body that is used to request a CSV from the phase one website\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def process_csv(self, filename: str):\n \"\"\" Code for processing the CSV returned from the phase one website and insert it into a databas\n \"\"\"\n raise NotImplementedError()\n\n def run(self):\n \"\"\" Entry point for running the bot \"\"\"\n with Session() as session:\n # Authenticate with idir.\n _authenticate_session(session)\n # Build the request body.\n request_body = self.construct_request_body()\n # Get the CSV url.\n csv_url = _request_csv_url(session, request_body)\n with TemporaryDirectory() as temp_path:\n # Download csv into a temporary folder.\n filename = _download_csv(session, csv_url, temp_path)\n # Proces the csv.\n self.process_csv(filename)\n # Delete the file, now that we're done.\n os.remove(filename)\n logger.info('response from session: %s', csv_url)\n","sub_path":"api/app/fireweather_bot/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"363304419","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# © 2017-2019, ETH Zurich, Institut für Theoretische Physik\n# Author: Dominik Gresch \n\"\"\"\nDefines tests for the tbmodels.slice calculation.\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\n\ndef test_slice(\n configure_with_daemon, # pylint: disable=unused-argument\n sample,\n get_tbmodels_process_builder,\n check_calc_ok\n):\n \"\"\"\n Run the tbmodels.slice calculation and check that it outputs\n a tight-binding model.\n \"\"\"\n from aiida.plugins import DataFactory\n from aiida.orm import List\n from aiida.engine import run_get_node\n\n builder = get_tbmodels_process_builder('tbmodels.slice')\n\n SinglefileData = DataFactory('singlefile') # pylint: disable=invalid-name\n builder.tb_model = SinglefileData(file=sample('model.hdf5'))\n\n builder.slice_idx = List(list=[0, 3, 2, 1])\n\n output, calc = run_get_node(builder)\n check_calc_ok(calc)\n assert isinstance(output['tb_model'], SinglefileData)\n","sub_path":"tests/test_slice.py","file_name":"test_slice.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"281270770","text":"from tool.tool import *\nfrom dianxin.feat1 import *\n\n\ndata_path = 'C:/Users/cui/Desktop/python/dianxin/data/'\nd = {89950166: 1, 89950167: 2, 89950168: 5, 90063345: 0, 90109916: 4,\n 90155946: 8, 99999825: 10, 99999826: 7, 99999827: 6, 99999828: 3, 99999830: 9}\nrd = {0: 90063345, 1: 89950166, 2: 89950167, 3: 99999828, 4: 90109916,\n 5: 89950168, 6: 99999827, 7: 99999826, 8: 90155946, 9: 99999830, 10: 99999825}\ncc = ['service_type', 'is_mix_service','is_promise_low_consume',\n'net_service', 'gender', 'age', 'online_time','contract_type',\n'1_total_fee','2_total_fee', '3_total_fee', '4_total_fee',\n'month_traffic','last_month_traffic', 'local_trafffic_month',\n'local_caller_time', 'service1_caller_time','service2_caller_time',\n'many_over_bill', 'contract_time','pay_times', 'pay_num']\nprint('读取train数据...')\ntrain = pd.read_csv(data_path + 'train.csv')\ntrain['label'] = train['current_service'].map(d).astype(int)\ntrain = train.drop_duplicates(cc)\ntest = pd.read_csv(data_path + 'test.csv')\ndata = train.append(test)\n# test['label'] = 
np.nan\n\n\nprint('构造特征...')\ndata_feat = make_feat(data,'online')\n\nprint('切分数据...')\ntest_feat = data_feat[data_feat['user_id'].isin(test['user_id'])].copy()\ntrain_feat = data_feat[data_feat['user_id'].isin(train['user_id'])].copy()\n\npredictors = train_feat.columns.drop(['user_id', 'current_service', 'label'])\n\n\n\nprint('开始CV 5折训练...')\nscores = []\nt0 = time.time()\nmean_score = []\ntrain_preds = np.zeros((len(train_feat),11))\ntest_preds = np.zeros((len(test_feat),11))\nxgb_test = xgb.DMatrix(test_feat[predictors])\nkf = KFold(len(train_feat), n_folds = 5, shuffle=True, random_state=520)\nfor i, (train_index, test_index) in enumerate(kf):\n xgb_train = xgb.DMatrix(train_feat[predictors].iloc[train_index], train_feat['label'].iloc[train_index])\n xgb_eval = xgb.DMatrix(train_feat[predictors].iloc[test_index], train_feat['label'].iloc[test_index])\n\n print('开始训练...')\n param = {'objective': 'multi:softprob',\n 'eta': 0.1,\n 'max_depth': 6,\n 'silent': 1,\n 'num_class': 11,\n 'eval_metric': \"mlogloss\",\n 'min_child_weight': 3,\n 'subsample': 0.7,\n 'colsample_bytree': 0.7,\n 'seed': 66\n }\n watchlist = [(xgb_train, 'train'), (xgb_eval, 'val')]\n\n clf = xgb.train(param,\n xgb_train,\n num_boost_round=3000,\n evals=watchlist,\n verbose_eval=50,\n early_stopping_rounds=50)\n\n train_preds[test_index] += clf.predict(xgb_eval)\n test_preds += clf.predict(xgb_test)\n\npreds = test_preds.copy()/5\nint_preds = pd.Series(test_preds.argmax(axis=1))\ntrain_preds = pd.DataFrame(train_preds,columns=[str(i)+'_xgb1' for i in range(11)])\ntrain_preds['user_id'] = train_feat['user_id'].values\ntest_preds = pd.DataFrame(test_preds/5,columns=[str(i)+'_xgb1' for i in range(11)])\ntest_preds['user_id'] = test_feat['user_id'].values\ndata_preds = train_preds.append(test_preds)\ndata_preds.to_csv( r'C:\\Users\\cui\\Desktop\\python\\dianxin\\submission\\data_preds_xgb1.csv', index=False)\n\nint_preds = pd.Series(preds.argmax(axis=1))\ntest_feat['current_service'] = int_preds.map(rd).values\nprint('预估得分: {}'.format(exp_multi_f1(preds,int_preds)**2))\ntest_feat[['user_id','current_service']].to_csv(r'C:\\Users\\cui\\Desktop\\python\\dianxin\\submission\\xindai_sumbmission_xgb1_{}.csv'.format(\n datetime.datetime.now().strftime('%Y%m%d_%H%M%S')),index=False)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"dianxin/cv_submission/cv_submission_xgb2.py","file_name":"cv_submission_xgb2.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"12934658","text":"#!/usr/local/bin/python3\n# f=open('/tmp/passwd')\n# data=f.read()\n# print(data)\n# data=f.read()\n# print(data)\n# f.close()\n# f=open('/tmp/passwd')\n# data=f.read(4)\n# print(data)\n# data=f.readline()\n# print(data)\n# data=f.readlines()\n# print(data)\n# f.close()\n# f=open('/tmp/passwd')\n# for line in f :\n# print(line,end='')\n# f.close()\n# f=open('/root/1.jpg','rb')\n# print(f.read(4096))\n# f.close()\n# f=open('/tmp/test','w')\n# f.write('hello')\n# f.flush()\n# f.writelines(['world\\n','new'])\n# f.close()\n# with open('/tmp/passwd') as f:\n# print(f.readline(),end='')\n# #print(f.readlines())\n# f=open('/tmp/passwd','rb')\n# print(f.tell())\n# print(f.read(4))\n# print(f.tell())\n# f.seek(2,1)\n# print(f.tell())\n# f.seek(-5,2)\n# print(f.tell())\n# f.seek(0,0)\n# print(f.tell())\n######################################################\n# def mk_fib(length=8):\n# \"说明文件:这是一个兔子数列\"\n# fib=[0,1]\n# for i in range(length-len(fib)):\n# 
fib.append(fib[-1]+fib[-2])\n# return fib\n# print('兔子数列示例:')\n# exam=mk_fib()\n# print(exam)\n# print('-'*50)\n# n=int(input('想生成的数列长度:'))\n# print(mk_fib(n))\n################################################\n# import sys\n# def copy(src_name,dst_name):\n# src_f=open(src_name,'rb')\n# dst_f=open(dst_name,'wb')\n# while True:\n# data=src_f.read(4096)\n# if not data:\n# break\n# dst_f.write(data)\n# src_f.close()\n# dst_f.close()\n# copy(sys.argv[1],sys.argv[2])\n#################################################\n# import my_model as my\n# from random import randint\n# print(my.hi)\n# my.print_star()\n# print(randint(1,10))\n################################################\n# import random\n# def key(num):\n# for i in range(num):\n# one_key=int(random.randint(48,123))\n# print(random.choice(chr(one_key)),end='')\n# print()\n# num=int(input('密码的位数:'))\n# if num > 0:\n# key(num)\n##############################################\nimport random\nimport string\nkey_poll=string.ascii_letters+string.digits\nwhile True:\n num=int(input('密码的位数:'))\n if num > 0:\n user_key=random.sample(key_poll,num)\n print(''.join(user_key))\n break\n else:\n print('密码位数不合规,请重新输入')\n","sub_path":"day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"414244049","text":"from __future__ import print_function\nimport requests\nfrom bs4 import BeautifulSoup\nimport datetime\nimport time\nimport re\nfrom googleapiclient.discovery import build\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\n\n\n\ndef main():\n\tn = 1\n\tCAL = gcal_login()\n\twith open('output.txt', 'wt') as clear:\n\t\tclear.write('Data:' + '\\n')\n\twith open('case_number_list', 'r+') as inwards:\n\t\tfor caseNO in inwards:\n\t\t\tcaseNO = caseNO.rstrip()\n\t\t\tgather_soup(caseNO, n)\n\t\t\ttrim_soup()\n\t\t\twith open('output.txt','at') as outwards:\n\t\t\t\tdate_List = universalize(parse_chunk(chunk_creation()))\n\t\t\t\toutwards.write(caseNO + ': \\n' + str(date_List) + '\\n')\n\t\t\tevent_creation(date_List, CAL, caseNO)\n\n\n\n\n\t\t\ttime.sleep(1)\n\n\t\t\tn = n + 1\n\n\ndef gather_soup(caseNO, n):\n\n\turl = 'https://www.lacourt.org/casesummary/ui/' \t\t\t\t#destination url\n\tpayload = {'CaseNumber':caseNO} \t\t\t\t\t\t\t#name of input field: input query\n\n\n\n\tprint('contacting website for case no. ' + str(n) + \": \" + caseNO + \"...\")\n\tr = requests.get(url, params=payload)\t\t\t\t\t\t\t#results of query called 'r'\n\n\n\t#write html file. 
just in case\n\tprint('contacted')\n\twith open(\"requests_results.html\", \"wb\") as f:\n\t\tf.write(r.content)\n\twith open(\"requests_results.html\", \"r+b\") as f:\n\t\tdata = f.read()\n\n\t#parse for text only\n\tsoup = BeautifulSoup(data, 'html.parser')\n\tsoupText = soup.get_text()\n\tsoupText = soupText.encode('utf-8')\n\tprint ('text parsed')\n\twith open(\"text_only.txt\", \"wb\") as f:\n\t\tf.write(soupText)\n\n\n\n\n#remove chunk of irrelevant text\ndef trim_soup():\t\t\t\t\t\t\t\t\t\t\t\t\t \n\twith open(\"text_only.txt\", \"rt\") as f:\n\t\t#begin loop through text\n\t\tfor line in f:\n\t\t\tif \"CASE INFORMATION\" in line:\n\t\t\t\t#interrupt loop at start of key info\n\t\t\t\tbreak\n\n\t\t#write new file starting at interruption\n\t\twith open(\"relevant_text.txt\",\"wt\") as out:\n\t\t\tout.writelines(f)\n\n\n\n\n\n\n\n# attempt to pull out key dates and info from relevant_text.txt\ndef chunk_creation():\n\t#create list of data chunks\n\tchunkL = []\n\tbyWord = []\n\twith open(\"relevant_text.txt\",\"rt+\") as f:\n\t\tfor line in f:\n\t\t\t#pull filing date\n\t\t\tif 'VS' in line:\n\t\t\t\tcaseName = str(line)\n\t\t\t\tchunkL.append(caseName)\n\t\t\tif \"Filing Date:\" in line:\n\t\t\t\tFiling = str(line)\n\t\t\t\t#append list w filing date\n\t\t\t\tchunkL.append(Filing)\n\t\t\t# if \"at\" in line:\n\t\t\t# \tat = str(line)\n\t\t\t# \tchunkL.append(at)\n\t\t\tif \"Final Status Conference\" in line:\n\t\t\t\tFSC = str(line)\n\t\t\t\t#append list w data chunk\n\t\t\t\tchunkL.append(FSC)\n\t\t\t\t#break after first trigger\n\t\t\t\tbreak\n\t\tdataChunk = ''.join(chunkL)\n\t\tdataChunk = dataChunk.split()\n\n\t\tfor item in dataChunk:\n\t\t\tbyWord.append(item)\n\t\t#create final file, create list from dictionary, write list to file. Move this to parse_chunk later on\n\t\twith open(\"final_text.txt\",\"wt+\") as out:\n\t\t\tfor x in byWord:\n\t\t\t\tout.writelines(x + '\\n')\n\n\treturn byWord\n\n\n\n\n
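# Note (added comment): parse_chunk below requires the token 'Filing' to be present\n# in byWord (list.index() raises ValueError otherwise); the hearing tokens\n# 'Conference', 'Trial' and 'Dismissal' are only used when all three are present.\n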
def parse_chunk(byWord):\n\t#modify byWord to break between info points\n\tindexFiling = byWord.index('Filing')\n\tName = byWord[0:indexFiling]\n\tName = \"Name: \" + ' '.join(Name)\n\tFiling = byWord[indexFiling:indexFiling + 3]\n\n\tfilingDate = Filing[2].split('/')\n\tfilingMonth = int(filingDate[0])\n\tfilingDay = int(filingDate[1])\n\tfilingYear = int(filingDate[2])\n\n\tfilingDateTime = datetime.date(filingYear, filingMonth, filingDay).isoformat()\n\tFiling = \"Filing Date: \" + filingDateTime\n\n\tif 'Conference' in byWord and 'Trial' in byWord and 'Dismissal' in byWord:\n\t\tindexConference = byWord.index('Conference')\n\t\tindexTrial = byWord.index('Trial')\n\t\tindexDismissal = byWord.index('Dismissal')\n\t\t#indexName = byWord.index('VS')\n\n\t\tFSC = byWord[indexFiling+3:indexConference-1]\n\t\tTrial = byWord[indexConference:indexTrial]\n\t\tOSC = byWord[indexTrial:indexDismissal]\n\n\n\n\t\t#pull dates for gcal integration\n\n\n\t\tFSCdate = FSC[0].split('/')\n\t\tFSCmonth = int(FSCdate[0])\n\t\tFSCday = int(FSCdate[1])\n\t\tFSCyear = int(FSCdate[2])\n\t\tFSCtime = FSC[2].split(':')\n\t\tFSChour = int(FSCtime[0])\n\t\tFSCminute = int(FSCtime[1])\n\n\t\ttrialDate = Trial[1].split('/')\n\t\ttrialMonth = int(trialDate[0])\n\t\ttrialDay = int(trialDate[1])\n\t\ttrialYear = int(trialDate[2])\n\t\ttrialTime = Trial[3].split(':')\n\t\ttrialHour = int(trialTime[0])\n\t\ttrialMinute = int(trialTime[1])\n\n\t\tOSCdate = OSC[1].split('/')\n\t\tOSCmonth = int(OSCdate[0])\n\t\tOSCday = int(OSCdate[1])\n\t\tOSCyear = int(OSCdate[2])\n\t\tOSCtime = OSC[3].split(':')\n\t\tOSChour = int(OSCtime[0])\n\t\tOSCminute = int(OSCtime[1])\n\n\n\n\n\t\tFSCdatetime = datetime.datetime(FSCyear, FSCmonth, FSCday, FSChour, FSCminute).isoformat()\n\n\n\t\ttrialDateTime = datetime.datetime(trialYear, trialMonth, trialDay, trialHour, trialMinute).isoformat()\n\n\n\t\tOSCdatetime = datetime.datetime(OSCyear, OSCmonth, OSCday, OSChour, OSCminute).isoformat()\n\n\n\n\n\n\n\t\t#create return values for output to text file.\n\n\t\tTrial = \"Trial: \" + trialDateTime + ' ' + ' '.join(Trial[5:len(Trial)-2])\n\t\tFSC = \"FSC: \" + FSCdatetime + ' ' + ' '.join(FSC[5:len(FSC)-1])\n\t\tOSC = \"OSC: \" + OSCdatetime + ' ' + ' '.join(OSC[6:len(OSC)-3])\n\n\n\n\n\t\tparsed_data = Name + '\\n' + Filing + '\\n' + Trial + '\\n' + FSC + '\\n' + OSC\n\telse:\n\t\tfor item in byWord:\n\t\t\tif item == 'at':\n\t\t\t\tatIndex = byWord.index(item)\n\t\t\t\ttempDate = byWord[atIndex-1]\n\t\t\t\ttempTime = byWord[atIndex+1]\n\t\t\t\ttempTime = tempTime.split(':')\n\t\t\t\ttempDate = tempDate.split('/')\n\t\t\t\tdateMonth = int(tempDate[0])\n\t\t\t\tdateDay = int(tempDate[1])\n\t\t\t\tdateYear = int(tempDate[2])\n\t\t\t\tdateHour = int(tempTime[0])\n\t\t\t\tdateMinute = int(tempTime[1])\n\n\t\t\t\ttempDateTime = datetime.datetime(dateYear, dateMonth, dateDay, dateHour, dateMinute)\n\t\t\t\tnow = datetime.datetime.now()\n\t\t\t\tif tempDateTime > now:\n\t\t\t\t\t#keyWord =\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tpass\n\n\n\n\n\n\n\n\n\n\t\tparsed_data = Name + '\\n' + Filing + '\\n'\n\n\n\n\n\treturn parsed_data\n\ndef universalize(date_string):\n\tdate_list = re.split('[ \\n]', date_string)\n\tprint(date_list)\n\treturn date_list\n\n\n# def event_creation(date_list):\nSCOPES = 'https://www.googleapis.com/auth/calendar'\n\ndef gcal_login():\n\tstore = 
file.Storage('token.json')\n\tcreds = store.get()\n\tif not creds or creds.invalid:\n\t\tflow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n\t\tcreds = tools.run_flow(flow, store)\n\tCAL = build('calendar', 'v3', http=creds.authorize(Http()))\n\treturn CAL\n\n\ndef event_creation(date_list, CAL, caseNO):\n\tindexName = date_list.index('Name:')\n\tindexEndName = date_list.index('VS')\n\tcaseName = date_list[indexName+1:indexEndName]\n\tcaseName = ' '.join(caseName)\n\tfor item in date_list:\n\t\tif item == 'FSC:':\n\t\t\t#define terms\n\t\t\tindexFSC = date_list.index('FSC:')\n\t\t\tFSCdate = date_list[indexFSC+1]\n\t\t\tkeyWord = 'FSC'\n\t\t\tindexFSCend = date_list.index('OSC:')\n\t\t\tlocation = date_list[indexFSC+2:indexFSCend]\n\n\t\t\t#call check_event()\n\t\t\tresult = check_event(caseName, keyWord, FSCdate, CAL)\n\n\t\t\tif result == 1:\n\t\t\t\t#preexisting event found on correct date\n\t\t\t\t#print('Preexisting event found for ' + keyWord + ' for ' + caseName + ' on correct date: ' + FSCdate)\n\t\t\t\tpass\n\t\t\telif (result != 1) and (result != 2):\n\t\t\t\t#preexisting event found on incorrect date\n\t\t\t\t#change date function\n\t\t\t\teventId = result\n\t\t\t\tchange_event(eventId, FSCdate, CAL)\n\t\t\t\tprint('Preexisting event found for ' + keyWord + ' for ' + caseName + ' on INCORRECT DATE. Date has been changed to ' + FSCdate)\n\t\t\telif result == 2:\n\t\t\t\t#no preexisting event found\n\t\t\t\t#proceed to insert_event():\n\n\t\t\t\t# construct endTime\n\t\t\t\tdt = datetime.datetime.fromisoformat(FSCdate)\n\t\t\t\tenddateTime = dt + datetime.timedelta(hours=1)\n\t\t\t\tendTime = enddateTime.isoformat()\n\n\t\t\t\t# construct summaryString\n\t\t\t\tsummaryString = keyWord + ' - ' + caseName\n\n\t\t\t\t# construct location\n\n\t\t\t\tinsert_event(FSCdate, endTime, summaryString, location, caseNO, CAL)\n\t\t\t\tprint('No event for ' + keyWord + ' for ' + caseName + ' found. Event created on ' + FSCdate)\n\n\t\tif item == 'OSC:':\n\t\t\t#define terms\n\t\t\tindexOSC = date_list.index('OSC:')\n\t\t\tOSCdate = date_list[indexOSC+1]\n\t\t\tkeyWord = 'OSC'\n\t\t\tlocation = date_list[indexOSC+2:]\n\n\t\t\t#call check_event()\n\t\t\tresult = check_event(caseName, keyWord, OSCdate, CAL)\n\t\t\tif result == 1:\n\t\t\t\t#preexisting event found on correct date\n\t\t\t\t#print('Preexisting event found for ' + keyWord + ' for ' + caseName + ' on correct date: ' + OSCdate)\n\t\t\t\tpass\n\t\t\telif (result != 1) and (result != 2):\n\t\t\t\t#preexisting event found on incorrect date\n\t\t\t\t#change date function\n\t\t\t\teventId = result\n\t\t\t\tchange_event(eventId, OSCdate, CAL)\n\t\t\t\tprint('Preexisting event found for ' + keyWord + ' for ' + caseName + ' on INCORRECT DATE. Date has been changed to ' + OSCdate)\n\t\t\telif result == 2:\n\t\t\t\t#no preexisting event found\n\t\t\t\t#proceed to insert_event():\n\t\t\t\t# construct endTime\n\t\t\t\tdt = datetime.datetime.fromisoformat(OSCdate)\n\t\t\t\tenddateTime = dt + datetime.timedelta(hours=1)\n\t\t\t\tendTime = enddateTime.isoformat()\n\n\t\t\t\t# construct summaryString\n\t\t\t\tsummaryString = keyWord + ' - ' + caseName\n\n\t\t\t\t# construct location\n\n\t\t\t\tinsert_event(OSCdate, endTime, summaryString, location, caseNO, CAL)\n\t\t\t\tprint('No event for ' + keyWord + ' for '+ caseName + ' found. 
Event created on ' + OSCdate)\n\n\t\tif item == 'Trial:':\n\t\t\t#define terms\n\t\t\tindexTrial = date_list.index('Trial:')\n\t\t\tTrialdate = date_list[indexTrial+1]\n\t\t\tkeyWord = 'Trial'\n\t\t\tindexLocEnd = date_list.index('FSC:')\n\t\t\tlocation = date_list[indexTrial+2:indexLocEnd]\n\n\t\t\t#call check_event()\n\t\t\tresult = check_event(caseName, keyWord, Trialdate, CAL)\n\n\t\t\tif result == 1:\n\t\t\t\t#preexisting event found on correct date\n\t\t\t\t#print('Preexisting event found for ' + keyWord + ' for ' + caseName + ' on correct date: ' + Trialdate)\n\t\t\t\tpass\n\t\t\telif (result != 1) and (result) != 2:\n\t\t\t\t#preexisting event found on incorrect date\n\t\t\t\t#change date function\n\t\t\t\tprint('Preexisting event found for ' + keyWord + ' for ' + caseName + ' on INCORRECT DATE. Changing event to ' + Trialdate)\n\n\t\t\t\teventId = result\n\t\t\t\tchange_event(eventId, Trialdate, CAL)\n\t\t\t\tprint(keyWord + ' date has been changed to ' + Trialdate)\n\t\t\telif result == 2:\n\t\t\t\t#no preexisting event found\n\t\t\t\t#proceed to insert_event():\n\n\t\t\t\t#construct endTime\n\t\t\t\tdt = datetime.datetime.fromisoformat(Trialdate)\n\t\t\t\tenddateTime = dt + datetime.timedelta(hours=1)\n\t\t\t\tendTime = enddateTime.isoformat()\n\n\t\t\t\t#construct summaryString\n\t\t\t\tsummaryString = keyWord + ' - ' + caseName\n\n\n\t\t\t\tinsert_event(Trialdate, endTime, summaryString, location, caseNO, CAL)\n\t\t\t\tprint('No event for ' + keyWord + ' for '+ caseName + ' found. Event created on ' + Trialdate)\n\n\t\t# if item == 'Fee:':\n\t\t# \tindexJuryFee = index.date_list('Fee:')\n\t\t# \tJuryFeeDate = date_list[indexJuryFee+1]\n\t\t# \tkeyWord = 'Fee'\n\t\t# \tif check_event(caseName, keyWord, JuryFeeDate) == 1:\n\t\t# \t\t#preexisting event found on correct date\n\t\t# \t\tprint('Preexisting event found for ' + caseName + ' on ' + JuryFeeDate)\n\t\t# \t\treturn 0\n\t\t# \telif check_event(caseName, keyWord, JuryFeeDate) != 1 or 2:\n\t\t# \t\t#preexisting event found on incorrect date\n\t\t# \t\t#change date function\n\t\t# \t\tprint('Preexisting event found for ' + caseName + ' on INCORRECT DATE. Date has been changed to ' + JuryFeeDate)\n\t\t# \telif check_event(caseName, keyWord, JuryFeeDate) == 2:\n\t\t# \t\t#no preexisting event found\n\t\t# \t\t#proceed to insert_event():\n\t\t# \t\tprint('No event for ' + caseName + ' found. Event created on ' + JuryFeeDate)\n\t\t# \telse:\n\t\t# \t\tprint('No Fee: found')\n\n\n\n\ndef insert_event(startTime, endTime, eventSummary, eventLocation, caseNO, CAL):\n\n\tnow = str(datetime.datetime.now())\n\teventLocation = ' '.join(eventLocation)\n\tevent = {\n\t\t'summary': eventSummary,\n\n\t\t'description': 'Event created automatically using case no: ' + caseNO + ' on ' + now + '. 
Event located in ' + eventLocation,\n\t\t'start': {\n\t\t\t'dateTime': startTime,\n\t\t\t'timeZone': 'America/Los_Angeles',\n\t\t},\n\t\t'end': {\n\t\t\t'dateTime': endTime,\n\t\t\t'timeZone': 'America/Los_Angeles',\n\t\t},\n\t\t# 'attendees': [\n\t\t# \t{'email': 'molchanlaw@yahoo.com'},\n\t\t# ],\n\t\t'reminders': {\n\t\t\t'useDefault': False,\n\t\t\t'overrides': [\n\t\t\t\t{'method': 'email', 'minutes': 48 * 60},\n\n\t\t\t],\n\t\t},\n\t}\n\n\tevent = CAL.events().insert(calendarId='greylawcalendar@gmail.com', body=event).execute()\n\n\tprint('Event created: %s' % (event.get('htmlLink')))\n\ndef check_event(caseName, keyWord, dateTime, CAL):\n\n\tresults = 2\n\tpage_token = None\n\twhile True:\n\t\tdt = datetime.datetime.fromisoformat(dateTime)\n\t\tdt = re.split('[ -]', str(dt))\n\t\tdtYear = int(dt[0])\n\t\tdtMonth = int(dt[1])\n\t\tdtDay = int(dt[2])\n\t\tdt = datetime.date(dtYear, dtMonth, dtDay).isoformat()\n\t\tkeyWord = keyWord.lower()\n\t\tevents = CAL.events().list(calendarId='greylawcalendar@gmail.com', pageToken=page_token, q=keyWord).execute()\n\t\t#below are events with keyWord present in title\n\t\tfor event in events['items']:\n\t\t\tsummaryString = str(event['summary']).lower()\n\t\t\tsummaryList = summaryString.split(' ')\n\t\t\tnameString = ''.join(caseName).lower()\n\t\t\tnameList = nameString.split(' ')\n\n\n\t\t\t#for each word in case name - if word present in title of event with keyWord in title:\n\n\t\t\tfor item in nameList:\n\t\t\t\tif item in summaryList:\n\t\t\t\t\t#print title of all events with both word from case name and keyword\n\t\t\t\t\tprint('Match Found: ' + str(item) + ' and ' + keyWord)\n\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tstart = event['start']['dateTime']\n\n\n\t\t\t\t\texcept:\n\t\t\t\t\t\tstart = event['start']['date']\n\n\n\t\t\t\t\tstart = re.split('[- T:]', str(start))\n\n\t\t\t\t\tstartMonth = int(start[1])\n\t\t\t\t\tstartDay = int(start[2])\n\t\t\t\t\tstartYear = int(start[0])\n\t\t\t\t\tstart = datetime.date(startYear, startMonth, startDay).isoformat()\n\n\n\t\t\t\t\tif dt == start:\n\n\t\t\t\t\t\tprint('correct date')\n\t\t\t\t\t\treturn 1\n\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint('incorrect date')\n\n\t\t\t\t\t\tresults = event['id']\n\n\n\n\t\t\t\telse:\n\t\t\t\t\tresults = 2\n\n\n\n\n\n\n\t\t\t\t\t#check if startTime matches time\n\n\t\tpage_token = events.get('nextPageToken')\n\n\t\tif not page_token:\n\t\t\tbreak\n\n\treturn results\n\n\n\ndef change_event(eventId, dateTime, CAL):\n\tevent = CAL.events().get(calendarId='greylawcalendar@gmail.com', eventId=eventId).execute()\n\n\tdt = datetime.datetime.fromisoformat(dateTime)\n\tenddateTime = dt + datetime.timedelta(hours=1)\n\tdtISO = enddateTime.isoformat()\n\n\tevent['start']['dateTime'] = dateTime\n\tevent['end']['dateTime'] = dtISO\n\tprint(event)\n\n\tCAL.events().update(calendarId='greylawcalendar@gmail.com', eventId=eventId, body=event).execute()\n\nmain()","sub_path":"workbottest.py","file_name":"workbottest.py","file_ext":"py","file_size_in_byte":13959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"83806526","text":"import telebot\nimport json\nfrom test_analys import user_result, answer_reducer\n\nbot = telebot.TeleBot('token')\n\nkeyboard = telebot.types.InlineKeyboardMarkup()\none = telebot.types.InlineKeyboardButton(text='a', callback_data='А')\ntwo = telebot.types.InlineKeyboardButton(text='b', callback_data='В')\nkeyboard.add(one, two)\n\nquestions_list = []\nuser_answer = {}\n\ndef quest_put():\n with open('questList.json', 'r', 
encoding='utf-8') as file:\n questions = json.load(file)\n for quest in questions:\n questions_list.append(quest)\n\n\n@bot.message_handler(commands=['start'])\ndef hello_bot(message):\n chat_id = message.from_user.id\n text = 'С помощью методики К.Н. Томаса (1973), американского социального психолога, определяются типические способы ' \\\n 'реагирования на конфликтные ситуации. Можно выявить, насколько воспитатель склонен к соперничеству и ' \\\n 'сотрудничеству в коллективе, стремится к компромиссам, избегает конфликтов, или, наоборот, старается ' \\\n 'обострить их, а также оценить адаптации каждого члена коллектива к совместной педагогической деятельности.!' \\\n '\\nВведите /continue, что бы начать тест'\n bot.send_message(chat_id, text)\n\n\n@bot.message_handler(commands=['continue'])\ndef test_start(message):\n user_id = message.from_user.id\n text = questions_list[0]\n user_answer[user_id] = []\n bot.send_message(user_id, text, reply_markup=keyboard)\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef test_next(call):\n user_id = call.from_user.id\n index = questions_list.index(call.message.text) + 1\n answer = str(index) + call.data\n user_answer[user_id].append(answer)\n\n if index >= len(questions_list):\n text = 'Тест завершён!'\n bot.send_message(user_id, text)\n result = user_result(user_answer[user_id], answer_reducer)\n bot.send_message(user_id, result)\n return\n\n text = questions_list[index]\n bot.send_message(user_id, text, reply_markup=keyboard)\n\n\nif __name__ == '__main__':\n quest_put()\n bot.polling(none_stop=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"355970228","text":"import os\n\nfrom setuptools import setup, find_packages\n\ndef get_version(pkg_path):\n with open(os.path.join(pkg_path, '__init__.py'), 'r') as fp:\n for line in fp:\n if line.startswith('__version__'):\n return line.split('\"' if '\"' in line else \"'\")[1]\n\ndef get_readme_text():\n with open(\"README.md\", \"r\", encoding=\"utf8\") as fp:\n long_description = fp.read()\n return long_description\n\nsetup(name='align',\n version=get_version(\n os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n 'align')),\n description='Analog Layout Synthesis Package',\n long_description=get_readme_text(),\n long_description_content_type=\"text/markdown\",\n url='ALIGN-analoglayout/ALIGN-public.git',\n author='Parijat Mukherjee',\n author_email='parijat.mukherjee@intel.com',\n license='BSD-3-Clause',\n packages=find_packages(include=['align', 'align.*']),\n package_data={'align': ['config/*']},\n scripts=[\n 'bin/schematic2layout.py',\n 'bin/gds2png.sh'\n ],\n install_requires=[\n 'networkx>=2.4',\n 'python-gdsii',\n 'matplotlib',\n 'pyyaml',\n 'pybind11',\n 'pydantic>=1.8',\n 'z3-solver',\n 'more-itertools'\n ],\n setup_requires=[],\n python_requires='~=3.8',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: C++',\n 'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)'\n ],\n zip_safe=False)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"140475397","text":"import discord\nfrom discord.ext import commands\n\nimport asyncio\n#\nimport json\nimport datetime\n\nclass Moderator(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n @commands.command()\n @commands.has_permissions(ban_members=True)\n async def ban(self, ctx, user : discord.Member, *, reason=\"Ban Hammer Has Spoken\"):\n if user.id == ctx.author.id:\n await ctx.send(\"You cant ban yourself silly!\")\n return\n await user.ban(reason = reason)\n await ctx.send(f\"{user.name}#{user.discriminator} has been banned for {reason}\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def unban(self, ctx, user):\n banned_users = await ctx.guild.bans()\n member_name, member_discriminator = user.split(\"#\")\n\n for ban_entry in banned_users:\n user = ban_entry.user\n\n if (user.name, user.discriminator) == (member_name, member_discriminator):\n await ctx.guild.unban(user)\n await ctx.send(f'Unbanned {user.mention}')\n return\n\n @commands.command()\n @commands.has_permissions(kick_members=True)\n async def kick(self, ctx, user : discord.Member, *, reason):\n await user.kick(reason=reason)\n \n @commands.command()\n @commands.has_permissions(ban_members=True)\n async def tempban(self, ctx, user: discord.Member, duration: int, *, reason=\"Ban Hammer has spoken\"):\n await user.ban(reason=reason)\n with open(\"assets/json/sanctions.json\", \"r\") as f:\n data = json.load(f)\n\n time = datetime.datetime.today() + datetime.timedelta(minutes=duration)\n\n tb = data[\"tempbans\"][str(ctx.guild.id)]\n\n tb.update({f\"{user.name}#{user.discriminator}\": {\"year\": time.year, \"month\": time.month, \"day\": time.day, \"hour\": time.hour, \"mins\": time.minute}})\n\n with open(\"assets/json/sanctions.json\", \"w\") as f:\n json.dump(data, f, indent=2)\n\n @commands.command()\n @commands.has_permissions(kick_members=True)\n async def warn(self, ctx, user: discord.Member, *, reason=\"You Have Been Warned!\"):\n with open(\"assets/json/sanctions.json\", \"r\") as f:\n data = json.load(f)\n\n gdata = data[str(ctx.guild.id)]\n wdata = gdata[\"warnings\"]\n\n id = 0\n\n for i in wdata:\n if int(i) >= id:\n id = int(i)+1\n \n\n wdata.update({id: [f\"{user.name}#{user.discriminator}\", reason, str(datetime.datetime.now())]})\n\n with open(\"assets/json/sanctions.json\", \"w\") as f:\n json.dump(data, f, indent=2)\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def view_warns(self, ctx, user: discord.Member = None):\n with open(\"assets/json/sanctions.json\", \"r\") as f:\n data = json.load(f)\n\n embed = discord.Embed(title=\"Warnings\")\n warns = data[str(ctx.guild.id)][\"warnings\"]\n\n for i in warns:\n if user == None:\n embed.add_field(name=f\"{i}: {warns[i][0]}\", value=f\"Reason: {warns[i][1]}\\nTimestamp: {warns[i][2][:19]}\\n.\", inline=False)\n else:\n if warns[i][0] == f\"{user.name}#{user.discriminator}\":\n embed.add_field(name=f\"{i}: {warns[i][0]}\", value=f\"Reason: {warns[i][1]}\\nTimestamp: {warns[i][2][:19]}\\n.\", inline=False)\n \n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def del_warn(self, ctx, id, *args):\n with open(\"assets/json/sanctions.json\", \"r\") as f:\n data = json.load(f)\n\n wdata = data[str(ctx.guild.id)][\"warnings\"]\n\n if id in wdata:\n del wdata[id]\n\n with open(\"assets/json/sanctions.json\", \"w\") as f:\n json.dump(data, f, indent=2)\n\n @commands.command()\n 
@commands.has_permissions(manage_messages=True)\n async def purge(self, ctx, amount=10, user: discord.Member = None):\n channel = ctx.channel\n amount += 1\n if user == None:\n await channel.purge(limit=amount)\n else:\n await channel.purge(limit=amount, check=lambda message: message.author == user)\n \n\ndef setup(client):\n client.add_cog(Moderator(client))","sub_path":"assets/mods.py","file_name":"mods.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"649119372","text":"def gc_blocks(seq, block_size):\n \"\"\"Divides a given sequence into blocks of given size.\n For each block, the GC content is computed and returned in a tuple.\"\"\"\n\n #make the sequence all upper case\n seq = seq.upper()\n\n #initialize count variables\n index = 0\n count = 0\n\n #initialize block and gc_list\n block = ''\n gc_list = []\n\n #run entire length of sequence\n while index < len(seq) and len(seq[index:]) >= block_size:\n\n #determine each block and gc content\n while count < block_size:\n block += seq[index]\n count += 1\n index += 1\n\n #compute gc content from each block\n gc_number = block.count('C') + block.count('G')\n gc_fraction = gc_number / block_size\n gc_list += [gc_fraction]\n\n #reset count and block\n count = 0\n block = ''\n\n #convert gc_list to a tuple\n gc_tuple = tuple(gc_list)\n\n return gc_tuple\n\ndef gc_map(seq, block_size, gc_thresh):\n \"\"\"Takes as an input a sequence, block size, and set GC threshold.\n Returns the reformatted sequence.\n Blocks with GC content > threshold are capitalized.\n Blocks with GC content < threshold are lowercase.\n Bases not included in blocks are truncated.\"\"\"\n\n #initialize variables\n formatted_seq = ''\n\n while len(seq) >= block_size:\n\n #cut block\n block = seq[:block_size]\n\n #format block based on gc threshold\n if gc_blocks(block, block_size)[0] < gc_thresh:\n block = block.lower()\n\n #add block to working output sequence\n formatted_seq += block\n\n #remove block from seq\n seq = seq[block_size:]\n\n return formatted_seq\n","sub_path":"pathogen_islands.py","file_name":"pathogen_islands.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"435682841","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/9/16 13:58\n# @Author : Wang fang chen\n# @Email : itjavawfc@163.com\n# @WeiXin :w335441537\n# @File : pymysql添加多条数据.py\n# @Software: PyCharm\nimport pymysql\n\nuser=input('user>>>:').strip()\npwd=input('pwd>>>:').strip()\n\nconn = pymysql.connect(host='47.244.28.93',\n user='test',\n password='test123',\n database='pythonstudy',\n charset='utf8')\ncursor=conn.cursor() #result sets are returned as tuples by default\nsql='select * from userinfo where name=%s and pwd=%s' #name, pwd etc. are deliberately not wrapped in quotes here\nprint(sql)\nres=cursor.execute(sql,[user,pwd]) #execute binds the parameters into the statement\nprint(res)\nsql1='insert into userinfo(name,pwd,sex,result) values(%s,%s,%s,%s)' #name, pwd etc. are deliberately not wrapped in quotes here\ndata=[\n ('wucaixia','1234',1,100),\n ('xiaowang','123',1,100),\n ('zhangsan','123',1,100),\n ('lisi','123',1,100)\n]\nprint(sql1)\nif res:\n print('登陆成功,准备执行插入操作!')\n res1 = cursor.executemany(sql1, data) #bind and execute the statement for every row\n conn.commit() # writes must be committed\nelse:\n print('登陆失败!')\n\ncursor.close()\nconn.close()\n\n'''\nSummary: two points\n1. executemany expects the data argument that follows the SQL to be a list (of row tuples)\n
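   e.g. (illustrative sketch, hypothetical table t):\n       cursor.executemany('insert into t(a,b) values(%s,%s)', [(1, 2), (3, 4)])\n       conn.commit()\n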
2. after cursor.executemany runs, conn.commit() is required\n\n'''\n","sub_path":"com/wfc/python/day9pymysql/pymysql添加多条数据.py","file_name":"pymysql添加多条数据.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"445193993","text":"import datetime\n\n\ndef find_days(month, limit):\n\n    if limit > 12:\n        limit = 12\n\n    days = []\n    start = datetime.date(datetime.datetime.now().year, month, 1)\n\n    def next_weekday(d, weekday):\n        days_ahead = weekday - d.weekday()\n        if days_ahead <= 0:  # Target day already happened this week\n            days_ahead += 7\n        return d + datetime.timedelta(days_ahead)\n\n    current = 0\n    considered_day = 0\n    cycle_day = start\n\n    while current < limit:\n        while current < limit and cycle_day.month == month:\n            days.append(next_weekday(cycle_day, considered_day))\n            cycle_day = next_weekday(days[current], 6)\n            current += 1\n        cycle_day = start\n        considered_day += 1\n\n    while len(days) != 12:\n        days.append(datetime.date(1900, 1, 1))\n\n    return days\n\n\ndef calc_days(money, money_per_days):\n    days = 0\n    while (days * money_per_days) < money:\n        days += 1\n    return days\n","sub_path":"src/days.py","file_name":"days.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"317853693","text":"# -*- coding: utf-8 -*-\n\nimport os\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.db.models.signals import post_save, post_delete\nfrom django.dispatch import receiver\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.conf import settings\n\nfrom . import models as media_models\nfrom apps.notify import models as notify_models\nfrom web.core.middleware.thread_local import get_current_profile\nfrom web.core.utils import get_html_message, get_bell_notification_status, get_email_notification_status\n\n\n@receiver([post_save, post_delete], sender=media_models.Photo)\n@receiver([post_save, post_delete], sender=media_models.Video)\ndef media_notification(sender, instance, **kwargs):\n company_staff = []\n profile = get_current_profile()\n # If there is no JWT token in the request,\n # then we don't create notifications (Useful at admin & shell for debugging)\n if not profile:\n return\n\n try:\n endpoint = os.path.join(settings.PROTOCOL+'://', settings.BASE_URL, 'dashboard')\n if instance.content_type.name == 'company':\n company_staff = instance.content_object.profiles.all()\n title = instance.content_object.name\n elif instance.content_type.name == 'project':\n # Level 1 project\n if instance.content_object.shared_project:\n level1_project = instance.content_object.shared_project.profiles.all().union(\n instance.content_object.shared_project.company.get_owners_and_delegates()\n )\n company_staff = instance.content_object.profiles.all().union(\n instance.content_object.company.get_owners_and_delegates(),\n level1_project\n )\n else:\n company_staff = instance.content_object.profiles.all().union(\n instance.content_object.company.get_owners_and_delegates()\n )\n endpoint = os.path.join(settings.PROTOCOL+'://', settings.BASE_URL, 'project/%s' % instance.object_id)\n title = instance.content_object.name\n elif instance.content_type.name == 'bom':\n company_staff = instance.content_object.owner.profiles.all()\n endpoint = os.path.join(settings.PROTOCOL+'://', settings.BASE_URL, 'preventivi')\n title = instance.content_object.title\n\n content = \"Mr %s %s has created this event. 
\" % (profile.first_name, profile.last_name)\n\n if 'created' in kwargs:\n if kwargs['created']:\n subject = _('New Media file (%s) created in %s (%s)' % (instance.title, instance.content_type.name, title))\n else:\n subject = _('Media file (%s) updated in %s (%s)' % (instance.title, instance.content_type.name, title))\n else:\n subject = _('Media file (%s) deleted in %s (%s)' % (instance.title, instance.content_type.name, title))\n\n final_content = \"For simplicity, the following button will redirect to the target page.\"\n body = get_html_message(content, final_content, endpoint)\n type = ContentType.objects.get(model=instance.content_type.name.lower())\n\n notify_obj = notify_models.Notify.objects.create(\n sender=profile, subject=subject, body=body,\n content_type=type, object_id=instance.object_id,\n creator=profile.user, last_modifier=profile.user\n )\n\n recipient_objs = []\n\n for staff in company_staff:\n bell_status = get_bell_notification_status(\n staff, instance.content_type.name\n )\n email_status = get_email_notification_status(\n staff, instance.content_type.name\n )\n\n if bell_status or email_status:\n recipient_objs.append(notify_models.NotificationRecipient(\n notification=notify_obj, is_email=email_status,\n is_notify=bell_status, recipient=staff,\n creator=profile.user, last_modifier=profile.user)\n )\n\n notify_models.NotificationRecipient.objects.bulk_create(\n recipient_objs,\n batch_size=100\n )\n except Exception as e:\n print(e)\n","sub_path":"apps/media/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"235813268","text":"\nfrom ..utilities.mutations import pnt_mtype\nfrom dryadic.features.mutations import MuType\n\nfrom .param_list import params, mut_lvls\nfrom ..utilities.data_dirs import vep_cache_dir, expr_sources\nfrom ...features.data.oncoKB import get_gene_list\nfrom ...features.cohorts.utils import get_cohort_data\n\nimport os\nimport argparse\nimport bz2\nimport dill as pickle\nfrom itertools import product\n\n\ndef main():\n parser = argparse.ArgumentParser(\n 'setup_tour',\n description=\"Load datasets and enumerate subgroupings to be tested.\"\n )\n\n parser.add_argument('expr_source', type=str,\n help=\"a source of expression data\")\n parser.add_argument('cohort', type=str, help=\"a tumour cohort\")\n parser.add_argument('search_params', type=str,)\n parser.add_argument('mut_lvls', type=str,)\n parser.add_argument('out_dir', type=str,)\n\n # parse command line arguments\n args = parser.parse_args()\n out_path = os.path.join(args.out_dir, 'setup')\n\n lvl_lists = [('Gene', ) + lvl_list\n for lvl_list in mut_lvls[args.mut_lvls]]\n search_dict = params[args.search_params]\n use_genes = get_gene_list(min_sources=2)\n\n cdata = get_cohort_data(args.cohort, args.expr_source, lvl_lists,\n vep_cache_dir, out_path, use_genes,\n use_copies=False)\n with bz2.BZ2File(os.path.join(out_path, \"cohort-data.p.gz\"), 'w') as f:\n pickle.dump(cdata, f, protocol=-1)\n\n total_samps = len(cdata.get_samples())\n max_samps = total_samps - search_dict['samp_cutoff']\n test_mtypes = set()\n\n for gene in set(dict(tuple(cdata.mtrees.values())[0])):\n pnt_count = {len(cdata.mtrees[lvls][gene].get_samples())\n for lvls in lvl_lists}\n\n assert len(pnt_count) == 1, (\n \"Mismatching mutation trees for gene {}!\".format(gene))\n pnt_count = tuple(pnt_count)[0]\n\n if pnt_count >= search_dict['samp_cutoff']:\n samp_dict = {None: 
cdata.mtrees[lvl_lists[0]][gene].get_samples()}\n gene_types = set()\n\n for lvls in lvl_lists:\n use_mtree = cdata.mtrees[lvls][gene]\n\n lvl_types = {\n mtype for mtype in use_mtree.combtypes(\n comb_sizes=tuple(\n range(1, search_dict['branch_combs'] + 1)),\n min_type_size=search_dict['samp_cutoff'],\n min_branch_size=search_dict['min_branch']\n )\n }\n\n samp_dict.update({mtype: mtype.get_samples(use_mtree)\n for mtype in lvl_types})\n\n gene_types |= {mtype for mtype in lvl_types\n if (len(samp_dict[mtype]) <= max_samps\n and len(samp_dict[mtype]) < pnt_count)}\n\n rmv_mtypes = set()\n for rmv_mtype in sorted(gene_types):\n rmv_lvls = rmv_mtype.get_levels()\n\n for cmp_mtype in sorted(gene_types\n - {rmv_mtype} - rmv_mtypes):\n cmp_lvls = cmp_mtype.get_levels()\n\n if (samp_dict[rmv_mtype] == samp_dict[cmp_mtype]\n and (rmv_mtype.is_supertype(cmp_mtype)\n or (any('domain' in lvl for lvl in rmv_lvls)\n and all('domain' not in lvl\n for lvl in cmp_lvls))\n or len(rmv_lvls) > len(cmp_lvls)\n or rmv_mtype > cmp_mtype)):\n rmv_mtypes |= {rmv_mtype}\n break\n\n test_mtypes |= {MuType({('Gene', gene): mtype})\n for mtype in gene_types - rmv_mtypes}\n test_mtypes |= {MuType({('Gene', gene): None})}\n\n with open(os.path.join(out_path, \"muts-list.p\"), 'wb') as f:\n pickle.dump(sorted(test_mtypes), f, protocol=-1)\n with open(os.path.join(out_path, \"muts-count.txt\"), 'w') as fl:\n fl.write(str(len(test_mtypes)))\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"experiments/subgrouping_tour/setup_tour.py","file_name":"setup_tour.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"98196115","text":"# roms/nonparametric/test_base.py\n\"\"\"Tests for roms.nonparametric._base.\"\"\"\n\nimport os\nimport h5py\nimport pytest\nimport numpy as np\nfrom scipy import linalg as la\n\nimport opinf\n\nfrom .. 
import (MODELFORM_KEYS, MODEL_FORMS,\n _get_data, _get_operators, _trainedmodel)\n\n\nclass TestNonparametricOpInfROM:\n \"\"\"Test roms.nonparametric._base._NonparametricOpInfROM.\"\"\"\n\n class Dummy(opinf.roms.nonparametric._base._NonparametricOpInfROM):\n \"\"\"Instantiable version of _NonparametricOpInfROM.\"\"\"\n _LHS_ARGNAME = \"ddts\"\n\n def predict(*args, **kwargs):\n pass\n\n # Properties --------------------------------------------------------------\n def test_operator_matrix_(self, r=15, m=3):\n \"\"\"Test operator_matrix_.\"\"\"\n c, A, H, G, B = _get_operators(r, m, expanded=False)\n\n rom = self.Dummy(\"cA\")\n rom.r = r\n rom.c_ = c\n rom.A_ = A\n assert np.all(rom.operator_matrix_ == np.column_stack([c, A]))\n\n rom.modelform = \"HB\"\n rom.r, rom.m = r, m\n rom.H_ = H\n rom.B_ = B\n assert np.all(rom.operator_matrix_ == np.column_stack([H, B]))\n\n rom.modelform = \"G\"\n rom.r = r\n rom.G_ = G\n assert np.all(rom.operator_matrix_ == G)\n\n def test_data_matrix_(self, k=500, m=20, r=10):\n \"\"\"Test data_matrix_, i.e., spot check _assemble_data_matrix().\"\"\"\n Q, Qdot, U = _get_data(r, k, m)\n\n rom = self.Dummy(\"cAH\")\n with pytest.raises(AttributeError) as ex:\n D = rom.data_matrix_\n assert ex.value.args[0] == \"data matrix not constructed (call fit())\"\n assert rom.d is None\n\n rom.modelform = \"A\"\n rom._fit_solver(None, Q, Qdot, inputs=None)\n assert np.all(rom.data_matrix_ == Q.T)\n assert rom.d == rom.data_matrix_.shape[1]\n\n rom.modelform = \"B\"\n rom._fit_solver(None, Q, Qdot, inputs=U)\n assert np.all(rom.data_matrix_ == U.T)\n assert rom.d == rom.data_matrix_.shape[1]\n\n rom .modelform = \"HG\"\n rom._fit_solver(None, Q, Qdot, inputs=None)\n D = np.column_stack([opinf.utils.kron2c(Q).T,\n opinf.utils.kron3c(Q).T])\n assert np.allclose(rom.data_matrix_, D)\n assert rom.d == rom.data_matrix_.shape[1]\n\n rom.modelform = \"c\"\n rom._fit_solver(None, Q, Qdot, inputs=None)\n assert np.all(rom.data_matrix_ == np.ones((k, 1)))\n assert rom.d == 1\n\n # Fitting -----------------------------------------------------------------\n def test_check_training_data_shapes(self):\n \"\"\"Test _check_training_data_shapes().\"\"\"\n # Get test data.\n k, m, r = 50, 20, 10\n Q, dQ, U = _get_data(r, k, m)\n rom = self.Dummy(\"A\")\n rom.r = r\n\n def _test(args, message):\n with pytest.raises(ValueError) as ex:\n rom._check_training_data_shapes(args)\n assert ex.value.args[0] == message\n\n # Try to fit the rom with a single snapshot.\n args = [(Q[:, 0], \"states\"), (dQ, \"dQ\")]\n _test(args, \"states must be two-dimensional\")\n\n # Try to fit the rom with misaligned Q and dQ.\n args = [(Q, \"staaates\"), (dQ[:, 1:-1], \"dQs\")]\n _test(args, f\"dQs.shape[-1] = {k-2:d} != {k:d} = staaates.shape[-1]\")\n\n # Try to fit the rom with misaligned Q and U.\n rom.modelform = \"AB\"\n rom.r, rom.m = r, m\n args = [(Q, \"states\"), (dQ, \"dQ\"), (U[:, 1:-1], \"inputs\")]\n _test(args, f\"inputs.shape[-1] = {k-2:d} != {k} = states.shape[-1]\")\n\n # Try with bad number of rows in states.\n args = [(Q[:-1, :], \"states\"), (dQ, \"dQ\"), (U, \"inputs\")]\n _test(args, f\"states.shape[0] != n or r (n=None, r={r})\")\n\n # Try with one-dimensional inputs when not allowed.\n rom.m = 2\n args = [(Q, \"states\"), (dQ, \"dQ\"), (U[:, 0], \"inputs\")]\n _test(args, \"inputs must be two-dimensional (m > 1)\")\n\n # Try with bad number of rows in inputs.\n rom.m = m\n args = [(Q, \"states\"), (dQ, \"dQ\"), (U[:-1, :], \"inputs\")]\n _test(args, f\"inputs.shape[0] = {m-1} != {m} = 
m\")\n\n # Try with bad dimension in inputs with m = 1.\n rom.m = 1\n args = [(U.reshape(1, 1, -1), \"inputs\")]\n _test(args, \"inputs must be one- or two-dimensional (m = 1)\")\n\n # Try with bad two-dimensional inputs with m = 1.\n rom.m = 1\n args = [(U.reshape(-1, 1), \"inputs\")]\n _test(args, \"inputs.shape != (1, k) (m = 1)\")\n\n # Correct usage.\n args = [(Q, \"states\"), (dQ, \"dQ\")]\n rom._check_training_data_shapes(args)\n args.append((U, \"inputs\"))\n rom.m = m\n rom._check_training_data_shapes(args)\n\n # Special case: m = inputs.ndim = 1.\n args[-1] = (U[0, :], \"inputs\")\n rom.m = 1\n rom._check_training_data_shapes(args)\n\n def test_process_fit_arguments(self, n=60, k=500, m=20, r=10):\n \"\"\"Test _process_fit_arguments().\"\"\"\n # Get test data.\n Q, lhs, U = _get_data(n, k, m)\n U1d = U[0, :]\n Vr = la.svd(Q)[0][:, :r]\n ones = np.ones(k)\n\n # Try with bad solver option.\n rom = self.Dummy(\"AB\")\n\n with pytest.raises(TypeError) as ex:\n rom._process_fit_arguments(None, None, None, None,\n solver=opinf.lstsq.PlainSolver)\n assert ex.value.args[0] == \"solver must be an instance, not a class\"\n\n class _DummySolver:\n pass\n\n with pytest.raises(TypeError) as ex:\n rom._process_fit_arguments(None, None, None, None,\n solver=_DummySolver())\n assert ex.value.args[0] == \"solver must have a 'fit()' method\"\n\n # With basis and input.\n Q_, lhs_, solver = rom._process_fit_arguments(Vr, Q, lhs, U)\n assert rom.n == n\n assert rom.r == r\n assert isinstance(rom.basis, opinf.basis.LinearBasis)\n assert np.all(rom.basis.entries == Vr)\n assert rom.m == m\n assert np.allclose(Q_, Vr.T @ Q)\n assert np.allclose(lhs_, Vr.T @ lhs)\n assert isinstance(solver, opinf.lstsq.PlainSolver)\n\n # Without basis and with a one-dimensional input.\n rom.modelform = \"cHB\"\n Q_, lhs_, solver = rom._process_fit_arguments(None,\n Q, lhs, U1d, solver=0)\n assert rom.n is None\n assert rom.r == n\n assert rom.basis is None\n assert rom.m == 1\n assert Q_ is Q\n assert lhs_ is lhs\n assert isinstance(solver, opinf.lstsq.PlainSolver)\n\n # With basis and no input.\n rom.modelform = \"cA\"\n Q_, lhs_, solver = rom._process_fit_arguments(Vr, Q, lhs, None,\n solver=1)\n assert rom._projected_operators_ == \"\"\n assert rom.n == n\n assert rom.r == r\n assert isinstance(rom.basis, opinf.basis.LinearBasis)\n assert np.all(rom.basis.entries == Vr)\n assert rom.m == 0\n assert np.allclose(Q_, Vr.T @ Q)\n assert np.allclose(lhs_, Vr.T @ lhs)\n assert isinstance(solver, opinf.lstsq.L2Solver)\n\n # With known operators for A.\n c, A, _, _, B = _get_operators(n, m, expanded=True)\n rom.modelform = \"AHB\"\n Q_, lhs_, _ = rom._process_fit_arguments(Vr, Q, lhs, U,\n known_operators={\"A\": A})\n assert rom._projected_operators_ == \"A\"\n assert np.allclose(lhs_, Vr.T @ (lhs - (A @ Vr @ Q_)))\n\n # With known operators for c and B.\n rom.modelform = \"cAHB\"\n ops = {\"B\": B, \"c\": c}\n Q_, lhs_, _ = rom._process_fit_arguments(Vr, Q, lhs, U,\n known_operators=ops)\n assert sorted(rom._projected_operators_) == sorted(\"Bc\")\n lhstrue = Vr.T @ (lhs - B @ U - np.outer(c, ones))\n assert np.allclose(lhs_, lhstrue)\n\n # Special case: m = inputs.ndim = 1\n U1d = U[0]\n B1d = B[:, 0]\n Q_, lhs_, _ = rom._process_fit_arguments(Vr, Q, lhs, U1d,\n known_operators={\"B\": B1d})\n assert rom.m == 1\n assert rom._projected_operators_ == \"B\"\n assert np.allclose(lhs_, Vr.T @ (lhs - np.outer(B1d, ones)))\n\n # Fully intrusive.\n rom.modelform = \"cA\"\n ops = {\"c\": c, \"A\": A}\n Q_, lhs_, _ = 
rom._process_fit_arguments(Vr, Q, lhs, None,\n known_operators=ops)\n assert sorted(rom._projected_operators_) == sorted(\"cA\")\n assert Q_ is None\n assert lhs_ is None\n\n def test_assemble_data_matrix(self, k=500, m=20, r=10):\n \"\"\"Test _assemble_data_matrix().\"\"\"\n # Get test data.\n Q_, _, U = _get_data(r, k, m)\n\n rom = self.Dummy(\"c\")\n for form in MODEL_FORMS:\n rom.modelform = form\n rom.r = r\n if 'B' in form:\n rom.m = m\n D = rom._assemble_data_matrix(Q_, U)\n d = opinf.lstsq.lstsq_size(form, r, m if 'B' in form else 0)\n assert D.shape == (k, d)\n\n # Spot check.\n if form == \"c\":\n assert np.allclose(D, np.ones((k, 1)))\n elif form == \"H\":\n assert np.allclose(D, opinf.utils.kron2c(Q_).T)\n elif form == \"G\":\n assert np.allclose(D, opinf.utils.kron3c(Q_).T)\n elif form == \"AB\":\n assert np.allclose(D[:, :r], Q_.T)\n assert np.allclose(D[:, r:], U.T)\n\n # Try with one-dimensional inputs as a 1D array.\n rom.modelform = \"cB\"\n rom.m = 1\n D = rom._assemble_data_matrix(Q_, U[0])\n assert D.shape == (k, 2)\n assert np.allclose(D, np.column_stack((np.ones(k), U[0])))\n\n def test_extract_operators(self, m=2, r=10):\n \"\"\"Test _extract_operators().\"\"\"\n shapes = {\n \"c_\": (r,),\n \"A_\": (r, r),\n \"H_\": (r, r*(r+1)//2),\n \"G_\": (r, r*(r+1)*(r+2)//6),\n \"B_\": (r, m),\n }\n\n rom = self.Dummy(\"\")\n\n for form in MODEL_FORMS:\n rom.modelform = form\n rom.r = r\n if 'B' in form:\n rom.m = m\n d = opinf.lstsq.lstsq_size(form, r, rom.m)\n Ohat = np.random.random((r, d))\n rom._extract_operators(Ohat)\n for prefix in MODELFORM_KEYS:\n attr = prefix+'_'\n assert hasattr(rom, attr)\n value = getattr(rom, attr)\n if prefix in form:\n assert opinf.operators.is_operator(value)\n assert value.shape == shapes[attr]\n else:\n assert value is None\n\n def test_fit(self, n=60, k=500, m=20, r=10):\n \"\"\"Test fit().\"\"\"\n # Get test data.\n Q, F, U = _get_data(n, k, m)\n U1d = U[0, :]\n Vr = la.svd(Q)[0][:, :r]\n args_n = [Q, F]\n args_r = [Vr.T @ Q, Vr.T @ F]\n\n # Fit the rom with each modelform.\n rom = self.Dummy(\"c\")\n for form in MODEL_FORMS:\n rom.modelform = form\n if \"B\" in form:\n # Two-dimensional inputs.\n rom.fit(Vr, *args_n, inputs=U) # With basis.\n rom.fit(None, *args_r, inputs=U) # Without basis.\n # One-dimensional inputs.\n rom.fit(Vr, *args_n, inputs=U1d) # With basis.\n rom.fit(None, *args_r, inputs=U1d) # Without basis.\n else:\n # No inputs.\n rom.fit(Vr, *args_n, inputs=None) # With basis.\n rom.fit(None, *args_r, inputs=None) # Without basis.\n\n # Special case: fully intrusive.\n rom.modelform = \"BA\"\n _, A, _, _, B = _get_operators(n, m)\n rom.fit(Vr, None, None, known_operators={\"A\": A, \"B\": B})\n assert rom.solver_ is None\n assert opinf.operators.is_operator(rom.A_)\n assert opinf.operators.is_operator(rom.B_)\n assert np.allclose(rom.A_.entries, Vr.T @ A @ Vr)\n assert np.allclose(rom.B_.entries, Vr.T @ B)\n\n # Model persistence -------------------------------------------------------\n def test_save(self, n=15, m=2, r=3, target=\"_savemodeltest.h5\"):\n \"\"\"Test save().\"\"\"\n # Clean up after old tests.\n if os.path.isfile(target): # pragma: no cover\n os.remove(target)\n\n # Get a test model.\n Vr = np.random.random((n, r))\n rom = _trainedmodel(self.Dummy, \"cAHGB\", Vr, m)\n\n def _checkfile(filename, rom, hasbasis):\n assert os.path.isfile(filename)\n with h5py.File(filename, 'r') as data:\n # Check metadata.\n assert \"meta\" in data\n assert len(data[\"meta\"]) == 0\n assert data[\"meta\"].attrs[\"modelform\"] 
== rom.modelform\n\n # Check basis\n if hasbasis:\n assert \"basis\" in data\n assert np.all(data[\"basis/entries\"][:] == Vr)\n else:\n assert \"basis\" not in data\n\n # Check operators\n assert \"operators\" in data\n if \"c\" in rom.modelform:\n assert np.all(data[\"operators/c_\"][:] == rom.c_.entries)\n else:\n assert \"c_\" not in data[\"operators\"]\n if \"A\" in rom.modelform:\n assert np.all(data[\"operators/A_\"][:] == rom.A_.entries)\n else:\n assert \"A_\" not in data[\"operators\"]\n if \"H\" in rom.modelform:\n assert np.all(data[\"operators/H_\"][:] == rom.H_.entries)\n else:\n assert \"H_\" not in data[\"operators\"]\n if \"G\" in rom.modelform:\n assert np.all(data[\"operators/G_\"][:] == rom.G_.entries)\n else:\n assert \"G_\" not in data[\"operators\"]\n if \"B\" in rom.modelform:\n assert np.all(data[\"operators/B_\"][:] == rom.B_.entries)\n else:\n assert \"B_\" not in data[\"operators\"]\n\n rom.save(target, save_basis=False)\n _checkfile(target, rom, False)\n\n with pytest.raises(FileExistsError) as ex:\n rom.save(target, overwrite=False)\n assert ex.value.args[0] == f\"{target} (overwrite=True to ignore)\"\n\n rom.save(target, save_basis=True, overwrite=True)\n _checkfile(target, rom, True)\n\n rom = _trainedmodel(self.Dummy, \"c\", Vr, 0)\n rom.save(target, overwrite=True)\n _checkfile(target, rom, True)\n\n rom = _trainedmodel(self.Dummy, \"AB\", Vr, m)\n rom.basis = None\n rom.save(target, save_basis=True, overwrite=True)\n _checkfile(target, rom, False)\n\n # Check that save() and load() are inverses.\n rom.basis = Vr\n rom.save(target, save_basis=True, overwrite=True)\n rom2 = rom.load(target)\n assert rom2 is not rom\n assert rom2.basis == rom.basis\n assert rom2 == rom\n for attr in [\"n\", \"m\", \"r\", \"modelform\", \"__class__\"]:\n assert getattr(rom, attr) == getattr(rom2, attr)\n for attr in [\"A_\", \"B_\"]:\n got = getattr(rom2, attr)\n assert opinf.operators.is_operator(got)\n assert np.all(getattr(rom, attr).entries == got.entries)\n for attr in [\"c_\", \"H_\", \"G_\"]:\n assert getattr(rom, attr) is getattr(rom2, attr) is None\n\n # Check basis = None functionality.\n rom.basis = None\n rom.save(target, overwrite=True)\n rom2 = rom.load(target)\n assert rom2 is not rom\n assert rom2 == rom\n for attr in [\"m\", \"r\", \"modelform\", \"__class__\"]:\n assert getattr(rom, attr) == getattr(rom2, attr)\n for attr in [\"A_\", \"B_\"]:\n got = getattr(rom2, attr)\n assert opinf.operators.is_operator(got)\n assert np.all(getattr(rom, attr).entries == got.entries)\n for attr in [\"n\", \"c_\", \"H_\", \"G_\", \"basis\"]:\n assert getattr(rom, attr) is getattr(rom2, attr) is None\n\n os.remove(target)\n\n def test_load(self, n=20, m=2, r=5, target=\"_loadmodeltest.h5\"):\n \"\"\"Test load().\"\"\"\n # Get test operators.\n Vr = np.random.random((n, r))\n c_, A_, H_, G_, B_ = _get_operators(n=r, m=m)\n\n # Clean up after old tests if needed.\n if os.path.isfile(target): # pragma: no cover\n os.remove(target)\n\n # Make an empty HDF5 file to start with.\n with h5py.File(target, 'w'):\n pass\n\n with pytest.raises(ValueError) as ex:\n rom = self.Dummy.load(target)\n assert ex.value.args[0] == \"invalid save format (meta/ not found)\"\n\n # Make a partially compatible HDF5 file to start with.\n with h5py.File(target, 'a') as hf:\n # Store metadata.\n meta = hf.create_dataset(\"meta\", shape=(0,))\n meta.attrs[\"modelform\"] = \"cAB\"\n\n with pytest.raises(ValueError) as ex:\n rom = self.Dummy.load(target)\n assert ex.value.args[0] == \"invalid save format 
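The FileExistsError branch exercised above pins down an exact message. A minimal sketch of the guard that save() presumably performs before writing (hypothetical helper, not the actual opinf method, which also writes the HDF5 groups):

import os

def _check_overwrite(savefile, overwrite=False):
    # Refuse to clobber an existing file unless overwrite=True, using the
    # message format the test asserts.
    if os.path.isfile(savefile) and not overwrite:
        raise FileExistsError(f"{savefile} (overwrite=True to ignore)")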
(operators/ not found)\"\n\n # Store the arrays.\n with h5py.File(target, 'a') as hf:\n hf.create_dataset(\"operators/c_\", data=c_)\n hf.create_dataset(\"operators/A_\", data=A_)\n hf.create_dataset(\"operators/B_\", data=B_)\n\n def _check_model(rom):\n assert isinstance(rom, self.Dummy)\n for attr in [\"modelform\",\n \"n\", \"r\", \"m\",\n \"c_\", \"A_\", \"H_\", \"G_\", \"B_\", \"basis\"]:\n assert hasattr(rom, attr)\n assert rom.modelform == \"cAB\"\n assert rom.r == r\n assert rom.m == m\n for attr in [\"c_\", \"A_\", \"B_\"]:\n assert opinf.operators.is_operator(getattr(rom, attr))\n assert np.all(rom.c_.entries == c_)\n assert np.all(rom.A_.entries == A_)\n assert rom.H_ is None\n assert rom.G_ is None\n assert np.all(rom.B_.entries == B_)\n\n # Load the file correctly.\n rom = self.Dummy.load(target)\n _check_model(rom)\n assert rom.basis is None\n assert rom.n is None\n\n # Add the basis and then load the file correctly.\n basis = opinf.basis.LinearBasis().fit(Vr)\n with h5py.File(target, 'a') as hf:\n hf[\"meta\"].attrs[\"BasisClass\"] = \"LinearBasis\"\n basis.save(hf.create_group(\"basis\"))\n rom = self.Dummy.load(target)\n _check_model(rom)\n assert isinstance(rom.basis, type(basis))\n assert np.all(rom.basis.entries == Vr)\n assert rom.n == n\n\n # One additional test to cover other cases.\n with h5py.File(target, 'a') as f:\n f[\"meta\"].attrs[\"modelform\"] = \"HG\"\n f.create_dataset(\"operators/H_\", data=H_)\n f.create_dataset(\"operators/G_\", data=G_)\n\n rom = self.Dummy.load(target)\n assert isinstance(rom, self.Dummy)\n for attr in [\"modelform\",\n \"n\", \"r\", \"m\",\n \"c_\", \"A_\", \"H_\", \"G_\", \"B_\", \"basis\"]:\n assert hasattr(rom, attr)\n assert rom.modelform == \"HG\"\n assert rom.r == r\n assert rom.m == 0\n for attr in [\"H_\", \"G_\"]:\n assert opinf.operators.is_operator(getattr(rom, attr))\n assert rom.c_ is None\n assert rom.A_ is None\n assert np.all(rom.H_.entries == H_)\n assert np.all(rom.G_.entries == G_)\n assert rom.B_ is None\n assert isinstance(rom.basis, type(basis))\n assert np.all(rom.basis.entries == Vr)\n assert rom.n == n\n\n # Clean up.\n os.remove(target)\n","sub_path":"tests/roms/nonparametric/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":20032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"12354287","text":"# -*- coding: utf-8 -*-\nfrom gurobipy import *\nfrom flask import Flask\nfrom flask import render_template\n\napp = Flask(__name__)\n\nport = int(os.getenv(\"PORT\", 9099))\ncomputeServer = str(os.getenv(\"GUROBISERVER\", \"gurobi-preprod.phx.gapinc.dev\"))\n\ndef optimize():\n try:\n message = []\n\n # Env\n e = Env.ClientEnv(logfilename=\"\", computeServers=computeServer)\n\n # Create a new model\n m = Model(\"mip1\", e)\n\n # Create variables\n x = m.addVar(vtype=GRB.BINARY, name=\"x\")\n y = m.addVar(vtype=GRB.BINARY, name=\"y\")\n z = m.addVar(vtype=GRB.BINARY, name=\"z\")\n\n # Integrate new variables\n m.update()\n\n # Set objective\n message.append(\"Maximize x + y + 2z \\n\")\n m.setObjective(x + y + 2 * z, GRB.MAXIMIZE)\n\n # Add constraint: x + 2 y + 3 z <= 4\n message.append(\"Subject to:\\t x + 2y + 3z <= 4\\n\")\n m.addConstr(x + 2 * y + 3 * z <= 4, \"c0\")\n\n # Add constraint: x + y >= 1\n m.addConstr(x + y >= 1, \"c1\")\n message.append(\"Subject to:\\t x + y >= 1\\n\")\n\n m.optimize()\n\n for v in m.getVars():\n print('%s %g' % (v.varName, v.x))\n message.append(\"%s = %g \\n\" % (v.varName, v.x))\n\n\n 
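Since the model being assembled here has only three binary variables, its optimum is easy to cross-check without a solver license. A brute-force verification in pure stdlib (not part of the Gurobi API):

from itertools import product

# Maximize x + y + 2z subject to x + 2y + 3z <= 4 and x + y >= 1, binaries.
best = max(
    (x + y + 2 * z, (x, y, z))
    for x, y, z in product((0, 1), repeat=3)
    if x + 2 * y + 3 * z <= 4 and x + y >= 1
)
print(best)  # (3, (1, 0, 1)) -- the objective Gurobi should report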
print('Obj: %g' % m.objVal)\n message.append(\"Max value of x + y + 2z subject to constraints is %g \\n\" % m.objVal)\n return render_template('optimize.html', obj=m.objVal, x=x.X, y=y.X, z=z.X)\n\n except GurobiError:\n print('Error reported')\n\n\n@app.route(\"/\")\ndef get_data():\n return \"Hello: instance \" + str(os.getenv(\"CF_INSTANCE_INDEX\", 0))\n\n\n@app.route(\"/optimize\")\ndef do_optimize():\n return optimize()\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=port)\n","sub_path":"quick/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"450438921","text":"'''\nInput: a List of integers as well as an integer `k` representing the size of the sliding window\nReturns: a List of integers\n'''\ndef sliding_window_max(nums, k):\n # First Pass:\n # Assuming K is never bigger than the array\n # start with index, then look i + 1, i + 2... i + k\n # store the greatest in the output array\n # check that i + k is never greater than the last index\n # return the output array\n # this is O(kn) time. Since k isn't a constant, this can get ugly\n # but this is just a first pass\n output = []\n\n for i in range(len(nums)):\n # I did this with explicit minuses so that I didn't lose track\n # of the zero indexing. If we think of a window as primarly being\n # indices, then we have to offset k by one for zero indexing.\n if i + (k - 1) > (len(nums) - 1):\n return output\n\n greatest = nums[i]\n\n for j in range(k):\n if greatest < nums[i+j]:\n greatest = nums[i+j]\n \n output.append(greatest)\n\n\nif __name__ == '__main__':\n # Use the main function here to test out your implementation \n arr = [1, 3, -1, -3, 5, 3, 6, 7]\n k = 3\n\n print(f\"Output of sliding_window_max function is: {sliding_window_max(arr, k)}\")\n","sub_path":"sliding_window_max/sliding_window_max.py","file_name":"sliding_window_max.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"294585977","text":"import numpy as np\nimport cv2, sys, time\nimport pyqtgraph as pg\n\nimport numpy as np\n\n\ndef normality(means,vars,pr_cluster_data,data):\n #means[CLUSTERS]\n #vars[CLUSTERS]\n #pr_cluster_data[CLUSTERS][256]\n #data[256]\n\n CLUSTERS=len(means)\n residue=np.zeros((CLUSTERS),dtype=float)\n pr_fg_cluster=np.zeros((CLUSTERS),dtype=float)\n\n\n for cl in range(CLUSTERS):\n\n normalFit=dnpmf(means[cl],vars[cl])\n\n thisCluster=np.multiply(data,pr_cluster_data[cl][:]) #element by element multiplication\n np.divide(thisCluster,np.sum(thisCluster)) #normalize\n\n residue[cl]=squareDifference(normalFit,thisCluster)\n\n\n for cl in range(CLUSTERS):\n pr_fg_cluster=np.divide(residue[cl],np.sum(residue))\n\n #this is the probability of a particular cluster belonging to the foreground.\n #export the video and see with\n #a. Digital for a probability thresold\n #b. Greyscale\n\n\n\n\n\n# print(means,vars,residue,np.multiply(residue,vars)) #print all these 3 for every cluster\n\n\n\n\n\n\n\n\ndef squareDifference(ar1,ar2):\n return np.sum(np.power(np.subtract(ar1,ar2),2)) #individual element to power 2 and then sum,\n\n\ndef gaussian(x, mean, var):\n return np.exp(-np.power(x - mean, 2.) 
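The comments in sliding_window_max() above already flag the O(kn) cost of the nested loop. The standard improvement is a monotonic deque, which makes the scan linear; a sketch:

from collections import deque

def sliding_window_max_linear(nums, k):
    # dq holds indices whose values are in decreasing order, so the window
    # maximum is always nums[dq[0]]; each index enters and leaves the deque
    # at most once, giving O(n) overall.
    dq, output = deque(), []
    for i, n in enumerate(nums):
        while dq and nums[dq[-1]] <= n:
            dq.pop()
        dq.append(i)
        if dq[0] == i - k:  # oldest candidate slid out of the window
            dq.popleft()
        if i >= k - 1:
            output.append(nums[dq[0]])
    return output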
/ (2 * var))\n\ndef dnpmf(mean,var): #discrete normal probability mass function\n pmf=np.zeros((256))\n for x in range(256):\n pmf[x]=gaussian(x,mean,var)\n return pmf/np.sum(pmf)\n\n\n\n\n\n\ndef gmm(data, clusters, ITERATIONS):\n # init\n data.astype('float')\n data += 1\n\n length = data.size\n condProb_cluster_pixel = np.zeros(shape=(clusters, length), dtype=np.float32)\n condProb_pixel_cluster = np.ndarray(shape=(length, clusters), dtype=np.float32)\n\n mean_cluster = np.ndarray(shape=(clusters,), dtype=np.float32)\n var_cluster = np.ones(shape=(clusters,), dtype=np.float32)\n prob_cluster = np.ndarray(shape=(clusters,), dtype=np.float32)\n\n for x in range(length):\n for c in range(clusters):\n condProb_cluster_pixel[np.random.randint(0, clusters)][x] = 1\n\n for i in range(ITERATIONS): # ITERATIONS\n\n # cluster means\n for c in range(clusters):\n if var_cluster[c] < 1e-4:\n continue\n D = np.sum(condProb_cluster_pixel[c] * data)\n N = np.sum(condProb_cluster_pixel[c] * data * np.arange(length))\n mean_cluster[c] = N / D\n\n # cluster variances\n for c in range(clusters):\n if var_cluster[c] < 1e-4:\n continue\n D = np.sum(condProb_cluster_pixel[c] * data)\n N = np.sum(condProb_cluster_pixel[c] * data * np.power(np.arange(length) - mean_cluster[c], 2))\n var_cluster[c] = N / D\n if var_cluster[c] < 1e-4:\n var_cluster[c] = 1e-4\n\n # cluster probabilitis\n for c in range(clusters):\n if var_cluster[c] < 1e-4:\n continue\n D = len(condProb_cluster_pixel[c])\n N = np.sum(condProb_cluster_pixel[c])\n prob_cluster[c] = N / D\n\n # prob(pixel | cluster)\n for c in range(clusters):\n if var_cluster[c] < 1e-4:\n continue\n condProb_pixel_cluster[:, c] = (1 / np.sqrt(2 * np.pi * var_cluster[c])) * np.exp(\n -1 * np.power(np.arange(length) - mean_cluster[c], 2) / (2 * var_cluster[c]))\n\n # prob(cluster | pixel)\n for c in range(clusters):\n if var_cluster[c] < 1e-4:\n continue\n _N = condProb_pixel_cluster[:, c] * data\n _D = np.sum(condProb_pixel_cluster * np.repeat(data, clusters).reshape((-1, clusters)), 1)\n condProb_cluster_pixel[c] = (_N / _D)\n\n return mean_cluster, var_cluster, prob_cluster\n\n\ndef find_fg(mean, var, H, var_thresh, prob_thresh):\n m = H.max()\n\n cc = []\n for i in range(len(H)):\n for j in range(len(H[0])):\n if H[i, j] == m:\n cc = [i, j]\n\n mask = np.ones(256)\n\n tot = (1 / np.sqrt(2 * np.pi * var[cc[0]])) * np.exp(\n -1 * np.power(np.arange(256) - mean[cc[0]], 2) / (2 * var[cc[0]])) + \\\n (1 / np.sqrt(2 * np.pi * var[cc[1]])) * np.exp(\n -1 * np.power(np.arange(256) - mean[cc[1]], 2) / (2 * var[cc[1]]))\n tot = tot / np.sum(tot)\n for i in range(256):\n t1 = var[cc[0]]\n t2 = var[cc[1]]\n\n if i < (mean[cc[0]] + t1 and i > mean[cc[0]] - t1) or (mean[cc[1]] + t2 and i > mean[cc[1]] - t2):\n mask[i] = 0\n\n return mask, tot\n\n\ndef build_fg_mask(history, fg_color_mask):\n height, width = len(history[0]), len(history[0][0])\n fg_mask = np.zeros((len(history), height, width, 1))\n\n N = len(history)\n print(\"Finding fg matrix\")\n tot_iter = N * width * height\n counter = 0\n comp = 0\n start_t = time.time()\n for i in range(N):\n for x in range(width):\n for y in range(height):\n counter += 1\n if (counter % int(tot_iter / 100) == 0):\n sys.stdout.write('\\r')\n comp += 1\n t_per_iter = (time.time() - start_t) / int(tot_iter / 100)\n sys.stdout.write(\n \"[%-20s] %d%% ETA=%.2f\" % ('=' * int(comp / 5), comp, (tot_iter - counter) * t_per_iter))\n sys.stdout.flush()\n start_t = time.time()\n\n fg_mask[i, y, x, 0] = fg_color_mask[y, x, history[i, y, x, 1]]\n 
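One way to sanity-check the gmm() EM routine above is to feed it a histogram with two well-separated modes. Note that gmm() discards the result of data.astype('float'), so it is safest to pass float data in; the parameters below are illustrative, and EM with random one-hot initialization is not guaranteed to settle on the same clustering every run:

import numpy as np

rng = np.random.default_rng(0)
samples = np.concatenate([rng.normal(60, 5, 5000), rng.normal(180, 8, 5000)])
hist, _ = np.histogram(np.clip(samples, 0, 255), bins=256, range=(0, 256))
means, variances, weights = gmm(hist.astype(float), 2, 10)
print(sorted(means))  # expected to land near 60 and 180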
print(\"\")\n return fg_mask\n\n\ndef plot_helper(curves, data):\n for i in range(len(curves)):\n curves[i].setData(data[i])\n pg.QtGui.QApplication.processEvents()\n\n\ndef load_log(clusters, iter, start, end, file, height, width):\n log = open(\"log_agmm.txt\", 'r')\n\n load = False\n mean, var, prob = {}, {}, {}\n for x in range(width):\n for y in range(height):\n mean[\"{},{}\".format(x, y)] = None\n var[\"{},{}\".format(x, y)] = None\n prob[\"{},{}\".format(x, y)] = None\n\n for line in log:\n v = line.strip().split()\n if v[0] == \"NE\":\n if v[1:] == list(map(str, [clusters, iter, start, end, file])):\n load = True\n else:\n load = False\n\n if load:\n if v[0] == \"MEAN\":\n mean[v[1]] = list(map(float, v[2].split(',')))\n elif v[0] == \"VAR\":\n var[v[1]] = list(map(float, v[2].split(',')))\n elif v[0] == \"PROB\":\n prob[v[1]] = list(map(float, v[2].split(',')))\n return mean, var, prob\n\n\ndef display_results(start, end, _input, _output):\n cap = cv2.VideoCapture(_output)\n for i in range(start, end):\n i_frame = cv2.imread(_input.format(i))\n _, o_frame = cap.read()\n cv2.imshow(\"input\", i_frame)\n cv2.imshow(\"output\", o_frame)\n if cv2.waitKey(20) & 0xFF == 27:\n break\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n\n cap = cv2.VideoCapture(\"output_video.avi\")\n frame_num = 1\n\n file_name = './dataset/dynamicBackground/boats/input/in{0:0>6}.jpg'\n\n frame = cv2.imread(file_name.format(frame_num))\n # _,frame = cap.read()\n clusters = 5\n iterations = 10\n\n _height, _width = frame.shape[0], frame.shape[1]\n image_name = '{} {}X{}'.format(file_name, _height, _width)\n print(_height, _width)\n\n N = 700\n history = np.zeros((N, _height, _width, 3), dtype=int)\n history[0, :, :, :] = frame\n\n pw = pg.GraphicsWindow()\n pl = pw.addPlot()\n pl.setYRange(0, 1, padding=0)\n\n color_mask, hist, min_var_tot = pl.plot(fillLevel=0, brush=(255, 255, 255, 40)), pl.plot(pen=(1, 2)), pl.plot(\n brush=(255, 0, 255, 100))\n\n g_curves = []\n for i in range(clusters):\n g_curves.append(pl.plot(fillLevel=i, pen=(i, clusters)))\n pg.QtGui.QApplication.processEvents()\n\n start = 0\n end = _height\n\n while (1):\n sys.stdout.write('\\r')\n sys.stdout.write(\"%d\" % frame_num)\n\n frame = cv2.imread(file_name.format(frame_num))\n # _,frame = cap.read()\n try:\n history[frame_num % N, :, :, :] = frame\n except:\n break\n\n if frame_num % N == 0:\n\n mean_dict, var_dict, prob_dict = load_log(clusters, iterations, frame_num - N, frame_num, file_name,\n _height, _width)\n H = np.zeros((_height, _width, clusters, clusters))\n\n log = open('log_agmm.txt', 'a')\n log.write(\"NE \" + str(clusters) + \" \" + str(iterations) + \" \" + str(frame_num - N) + \" \" + str(\n frame_num) + \" \" + file_name + '\\n')\n\n # estimating gaussians\n print(\"\\nestimating gaussians\")\n tot_iter = (end - start) * _width\n counter, start_t = 0, time.time()\n for y in range(start, end):\n for x in range(_width):\n counter += 1\n if (counter % int(tot_iter / 100) == 0):\n sys.stdout.write('\\r')\n comp = (y * _width + x + 1) / tot_iter\n t_per_iter = (time.time() - start_t) / int(tot_iter / 100)\n sys.stdout.write(\"[%-20s] %d%% ETA=%.2f\" % (\n '=' * int(comp * 20), 100 * comp, (tot_iter - counter) * t_per_iter))\n sys.stdout.flush()\n start_t = time.time()\n\n index = \"{},{}\".format(x, y)\n if (mean_dict[index] != None and var_dict[index] != None):\n continue\n\n data = np.zeros(256)\n for i in range(N):\n data[history[i, y, x, 1]] += 1\n\n mean_cluster, var_cluster, prob_cluster = gmm(data, 
clusters, iterations)\n\n mean_dict[index] = mean_cluster\n var_dict[index] = var_cluster\n prob_dict[index] = prob_cluster\n\n log.write(\"MEAN \" + index + \" \" + \",\".join(map(str, mean_cluster)) + '\\n')\n log.write(\"VAR \" + index + \" \" + \",\".join(map(str, var_cluster)) + '\\n')\n log.write(\"PROB \" + index + \" \" + \",\".join(map(str, prob_cluster)) + '\\n')\n print('\\n')\n\n # finding fg color masks\n print(\"finding color masks\")\n counter, start_t = 0, time.time()\n fg_color_mask = np.zeros((_height, _width, 256))\n for y in range(start, end):\n for x in range(_width):\n counter += 1\n if (counter % int(tot_iter / 100) == 0):\n sys.stdout.write('\\r')\n comp = (y * _width + x + 1) / tot_iter\n t_per_iter = (time.time() - start_t) / int(tot_iter / 100)\n sys.stdout.write(\"[%-20s] %d%% ETA=%.2f\" % (\n '=' * int(comp * 20), 100 * comp, (tot_iter - counter) * t_per_iter))\n sys.stdout.flush()\n start_t = time.time()\n\n index = \"{},{}\".format(x, y)\n if mean_dict[index] is None:\n continue\n\n###############################################\n normality()\n #pass the paramenters\n # normality(means,vars,pr_cluster_data,data)\n###############################################\n fg_color_mask[y, x, :], tot = find_fg(mean_dict[index], var_dict[index], H[y, x, :, :], 2000, 0.00)\n\n if index == \"240,200\":\n data = np.zeros(256)\n for i in range(N):\n data[history[i, y, x, 1]] += 1\n plot_helper([hist, min_var_tot, color_mask], [data / np.max(data), tot, fg_color_mask[y, x, :]])\n print('\\n')\n\n # finding fg\n fg = build_fg_mask(history, fg_color_mask)\n\n writer = cv2.VideoWriter('fg{}.avi'.format(frame_num - N), -1, N, (_width, _height), False)\n for i in range(N):\n writer.write((255 * fg[i]).astype('uint8'))\n writer.release()\n\n # display results\n display_results(frame_num - N, frame_num, file_name, 'fg{}.avi'.format(frame_num - N))\n\n frame_num += 1\n if cv2.waitKey(20) & 0xFF == 27:\n break\n\n _ = input()\n cv2.destroyAllWindows()\n","sub_path":"GMM/gmm_normality.py","file_name":"gmm_normality.py","file_ext":"py","file_size_in_byte":12459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"400568295","text":"from component_sintax import ComponentSintax\nimport codecs\nimport json\nimport pdb\n\nclass ComponentsSintax:\n\n def __init__(self, desc_file_name):\n with codecs.open(desc_file_name, 'rw+', encoding='UTF-8') as data_file:\n data = json.load(data_file)\n self.get_components(data)\n\n def get_components(self, data):\n self.components = {}\n for c_name, value in data.iteritems():\n self.components[c_name] = ComponentSintax(value)\n\n def get_value(self, c_name, text, structure=True):\n component = self.components[c_name]\n return component.get_value(text, structure)\n","sub_path":"bibliographic_search_tool/searchtool/bibliographic_lib/bibliographic_parser/article_components/components_sintax.py","file_name":"components_sintax.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"300907174","text":"\"\"\"High-level API.\n\n\"\"\"\nfrom volumina.pixelpipeline.datasources import *\nfrom volumina.pixelpipeline.datasourcefactories import *\nfrom volumina.layer import *\nfrom volumina.layerstack import LayerStackModel\nfrom volumina.volumeEditor import VolumeEditor\nfrom volumina.navigationControler import NavigationInterpreter\nfrom volumina import colortables\n\nfrom PyQt4.QtCore import QTimer\nfrom PyQt4.QtGui import 
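dict.iteritems() in ComponentsSintax.get_components() above is Python 2 only, and the 'rw+' open mode is rejected by Python 3. A Python 3 sketch of the same loader (component_cls stands in for ComponentSintax, whose constructor contract is assumed from the import):

import codecs
import json

def get_components_py3(desc_file_name, component_cls):
    # Read-only access is enough here; the original 'rw+' mode raises
    # ValueError under Python 3's open().
    with codecs.open(desc_file_name, encoding='UTF-8') as data_file:
        data = json.load(data_file)
    return {name: component_cls(value) for name, value in data.items()}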
QMainWindow, QApplication, QIcon, QAction, qApp\nfrom PyQt4.uic import loadUi\n\nimport os\nimport random\n\n_has_lazyflow = True\ntry:\n from lazyflow.operators.adaptors import Op5ifyer\nexcept ImportError as e:\n exceptStr = str(e)\n _has_lazyflow = False\nfrom volumina.adaptors import Array5d\n\n\n#******************************************************************************\n# C l i c k a b l e S e g m e n t a t i o n L a y e r * \n#******************************************************************************\n\nclass ClickableSegmentationLayer(QObject):\n\n #whether label (int) is shown (true) or hidden (false)\n clickedValue = pyqtSignal(int, bool, QColor)\n\n def __init__(self, seg, viewer, name=None, direct=None, parent=None, colortable=None, reuseColors=True):\n \"\"\" seg: segmentation image/volume (5D) \n reuseColors: if True, colors are assigned based on the number of currently visible objects,\n if False, a segment with 'label' is assigned colortable[label] as color\n \"\"\"\n super(ClickableSegmentationLayer, self).__init__(parent)\n\n assert seg.ndim == 5\n\n #public attributes \n self.layer = None #volumina layer object\n self.relabelingSource = None #RelabelingArraySource\n self._reuseColors = reuseColors\n\n self._M = seg.max()\n self._clickedObjects = dict() #maps from object to the label that is used for it\n self._usedLabels = set()\n self._seg = seg\n\n relabeling = numpy.zeros(self._M+1, dtype=self._seg.dtype)\n\n #add layer\n if colortable is None:\n colortable = volumina.layer.generateRandomColors(1000, \"hsv\", {\"v\": 1.0}, zeroIsTransparent=True)\n colortable[1:17] = colortables.default16\n \n layer, source = viewer.addRelabelingColorTableLayer(seg, clickFunctor=self.onClick, name=name,\n relabeling=relabeling, colortable=colortable, direct=direct)\n layer.zeroIsTransparent = True\n layer.colortableIsRandom = True\n self.layer = layer\n self.relabelingSource = source\n\n def setMaxLabel(self, l):\n self._M = l\n self.relabelingSource.setRelabeling(numpy.zeros(self._M+1, dtype=self._seg.dtype))\n\n def deselectAll(self):\n self._clickedObjects = dict()\n self._usedLabels = set()\n self.relabelingSource.clearRelabeling() \n \n def labelColor(self, label):\n \"\"\" return the current color for object 'label' \"\"\"\n color = self.layer.colorTable[label]\n color = QColor.fromRgba(color)\n return color\n \n def labelShown(self, label):\n return label in self._clickedObjects\n\n def toggleLabel(self, label):\n color = QColor()\n shown = True\n if label in self._clickedObjects:\n self.layer._datasources[0].setRelabelingEntry(label, 0)\n self._usedLabels.remove( self._clickedObjects[label] )\n del self._clickedObjects[label]\n shown = False\n else:\n self._labels = sorted(list(self._usedLabels))\n \n if self._reuseColors:\n #find first free entry\n if self._labels:\n for l in range(1, self._labels[-1]+2):\n if l not in self._labels:\n break\n assert l not in self._usedLabels\n else:\n l = 1\n else:\n l = label\n \n color = self.labelColor(l)\n\n self._usedLabels.add(l) \n self._clickedObjects[label] = l\n self.layer._datasources[0].setRelabelingEntry(label, l)\n self.clickedValue.emit(label, shown, color)\n\n def onClick(self, layer, pos5D, pos):\n obj = layer.data.originalData[pos5D]\n self.toggleLabel(obj)\n\n#******************************************************************************\n# V i e w e r *\n#******************************************************************************\n\nclass Viewer(QMainWindow):\n \"\"\"High-level API to view multi-dimensional 
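The reuseColors branch of toggleLabel() above searches for the first free relabeling entry. Isolated from the widget plumbing, the search reduces to this equivalent sketch:

def first_free_label(used_labels):
    # Smallest positive integer not yet assigned to a visible object --
    # the same result as the range(1, labels[-1] + 2) scan in toggleLabel().
    l = 1
    while l in used_labels:
        l += 1
    return l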
arrays.\n\n Properties:\n title -- window title\n\n \"\"\"\n def __init__(self, parent=None):\n QMainWindow.__init__(self, parent)\n uiDirectory = os.path.split(volumina.__file__)[0]\n if uiDirectory == '':\n uiDirectory = '.'\n loadUi(uiDirectory + '/viewer.ui', self)\n\n self._dataShape = None\n self._viewerInitialized = False\n self.editor = None\n self.viewingWidget = None\n self.actionQuit.triggered.connect(qApp.quit)\n \n #when connecting in renderScreenshot to a partial(...) function,\n #we need to remember the created function to be able to disconnect\n #to it later\n self._renderScreenshotDisconnect = None\n\n self.initLayerstackModel()\n\n self.actionCurrentView = QAction(QIcon(), \"Only for selected view\", self.menuView)\n f = self.actionCurrentView.font()\n f.setBold(True)\n self.actionCurrentView.setFont(f)\n\n self.editor = VolumeEditor(self.layerstack)\n\n #make sure the layer stack widget, which is the right widget\n #managed by the splitter self.splitter shows up correctly\n #TODO: find a proper way of doing this within the designer\n def adjustSplitter():\n s = self.splitter.sizes()\n s = [int(0.66*s[0]), s[0]-int(0.66*s[0])]\n self.splitter.setSizes(s)\n QTimer.singleShot(0, adjustSplitter)\n \n def initLayerstackModel(self):\n self.layerstack = LayerStackModel()\n self.layerWidget.init(self.layerstack)\n model = self.layerstack\n self.UpButton.clicked.connect(model.moveSelectedUp)\n model.canMoveSelectedUp.connect(self.UpButton.setEnabled)\n self.DownButton.clicked.connect(model.moveSelectedDown)\n model.canMoveSelectedDown.connect(self.DownButton.setEnabled)\n self.DeleteButton.clicked.connect(model.deleteSelected)\n model.canDeleteSelected.connect(self.DeleteButton.setEnabled)\n \n @property\n def dataShape(self):\n return self._dataShape\n @dataShape.setter\n def dataShape(self, s):\n if s is None:\n return\n assert len(s) == 5\n \n self._dataShape = s\n self.editor.dataShape = s\n if not self._viewerInitialized:\n self._viewerInitialized = True\n self.viewer.init(self.editor)\n #make sure the data shape is correctly set\n #(some signal/slot connections may be set up in the above init)\n self.editor.dataShape = s\n\n #FIXME: this code is broken\n #if its 2D, maximize the corresponding window\n #if len([i for i in list(self.dataShape)[1:4] if i == 1]) == 1:\n # viewAxis = [i for i in range(1,4) if self.dataShape[i] == 1][0] - 1\n # self.viewer.quadview.switchMinMax(viewAxis) \n \n def addGrayscaleLayer(self, a, name=None, direct=False):\n source,self.dataShape = createDataSource(a,True)\n layer = GrayscaleLayer(source, direct=direct)\n if name:\n layer.name = name\n self.layerstack.append(layer)\n return layer\n \n def addAlphaModulatedLayer(self, a, name=None):\n source,self.dataShape = createDataSource(a,True)\n layer = AlphaModulatedLayer(source)\n if name:\n layer.name = name\n self.layerstack.append(layer)\n return layer\n \n def addRGBALayer(self, a, name=None):\n assert a.shape[2] >= 3\n sources = [None, None, None,None]\n for i in range(3):\n sources[i], self.dataShape = createDataSource(a[...,i], True)\n if(a.shape[3] >= 4): \n sources[3], self.dataShape = createDataSource(a[...,3], True) \n layer = RGBALayer(sources[0],sources[1],sources[2], sources[3])\n if name:\n layer.name = name\n self.layerstack.append(layer)\n return layer\n\n def addRandomColorsLayer(self, a, name=None, direct=False):\n layer = self.addColorTableLayer(a, name, colortable=None, direct=direct)\n layer.colortableIsRandom = True\n layer.zeroIsTransparent = True\n return layer\n \n def 
addColorTableLayer(self, a, name=None, colortable=None, direct=False, clickFunctor=None):\n if colortable is None:\n colortable = self._randomColors()\n source,self.dataShape = createDataSource(a,True)\n if clickFunctor is None:\n layer = ColortableLayer(source, colortable, direct=direct)\n else:\n layer = ClickableColortableLayer(self.editor, clickFunctor, source, colortable, direct=direct)\n if name:\n layer.name = name\n self.layerstack.append(layer)\n return layer\n \n def addRelabelingColorTableLayer(self, a, name=None, relabeling=None, colortable=None, direct=False, clickFunctor=None, right=True):\n if colortable is None:\n colortable = self._randomColors()\n source = RelabelingArraySource(a)\n if relabeling is None:\n source.setRelabeling(numpy.zeros(numpy.max(a)+1, dtype=a.dtype))\n else:\n source.setRelabeling(relabeling)\n if colortable is None:\n colortable = [QColor(0,0,0,0).rgba(), QColor(255,0,0).rgba()]\n if clickFunctor is None:\n layer = ColortableLayer(source, colortable, direct=direct)\n else:\n layer = ClickableColortableLayer(self.editor, clickFunctor, source, colortable, direct=direct, right=right)\n if name:\n layer.name = name \n self.layerstack.append(layer)\n return (layer, source)\n \n def addClickableSegmentationLayer(self, a, name=None, direct=False, colortable=None, reuseColors=True):\n return ClickableSegmentationLayer(a, self, name=name, direct=direct, colortable=colortable, reuseColors=reuseColors) \n \n def _randomColors(self, M=256):\n \"\"\"Generates a pleasing color table with M entries.\"\"\"\n\n colors = []\n for i in range(M):\n if i == 0:\n colors.append(QColor(0, 0, 0, 0).rgba())\n else:\n h, s, v = random.random(), random.random(), 1.0\n color = numpy.asarray(colorsys.hsv_to_rgb(h, s, v)) * 255\n qColor = QColor(*color)\n colors.append(qColor.rgba())\n #for the first 16 objects, use some colors that are easily distinguishable\n colors[1:17] = colortables.default16 \n return colors\n \nif __name__ == \"__main__\":\n \n import sys\n from lazyflow.operators import OpImageReader\n from lazyflow.graph import Operator, OutputSlot, InputSlot\n from lazyflow.graph import Graph\n from vigra import VigraArray\n\n \n app = QApplication(sys.argv)\n viewer = Viewer()\n viewer.show()\n source1 = (numpy.random.random((100,100,1))) * 255\n viewer.addGrayscaleLayer(source1)\n \n class MyInterpreter(NavigationInterpreter):\n \n def __init__(self, navigationcontroler):\n NavigationInterpreter.__init__(self,navigationcontroler)\n \n def onMouseMove_default( self, imageview, event ):\n if imageview._ticker.isActive():\n #the view is still scrolling\n #do nothing until it comes to a complete stop\n return\n \n imageview.mousePos = mousePos = imageview.mapScene2Data(imageview.mapToScene(event.pos()))\n imageview.oldX, imageview.oldY = imageview.x, imageview.y\n x = imageview.x = mousePos.y()\n y = imageview.y = mousePos.x()\n self._navCtrl.positionCursor( x, y, self._navCtrl._views.index(imageview))\n \n #like this\n myInt = MyInterpreter\n viewer.editor.navigationInterpreterType = myInt\n \n #or like this\n tmpInt = viewer.editor.navigationInterpreterType\n tmpInt.onMouseMove_default = myInt.onMouseMove_default\n viewer.editor.navigationInterpreterType = tmpInt\n \n app.exec_()\n","sub_path":"volumina/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":12622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"300666774","text":"import sys, re, os, json\nimport pyphy\n\nrx_name = 
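The NAME-line handling in genome_parser below pulls the short names and the taxid out with two findall() passes. A worked example (the line content is illustrative, KEGG-style, not taken from real data):

import re

line = "NAME        eco, ecj, 511145"
print(re.findall(r'(\w+),', line))     # ['eco', 'ecj']
print(re.findall(r',\s+(\d+)', line))  # ['511145']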
re.compile(r'NAME\\s+(\\w+,)+\\s+(\\d+)')\nrx_definition = re.compile(r'DEFINITION\\s+(.+)')\n\ntop_folder = sys.argv[1]\nfilter_rank = sys.argv[2]\nfilter_taxon = sys.argv[3]\n\nshort_taxid = {}\n\ntaxid_taxon = {}\n\nparent_son = set()\n\ndesired_rank = [\"superkingdom\", \"phylum\", \"class\", \"order\", \"family\", \"genus\", \"species\", \"genome\"]\n\n#print (\",\".join([\"taxid\", \"genome_size\", \"name\"]))\n\nwith open(\"taxon.csv\", \"w\") as output:\n output.write(\",\".join([\"taxid\", \"name\", \"rank\"]) + \"\\n\")\n\nwith open(\"connections.csv\", \"w\") as output:\n output.write(\",\".join([\"from\", \"to\"]) + \"\\n\")\n \n\nfor (head, dirs, files) in os.walk(top_folder):\n for file in files:\n current_file_path = os.path.abspath(os.path.dirname(os.path.join(head, file)))\n with_name = os.path.join(current_file_path, file)\n\n short_cut = []\n definition = \"\"\n taxid = -1\n\n for line in open(with_name, 'r'):\n search_definition = rx_definition.search(line)\n\n if search_definition:\n definition = search_definition.group(1)\n\n \n if line.startswith(\"NAME\"):\n\n search_name = re.findall(r'(\\w+),', line)\n\n for name in search_name:\n short_cut.append(name)\n\n search_taxid = re.findall(r',\\s+(\\d+)', line)\n\n for t in search_taxid:\n taxid = int(t)\n\n\n #print (file)\n\n if taxid != -1:\n dict_path = pyphy.getDictPathByTaxid(taxid)\n\n dict_path[\"genome\"] = taxid\n quartett = [\"\"] * 2\n\n #print (dict_path)\n\n if filter_rank in dict_path and pyphy.getNameByTaxid(dict_path[filter_rank]) == filter_taxon:\n\n for rank in desired_rank:\n if rank in dict_path:\n name = definition.split(\",\")[0]\n if rank != \"genome\":\n name = pyphy.getNameByTaxid(dict_path[rank])\n\n if rank == \"superkingdom\":\n\n\n quartett[0] = dict_path[rank]\n\n if dict_path[rank] not in taxid_taxon:\n taxid_taxon[dict_path[rank]] = [name, rank]\n else:\n\n quartett[1] = dict_path[rank]\n\n if quartett[0] != quartett[1]:\n parent_son.add(tuple(quartett))\n\n if dict_path[rank] not in taxid_taxon:\n taxid_taxon[dict_path[rank]] = [name, rank]\n\n quartett[0] = quartett[1]\n\n with open(\"mapping.tsv\", \"a+\") as output:\n for short in short_cut:\n output.write(f\"{short}\\t{taxid}\\n\")\n\n#print (taxid_taxon)\n\n#print (parent_son)\n\nfor taxid in taxid_taxon:\n with open(\"taxon.csv\", \"a\") as output:\n output.write(\",\".join([f\"{taxid}\", f'{taxid_taxon[taxid][0]}', taxid_taxon[taxid][1]]) + \"\\n\")\n\nfor parent in parent_son:\n with open(\"connections.csv\", \"a\") as output:\n #[\"~id\", \"~from\", \"~to\", \"~label\"]\n output.write(\",\".join([f\"{parent[0]}\", f\"{parent[1]}\"]) + \"\\n\")","sub_path":"genome_parser.py","file_name":"genome_parser.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"532037651","text":"import requests\nfrom datetime import datetime\nimport smtplib\nimport time\n\nMY_LAT = -28.262350\nMY_LNG = -52.408989\n\nMY_EMAIL = \"matheus.pytests@gmail.com\"\nPASSWORD = \"smfc65wk%kf\"\n\ndef is_iss_overhead():\n response = requests.get(url=\"http://api.open-notify.org/iss-now.json\")\n response.raise_for_status()\n\n data = response.json()\n\n iss_longitude = float(data[\"iss_position\"][\"longitude\"])\n iss_latitude = float(data[\"iss_position\"][\"latitude\"])\n\n if (MY_LAT - 5 <= iss_latitude <= MY_LAT + 5) and (MY_LNG -5 <= iss_longitude <= MY_LNG +5):\n return True\n# Your position is within +5 or -5 degrees of the ISS position \ndef is_night():\n 
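The overhead test in is_iss_overhead() above reduces to a +/-5 degree bounding box around the observer. Factored into a pure helper so it can be unit-tested without hitting the API (hypothetical refactor):

def within_box(my_lat, my_lng, iss_lat, iss_lng, margin=5):
    # Same predicate as the chained comparisons in is_iss_overhead().
    return abs(my_lat - iss_lat) <= margin and abs(my_lng - iss_lng) <= margin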
parameters = {\n \"lat\": MY_LAT,\n \"lng\":MY_LNG,\n \"formatted\": 0,\n }\n\n response = requests.get(\"https://api.sunrise-sunset.org/json\", params=parameters)\n response.raise_for_status()\n data = response.json()\n sunrise = int(data[\"results\"][\"sunrise\"].split(\"T\")[1].split(\":\")[0]) - 2\n sunset = int(data[\"results\"][\"sunset\"].split(\"T\")[1].split(\":\")[0])\n hour_now = datetime.now().hour\n if hour_now > sunset or hour_now < sunrise:\n return True\n\nwhile True:\n time.sleep(60)\n if is_iss_overhead() and is_night():\n with smtplib.SMTP(\"smtp.gmail.com\") as connection:\n connection.starttls()\n connection.login(user=MY_EMAIL, password=PASSWORD)\n connection.sendmail(\n from_addr=MY_EMAIL,\n to_addrs=MY_EMAIL,\n msg=f\"Subject:Look Up\\n\\nThe ISS is above you in the sky.\"\n )\n\n","sub_path":"day_33/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"410971062","text":"import argparse\nimport codecs\nimport json\nimport logging\nimport os\nimport sys\nimport time\n\n#BASE_DIR = \"/Users/cbardas/instapy-log/\"\nBASE_DIR = \"/home/instapy-log/\"\n\nstdout = sys.stdout\nsys.stdout = codecs.getwriter('utf8')(sys.stdout)\nsys.path.append(os.path.join(sys.path[0], '../'))\nfileName = \"resize\" + time.strftime(\"%d.%m.%Y\") + \".log\"\nlogging.basicConfig(format='%(asctime)s %(message)s', filename=BASE_DIR + fileName, level=logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nlogger = logging.getLogger('[schedule]')\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(ch)\nimport requests\n\nparser = argparse.ArgumentParser(add_help=True)\nparser.add_argument('-id_droplet', type=str, help=\"id_droplet\")\nparser.add_argument('-size', type=str, help=\"size\")\nargs = parser.parse_args()\n\napiUrl = 'https://rest.angie.one/doapi/'\nauthKey = 'b5a42bd29ebc5697adcec0adf446c26e'\n#EIGHT_GB_SIZE = \"s-4vcpu-8gb\"\n#TWO_GB_SIZE = \"s-1vcpu-2gb\"\n\n#args.id_droplet = \"144926024\"\n#args.size = EIGHT_GB_SIZE\n\nif args.id_droplet is None:\n exit(\"dispatcher: Error: id_droplet is not specified !\")\n\nif args.size is None:\n exit(\"dispatcher: Error: size is not specified !\")\n\n\ndef getDropletStatus(id_droplet):\n url = apiUrl + \"droplet?id_droplet=\" + id_droplet\n logger.info(\"Requesting: %s\", url)\n r = requests.get(url, headers={'Authorization': authKey})\n result = r.content\n result = json.loads(result)\n\n print(result['droplet']['status'])\n\n\ndef shutdownDroplet(id_droplet):\n url = apiUrl + \"shutdown\"\n data = json.dumps({\"id_droplet\": id_droplet})\n logger.info(\"Shutdown: requesting: %s, with data: %s\" % (url, data))\n r = requests.post(url, headers={'Authorization': authKey}, data=data)\n result = r.content\n result = json.loads(result)\n\n logger.info(\"Shutdown: Action Status: %s, id: %s\" % (result['action']['status'], result['action']['id']))\n\n return result['action']['id']\n\n\ndef checkStatus(id_action):\n url = apiUrl + \"status?id_action=\" + str(id_action)\n logger.info(\"Requesting: %s\", url)\n r = requests.get(url, headers={'Authorization': authKey})\n result = r.content\n result = json.loads(result)\n\n logger.info(\"checkStatus: response: %s\", result)\n return result['action']['status']\n\n\ndef powerOn(id_droplet):\n url = apiUrl + \"power\"\n data = json.dumps({\"id_droplet\": id_droplet})\n 
logger.info(\"Requesting: %s, with data: %s\" % (url, data))\n r = requests.post(url, headers={'Authorization': authKey}, data=data)\n result = r.content\n result = json.loads(result)\n\n logger.info(\"Action Status: %s, id: %s\" % (result['action']['status'], result['action']['id']))\n\n return result['action']['id']\n\n\ndef powerOff(id_droplet):\n url = apiUrl + \"powerOff\"\n data = json.dumps({\"id_droplet\": id_droplet})\n logger.info(\"Requesting: %s, with data: %s\" % (url, data))\n r = requests.post(url, headers={'Authorization': authKey}, data=data)\n result = r.content\n result = json.loads(result)\n\n logger.info(\"Action Status: %s, id: %s\" % (result['action']['status'], result['action']['id']))\n\n return result['action']['id']\n\n\ndef resize(id_droplet, size):\n url = apiUrl + \"resize\"\n data = json.dumps({\"id_droplet\": id_droplet, \"size\": size})\n logger.info(\"Requesting: %s, with data: %s\" % (url, data))\n r = requests.post(url, headers={'Authorization': authKey}, data=data)\n result = r.content\n result = json.loads(result)\n\n logger.info(\"Resize response: %s\", result)\n logger.info(\"Action Status: %s, id: %s\" % (result['action']['status'], result['action']['id']))\n\n return result['action']['id']\n\n\nlogger.info(\"STARTING RESIZE PROCESS FOR DROPLET: %s, SIZE: %s\" % (args.id_droplet, args.size))\n\nwaitForShutdownMin = 1\n\n# SHUTDOWN THE DROPLET\nshutdownId = shutdownDroplet(args.id_droplet)\nlogger.info(\"Going to wait %s minutes for shutdown\", waitForShutdownMin)\ntime.sleep(waitForShutdownMin * 60)\nlogger.info(\"Done waiting, going to check the status.\")\nstatus = checkStatus(shutdownId)\n\nif status != \"completed\":\n logger.info(\"Shutdown failed, status: %s. Going to powerOff\", status)\n powerOffId = powerOff(args.id_droplet)\n logger.info(\"Going to wait %s minute for powerOff\", waitForShutdownMin)\n time.sleep(waitForShutdownMin * 60)\n powerOffStatus = checkStatus(powerOffId)\n\n if powerOffStatus != 'completed':\n logger.info(\"PowerOff failed, status: %s\", powerOffStatus)\n raise Exception('Could not powerOff the droplet.')\n\nlogger.info(\"Done shutting down, going to resize the droplet...\")\n\nresizeWaitMin = 5\nresizeId = resize(args.id_droplet, args.size)\nlogger.info(\"Going to wait %s minutes for resize\", resizeWaitMin)\ntime.sleep(resizeWaitMin * 60)\nlogger.info(\"done waiting, going to check the resize status\")\n\nresizeStatus = checkStatus(resizeId)\nif resizeStatus != 'completed':\n logger.info('Resize failed, status: %s', resizeStatus)\n raise Exception('Resize failed, status: %s', resizeStatus)\n\nlogger.info(\"Done resizing, going to startup the machine\")\nstartupId = powerOn(args.id_droplet)\nstartUpWait = 2\nlogger.info(\"Going to wait %s minutes for powerOn\" % (startUpWait))\n\ntime.sleep(resizeWaitMin * 60)\nlogger.info(\"Done waiting... 
going to check the startup status\")\nstartupStatus = checkStatus(startupId)\n\nif startupStatus != 'completed':\n logger.info(\"Startup failed, status: %s\", startupStatus)\n raise Exception(\"Startup failed...\")\n\nlogger.info(\"Done resizing the machine, going to exit !\")\n","sub_path":"resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":5587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"230725657","text":"from zoundry.appframework.global_services import getApplicationModel\r\nfrom zoundry.appframework.global_services import getResourceRegistry\r\nfrom zoundry.appframework.ui.util.fontutil import getDefaultFontBold\r\nfrom zoundry.appframework.ui.widgets.controls.dynamic.widgetfactory import ZWidgetFactory\r\nfrom zoundry.appframework.ui.widgets.controls.validating.standard.combobox import ZValidatingBitmapComboBox\r\nfrom zoundry.appframework.ui.widgets.controls.validating.standard.textctrl import ZValidatingTextCtrl\r\nfrom zoundry.appframework.ui.widgets.controls.validating.validatingctrl import ZBaseControlValidator\r\nfrom zoundry.appframework.ui.widgets.controls.validating.validatingctrl import ZNonEmptySelectionValidator\r\nfrom zoundry.appframework.ui.widgets.dialogs.wizard import ZAbstractPropertyBasedWizardPage\r\nfrom zoundry.appframework.ui.widgets.dialogs.wizard import ZAbstractPropertyWizardSession\r\nfrom zoundry.appframework.ui.widgets.dialogs.wizard import ZWizard\r\nfrom zoundry.appframework.ui.widgets.dialogs.wizard import ZWizardPage\r\nfrom zoundry.blogapp.constants import IZBlogAppServiceIDs\r\nfrom zoundry.blogapp.messages import _extstr\r\nfrom zoundry.blogapp.models.ui.wizard.newmediastoragemodel import ZNewMediaStorageWizardModel\r\nfrom zoundry.blogapp.services.mediastorage.mediastoragetype import IZMediaStorageCapabilities\r\nfrom zoundry.blogapp.ui.menus.mediastoragemanager import ZMediaStorageMenuActionContext\r\nfrom zoundry.blogapp.ui.menus.mediastoragemanager import ZTestMediaStorageMenuAction\r\nimport wx\r\n\r\n# ------------------------------------------------------------------------------------------\r\n# A wizard session object for use by the New Media Storage wizard.\r\n# ------------------------------------------------------------------------------------------\r\nclass ZNewMediaStorageWizardSession(ZAbstractPropertyWizardSession):\r\n\r\n def __init__(self):\r\n ZAbstractPropertyWizardSession.__init__(self)\r\n # end __init__()\r\n\r\n# end ZNewMediaStorageWizardSession\r\n\r\n\r\n# ------------------------------------------------------------------------------------------\r\n# The implementation of the New Media Storage wizard.\r\n# ------------------------------------------------------------------------------------------\r\nclass ZNewMediaStorageWizard(ZWizard):\r\n\r\n def __init__(self, parent):\r\n self.model = ZNewMediaStorageWizardModel()\r\n ZWizard.__init__(self, parent, ZNewMediaStorageWizardSession(), wx.ID_ANY, _extstr(u\"mediastoragewizard.NewMediaStorageWizard\")) #$NON-NLS-1$\r\n # end __init__()\r\n\r\n def _createWizardPages(self):\r\n self.sitePage = ZNewMediaStorageWizardSitePage(self.model, self)\r\n self.paramsPage = ZNewMediaStorageWizardParamsPage(self.model, self)\r\n self.confirmPage = ZNewMediaStorageWizardConfirmPage(self.model, self)\r\n self.addPage(self.sitePage)\r\n self.addPage(self.paramsPage)\r\n self.addPage(self.confirmPage)\r\n # end _createWizardPages()\r\n\r\n def _layoutWidgets(self):\r\n ZWizard._layoutWidgets(self)\r\n\r\n (w, 
h) = self.GetBestSizeTuple()\r\n w = max(w, 450)\r\n self.SetSize(wx.Size(w, h))\r\n # end _layoutWidgets()\r\n\r\n def _getDefaultImage(self):\r\n return getResourceRegistry().getImagePath(u\"images/wizards/newmediastorage.png\") #$NON-NLS-1$\r\n # end _getDefaultImage()\r\n\r\n def getMediaStorageName(self):\r\n return self.session.getProperty(u\"type-page.name\") #$NON-NLS-1$\r\n # endgetMediaStorageName()\r\n\r\n def getMediaSiteId(self):\r\n return self.session.getProperty(u\"type-page.site\").getId() #$NON-NLS-1$\r\n # end getMediaSiteId()\r\n\r\n def getMediaStorageProperties(self):\r\n return self.session.getProperty(u\"params-page.properties\") #$NON-NLS-1$\r\n # end getMediaStorageProperties()\r\n\r\n# end ZNewMediaStorageWizard\r\n\r\n\r\n# ------------------------------------------------------------------------------------------\r\n# The base class for all New Media Storage Wizard pages.\r\n# ------------------------------------------------------------------------------------------\r\nclass ZNewMediaStorageWizardPage(ZAbstractPropertyBasedWizardPage):\r\n\r\n def __init__(self, model, parent):\r\n ZAbstractPropertyBasedWizardPage.__init__(self, model, parent)\r\n # end __init__()\r\n\r\n def getImage(self):\r\n return None\r\n # end getImage()\r\n\r\n# end ZNewMediaStorageWizardPage\r\n\r\n\r\n# -------------------------------------------------------------------------------------\r\n# Validator for the value of the Media Storage Name field.\r\n# -------------------------------------------------------------------------------------\r\nclass ZMediaStorageNameValidator(ZBaseControlValidator):\r\n\r\n def __init__(self, model):\r\n self.model = model\r\n ZBaseControlValidator.__init__(self)\r\n # end __init__()\r\n\r\n def _isValid(self, value):\r\n if not value:\r\n return self._setReason(_extstr(u\"mediastoragewizard.EmptyStoreNameError\")) #$NON-NLS-1$\r\n\r\n if self.model.mediaStoreExists(value):\r\n return self._setReason(_extstr(u\"mediastoragewizard.StoreAlreadyExistsError\") % value) #$NON-NLS-1$\r\n\r\n return True\r\n # end _isValid()\r\n\r\n# end ZMediaStorageNameValidator\r\n\r\n\r\n# ------------------------------------------------------------------------------------------\r\n# The first page of the new media storage wizard. 
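ZMediaStorageNameValidator above shows the validator contract in this codebase: _isValid() returns True, or records a message via _setReason() and returns its False result. A second minimal example of the same pattern (hypothetical class, message illustrative):

class ZNonEmptyFieldValidator(ZBaseControlValidator):

    def _isValid(self, value):
        if not value or not value.strip():
            return self._setReason(u"This field must not be empty")
        return True
    # end _isValid()
# end ZNonEmptyFieldValidator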
This page displays a list of sites that\r\n# the user can choose from.\r\n# ------------------------------------------------------------------------------------------\r\nclass ZNewMediaStorageWizardSitePage(ZNewMediaStorageWizardPage):\r\n\r\n def __init__(self, model, parent):\r\n ZNewMediaStorageWizardPage.__init__(self, model, parent)\r\n self.customPages = None\r\n self.site = None\r\n # end __init__()\r\n\r\n def _createWidgets(self):\r\n self.description1 = wx.StaticText(self, wx.ID_ANY, _extstr(u\"mediastoragewizard.WelcomeMessage\")) #$NON-NLS-1$\r\n self.description1.SetFont(getDefaultFontBold())\r\n self.description2 = wx.StaticText(self, wx.ID_ANY, _extstr(u\"mediastoragewizard.WizardDescription\"), size = wx.Size(-1, 80)) #$NON-NLS-1$\r\n self.siteLabel = wx.StaticText(self, wx.ID_ANY, _extstr(u\"mediastoragewizard.MediaStorageType\")) #$NON-NLS-1$\r\n comboValidator = ZNonEmptySelectionValidator(_extstr(u\"mediastoragewizard.EmptyStoreTypeSelectionError\")) #$NON-NLS-1$\r\n self.siteCombo = ZValidatingBitmapComboBox(comboValidator, self, wx.ID_ANY, style = wx.CB_READONLY)\r\n self.siteCombo.SetToolTipString(_extstr(u\"mediastoragewizard.StoreTypeComboTooltip\")) #$NON-NLS-1$\r\n self.nameLabel = wx.StaticText(self, wx.ID_ANY, _extstr(u\"mediastoragewizard.MediaStorageName\")) #$NON-NLS-1$\r\n nameValidator = ZMediaStorageNameValidator(self._getModel())\r\n self.nameText = ZValidatingTextCtrl(nameValidator, self, wx.ID_ANY)\r\n self.nameText.SetToolTipString(_extstr(u\"mediastoragewizard.MediaStorageNameTooltip\")) #$NON-NLS-1$\r\n self.clickHereHyperlink = wx.HyperlinkCtrl(self, wx.ID_ANY, _extstr(u\"mediastoragewizard.NoStorageLink\"), u\"http://picasaweb.google.com/?ref=ZoundryRaven\") #$NON-NLS-2$ #$NON-NLS-1$\r\n # end _createWidgets()\r\n\r\n def _populateWidgets(self):\r\n mediaSites = self._getModel().getMediaSites()\r\n for site in mediaSites:\r\n iconPath = site.getIconPath()\r\n bitmap = getResourceRegistry().getBitmap(iconPath)\r\n self.siteCombo.Append(site.getDisplayName(), None, bitmap)\r\n # end _populateWidgets()\r\n\r\n def _bindWidgetEvents(self):\r\n self._bindValidatingWidget(self.siteCombo)\r\n self._bindValidatingWidget(self.nameText)\r\n # end _bindWidgetEvents()\r\n\r\n def _layoutWidgets(self):\r\n flexGridSizer = wx.FlexGridSizer(2, 2, 5, 5)\r\n flexGridSizer.AddGrowableCol(1)\r\n flexGridSizer.Add(self.siteLabel, 0, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)\r\n flexGridSizer.Add(self.siteCombo, 0, wx.EXPAND | wx.RIGHT, 5)\r\n flexGridSizer.Add(self.nameLabel, 0, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)\r\n flexGridSizer.Add(self.nameText, 0, wx.EXPAND | wx.RIGHT, 5)\r\n flexGridSizer.Add(wx.StaticText(self, wx.ID_ANY, u\"\"), 0, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5) #$NON-NLS-1$\r\n flexGridSizer.Add(self.clickHereHyperlink, 0, wx.EXPAND | wx.RIGHT, 5)\r\n\r\n box = wx.BoxSizer(wx.VERTICAL)\r\n box.Add(self.description1, 0, wx.EXPAND | wx.ALL, 10)\r\n box.Add(self.description2, 0, wx.EXPAND | wx.ALL, 10)\r\n box.AddSizer(flexGridSizer, 0, wx.EXPAND)\r\n self.SetAutoLayout(True)\r\n self.SetSizer(box)\r\n # end _layoutWidgets()\r\n\r\n def getDataProperties(self):\r\n rval = {}\r\n rval[u\"type-page.site\"] = self._getSelectedSite() #$NON-NLS-1$\r\n rval[u\"type-page.name\"] = self.nameText.GetValue() #$NON-NLS-1$\r\n return rval\r\n # end getDataProperties()\r\n\r\n def onEnter(self, session, eventDirection): #@UnusedVariable\r\n if eventDirection == ZWizardPage.NEXT:\r\n mediaSites = 
self._getModel().getMediaSites()\r\n idx = 0\r\n for site in mediaSites:\r\n if site.getId() == u\"zoundry.blogapp.mediastorage.site.picasa\": #$NON-NLS-1$\r\n self.siteCombo.Select(idx)\r\n self.siteCombo.validate()\r\n self.nameText.SetFocus()\r\n return\r\n idx = idx + 1\r\n\r\n self.siteCombo.SetFocus()\r\n # end onEnter()\r\n\r\n def onExit(self, session, eventDirection): #@UnusedVariable\r\n if eventDirection == ZWizardPage.NEXT:\r\n site = self._getSelectedSite()\r\n if site != self.site:\r\n self.site = site\r\n # Remove any old custom pages we may have had\r\n if self.customPages:\r\n for page in self.customPages:\r\n # magic number - the position of the first custom \r\n # page and the position of all custom pages as they \r\n # are removed\r\n self.wizard.removePage(2)\r\n self.customPages = None\r\n # Now add in new pages\r\n pages = []\r\n wizardPageClasses = self.site.createContributedWizardPages()\r\n insertionPoint = 2\r\n for wizardPageClass in wizardPageClasses:\r\n page = wizardPageClass(self.model, self.wizard)\r\n pages.append(page)\r\n self.wizard.addPage(page, insertionPoint)\r\n insertionPoint = insertionPoint + 1\r\n self.customPages = pages\r\n return True\r\n # end onExit()\r\n\r\n def _getSelectedSite(self):\r\n idx = self.siteCombo.GetSelection()\r\n return self._getModel().getMediaSites()[idx]\r\n # end _getSelectedSite()\r\n\r\n# end ZNewMediaStorageTypePage\r\n\r\n\r\n# ------------------------------------------------------------------------------------------\r\n# The first page of the new media storage wizard. This page displays a list of sites that\r\n# the user can choose from.\r\n# ------------------------------------------------------------------------------------------\r\nclass ZNewMediaStorageWizardParamsPage(ZNewMediaStorageWizardPage):\r\n\r\n def __init__(self, model, parent):\r\n self.mediaSite = None\r\n self.widgetFactory = ZWidgetFactory(self, True)\r\n self.widgets = []\r\n\r\n ZNewMediaStorageWizardPage.__init__(self, model, parent)\r\n # end __init__()\r\n\r\n def onEnter(self, session, eventDirection): #@UnusedVariable\r\n self.storeName = session.getProperty(u\"type-page.name\") #$NON-NLS-1$\r\n selectedMediaSite = session.getProperty(u\"type-page.site\") #$NON-NLS-1$\r\n if self.mediaSite != selectedMediaSite:\r\n self.mediaSite = selectedMediaSite\r\n self.DestroyChildren()\r\n self.widgets = []\r\n self._createSiteWidgets()\r\n self._layoutSiteWidgets()\r\n self._validate()\r\n # Commented out because SetFocus() was causing some strange behavior...\r\n# (name, label, widget) = self.widgets[0] #@UnusedVariable\r\n# widget.SetFocus()\r\n # end onEnter()\r\n\r\n def _createSiteWidgets(self):\r\n self.staticBox = wx.StaticBox(self, wx.ID_ANY, _extstr(u\"mediastoragewizard._Settings\") % self.mediaSite.getDisplayName()) #$NON-NLS-1$\r\n siteProps = self.mediaSite.getProperties()\r\n for siteProp in siteProps:\r\n if siteProp.getType() == u\"hidden\": #$NON-NLS-1$\r\n continue\r\n label = self._createLabelFromProperty(siteProp)\r\n name = siteProp.getName()\r\n control = self._createControlFromProperty(siteProp)\r\n self.widgets.append( (name, label, control) )\r\n # end _createSiteWidgets()\r\n\r\n def _createLabelFromProperty(self, siteProp):\r\n label = siteProp.getDisplayName()\r\n if siteProp.getType() == u\"checkbox\": #$NON-NLS-1$\r\n label = u\"\" #$NON-NLS-1$\r\n else:\r\n label = label + u\":\" #$NON-NLS-1$\r\n return wx.StaticText(self, wx.ID_ANY, label)\r\n # end _createLabelFromProperty()\r\n\r\n def 
_createControlFromProperty(self, siteProp):\r\n widgetFactoryProps = {}\r\n widgetFactoryProps[u\"type\"] = siteProp.getType() #$NON-NLS-1$\r\n widgetFactoryProps[u\"name\"] = siteProp.getName() #$NON-NLS-1$\r\n widgetFactoryProps[u\"value\"] = siteProp.getDefaultValue() #$NON-NLS-1$\r\n widgetFactoryProps[u\"label\"] = siteProp.getDisplayName() #$NON-NLS-1$\r\n widgetFactoryProps[u\"tooltip\"] = siteProp.getTooltip() #$NON-NLS-1$\r\n widgetFactoryProps[u\"validation-regexp\"] = siteProp.getValidationRegexp() #$NON-NLS-1$\r\n widgetFactoryProps[u\"validation-error-message\"] = siteProp.getValidationErrorMessage() #$NON-NLS-1$\r\n\r\n return self.widgetFactory.createWidget(widgetFactoryProps)\r\n # end _createControlFromProperty()\r\n\r\n def _layoutSiteWidgets(self):\r\n box = wx.BoxSizer(wx.VERTICAL)\r\n staticBoxSizer = wx.StaticBoxSizer(self.staticBox, wx.VERTICAL)\r\n\r\n flexGridSizer = wx.FlexGridSizer(len(self.widgets), 2, 5, 5)\r\n flexGridSizer.AddGrowableCol(1)\r\n\r\n for (name, label, widget) in self.widgets: #@UnusedVariable\r\n flexGridSizer.Add(label, 0, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)\r\n flexGridSizer.Add(widget, 0, wx.EXPAND | wx.RIGHT, 5)\r\n\r\n staticBoxSizer.AddSizer(flexGridSizer, 1, wx.EXPAND | wx.ALL, 5)\r\n box.AddSizer(staticBoxSizer, 0, wx.EXPAND | wx.ALL, 10)\r\n\r\n self.SetAutoLayout(True)\r\n self.SetSizer(box)\r\n self.Layout()\r\n # end _layoutSiteWidgets()\r\n\r\n def getDataProperties(self):\r\n rval = {}\r\n\r\n storeProperties = {}\r\n for (name, label, widget) in self.widgets: #@UnusedVariable\r\n storeProperties[name] = unicode(widget.GetValue())\r\n\r\n rval[u\"params-page.properties\"] = storeProperties #$NON-NLS-1$\r\n return rval\r\n # end getDataProperties()\r\n\r\n# end ZNewMediaStorageWizardParamsPage\r\n\r\n\r\n# ------------------------------------------------------------------------------------------\r\n# The first page of the new media storage wizard. 
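_createControlFromProperty() above flattens each site property into a plain dict before handing it to ZWidgetFactory.createWidget(). Spelled out with illustrative values, the contract looks like:

widgetFactoryProps = {
    u"type": u"text",  # or u"checkbox" / u"hidden", as handled above
    u"name": u"username",
    u"value": u"",
    u"label": u"Username",
    u"tooltip": u"The account user name",
    u"validation-regexp": u".+",
    u"validation-error-message": u"A user name is required",
}
# widget = widgetFactory.createWidget(widgetFactoryProps)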
This page displays a list of sites that\r\n# the user can choose from.\r\n# ------------------------------------------------------------------------------------------\r\nclass ZNewMediaStorageWizardConfirmPage(ZNewMediaStorageWizardPage):\r\n\r\n def __init__(self, model, parent):\r\n ZNewMediaStorageWizardPage.__init__(self, model, parent)\r\n self.mediaSite = None\r\n self.propertyValues = {}\r\n self.session = None\r\n # end __init__()\r\n\r\n def onEnter(self, session, eventDirection): #@UnusedVariable\r\n self.session = session\r\n self.mediaSite = session.getProperty(u\"type-page.site\") #$NON-NLS-1$\r\n self.propertyValues = session.getProperty(u\"params-page.properties\") #$NON-NLS-1$\r\n self.DestroyChildren()\r\n self.widgets = []\r\n self._createConfirmWidgets()\r\n self._layoutConfirmWidgets()\r\n self._bindConfirmWidgetEvents()\r\n # end onEnter()\r\n\r\n def _createConfirmWidgets(self):\r\n self.confirmText = wx.StaticText(self, wx.ID_ANY, _extstr(u\"mediastoragewizard.ConfirmSettingsText\")) #$NON-NLS-1$\r\n self.staticBox = wx.StaticBox(self, wx.ID_ANY, _extstr(u\"mediastoragewizard.Settings\")) #$NON-NLS-1$\r\n siteProps = self.mediaSite.getProperties()\r\n for siteProp in siteProps:\r\n label = self._createLabelFromProperty(siteProp)\r\n valueLabel = self._createLabelFromPropertyValue(siteProp)\r\n self.widgets.append( (label, valueLabel) )\r\n\r\n self.testButton = wx.Button(self, wx.ID_ANY, _extstr(u\"mediastoragewizard.TestSettings\")) #$NON-NLS-1$\r\n storage = self._createMediaStorageForTesting()\r\n if not storage.getCapabilities().hasCapability(IZMediaStorageCapabilities.KEY_SUPPORTS_DELETE):\r\n self.testButton.Show(False)\r\n # end _createConfirmWidgets()\r\n\r\n def _createLabelFromProperty(self, siteProp):\r\n label = siteProp.getDisplayName()\r\n label = label + u\":\" #$NON-NLS-1$\r\n textCtrl = wx.StaticText(self, wx.ID_ANY, label)\r\n textCtrl.SetFont(getDefaultFontBold())\r\n return textCtrl\r\n # end _createLabelFromProperty()\r\n\r\n def _createLabelFromPropertyValue(self, siteProp):\r\n name = siteProp.getName()\r\n label = self.propertyValues[name]\r\n if name == u\"password\": #$NON-NLS-1$\r\n label = u\"******\" #$NON-NLS-1$\r\n text = wx.StaticText(self, wx.ID_ANY, label)\r\n text.SetToolTipString(label)\r\n return text\r\n # end _createLabelFromPropertyValue()\r\n\r\n def _layoutConfirmWidgets(self):\r\n box = wx.BoxSizer(wx.VERTICAL)\r\n staticBoxSizer = wx.StaticBoxSizer(self.staticBox, wx.VERTICAL)\r\n\r\n flexGridSizer = wx.FlexGridSizer(len(self.widgets), 2, 5, 5)\r\n flexGridSizer.AddGrowableCol(1)\r\n\r\n for (label, widget) in self.widgets: #@UnusedVariable\r\n flexGridSizer.Add(label, 0, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)\r\n flexGridSizer.Add(widget, 0, wx.EXPAND | wx.RIGHT, 5)\r\n\r\n staticBoxSizer.AddSizer(flexGridSizer, 1, wx.EXPAND | wx.ALL, 5)\r\n box.Add(self.confirmText, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 10)\r\n box.AddSizer(staticBoxSizer, 0, wx.EXPAND | wx.ALL, 10)\r\n box.Add(self.testButton, 0, wx.LEFT, 10)\r\n\r\n self.SetAutoLayout(True)\r\n self.SetSizer(box)\r\n self.Layout()\r\n # end _layoutConfirmWidgets()\r\n\r\n def _bindConfirmWidgetEvents(self):\r\n self.Bind(wx.EVT_BUTTON, self.onTest, self.testButton)\r\n # end _bindConfirmWidgetEvents()\r\n\r\n def onTest(self, event):\r\n storage = self._createMediaStorageForTesting()\r\n context = ZMediaStorageMenuActionContext(self, storage, None)\r\n action = ZTestMediaStorageMenuAction()\r\n action.runAction(context)\r\n event.Skip()\r\n # end 
onTest()\r\n\r\n    def getMediaStorageName(self):\r\n        return self.session.getProperty(u\"type-page.name\") #$NON-NLS-1$\r\n    # end getMediaStorageName()\r\n\r\n    def getMediaSiteId(self):\r\n        return self.session.getProperty(u\"type-page.site\").getId() #$NON-NLS-1$\r\n    # end getMediaSiteId()\r\n\r\n    def getMediaStorageProperties(self):\r\n        return self.session.getProperty(u\"params-page.properties\") #$NON-NLS-1$\r\n    # end getMediaStorageProperties()\r\n\r\n    def _createMediaStorageForTesting(self):\r\n        storageService = getApplicationModel().getService(IZBlogAppServiceIDs.MEDIA_STORAGE_SERVICE_ID)\r\n        name = self.getMediaStorageName()\r\n        mediaSiteId = self.getMediaSiteId()\r\n        properties = self.getMediaStorageProperties()\r\n        persist = False\r\n        storage = storageService.createMediaStorage(name, mediaSiteId, properties, persist)\r\n        return storage\r\n    # end _createMediaStorageForTesting()\r\n\r\n# end ZNewMediaStorageWizardConfirmPage\r\n","sub_path":"src/python/zoundry/blogapp/ui/wizards/mediastoragewizard.py","file_name":"mediastoragewizard.py","file_ext":"py","file_size_in_byte":20134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"26918620","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom .models import CustomUser, File, Folder\nimport os\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\n\nUser = get_user_model()\n\n# Create your forms here.\n\nclass NewUserForm(UserCreationForm):\n    email = forms.EmailField(required=True)\n\n    class Meta:\n        model = User\n        fields = (\"username\", \"email\", \"password1\", \"password2\",)\n        exclude = (\"available_space\",)\n\n    def save(self, commit=True):\n        user = super(NewUserForm, self).save(commit=False)\n        user.email = self.cleaned_data['email']\n        if commit:\n            user.save()\n            dirname = user.username # 2010.08.09.12.08.45\n            try:\n                path = os.path.join(settings.MEDIA_ROOT, dirname)\n                os.mkdir(path)\n                folder = Folder(user = user, name = user.username, is_root = True, path = path)\n                folder.save()\n            except:\n                print(\"Error adding folder\")\n        return user\n\n\n\nclass CustomUserCreationForm(UserCreationForm):\n\n    class Meta:\n        model = CustomUser\n        fields = ('username', 'email')\n\nclass CustomUserChangeForm(UserChangeForm):\n\n    class Meta:\n        model = CustomUser\n        fields = ('username', 'email')\n\nclass FileForm(forms.ModelForm):\n    class Meta:\n        model = File\n\n        fields = [\n            'file'\n        ]\n        exclude = [\n            'size', 'user', 'name','is_shared',\n        ]\n\nclass FolderForm(forms.ModelForm):\n\n    class Meta:\n        model = Folder\n\n        fields = [\n            'name'\n        ]\n        exclude = [\n            'user', 'is_root', 'path', 'superior_path',\n        ]\n\n\n\n","sub_path":"cloud/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"179943027","text":"import json\nimport requests\n\ndef readData():\n    header = {\"Authorization\": \"AccessToken\"}\n    \n    response = requests.get(\"https://api.switch-bot.com/v1.0/devices\", headers=header)\n    devices = json.loads(response.text)\n    #print(devices)\n    bots_id = [device[\"deviceId\"] for device in devices['body']['deviceList'] if \"Meter\" == device[\"deviceType\"]]\n    \n    #for bot_id in bots_id:\n    \n    response = requests.get(\"https://api.switch-bot.com/v1.0/devices/\" + bots_id[0] + \"/status\", headers=header)\n    bot = 
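# --- Editor's aside (not part of the original forms.py above): the bare try/except
# around the folder setup swallows every error. A narrower, idempotent sketch of
# the same logic, assuming the identical Folder model and MEDIA_ROOT settings:
import os
from django.conf import settings

def create_user_folder(user, folder_model):
    """Create the per-user media directory and its root Folder row."""
    path = os.path.join(settings.MEDIA_ROOT, user.username)
    os.makedirs(path, exist_ok=True)  # unlike os.mkdir, no error if it already exists
    return folder_model.objects.create(
        user=user, name=user.username, is_root=True, path=path
    )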
json.loads(response.text)\n    #print(bot)\n    \n    temperature = bot['body']['temperature']\n    humidity = bot['body']['humidity']\n    \n    value = {\"temperature\": temperature, \"humidity\":humidity}\n    return(value)\n    #print(\"bot id (\" + bot_id + \") power : \" + power)\n\nif __name__ == '__main__':\n    print(readData())\n\n\n","sub_path":"switchbot_getmetervalue.py","file_name":"switchbot_getmetervalue.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"560599376","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"\nArticle from https://zhuanlan.zhihu.com/p/21486804\n\nGradient descent is really just computing the derivative of the function and then stepping along the negative gradient (times the step size).\nBecause the gradient carries direction, the negative gradient implicitly gives the direction in which x should change.\n\"\"\"\n\ndef gd(x_start, step, g):\n    x = x_start\n\n    for i in range(20):\n        grad = g(x)\n        x -= grad * step\n        print('[ Epoch {0} ] grad = {1}, x = {2}'.format(i, grad, x))\n        if abs(grad) < 1e-6:\n            break\n    return x\n\ndef f(x):\n    return x*x - 2*x + 1\n\ndef g(x):\n    return 2 * x - 2\n\n\nx = np.linspace(-5,7,100)\ny = f(x)\n\ngd(5,1,g)\n\nplt.plot(x, y)\nplt.show()","sub_path":"实践-机器学习/无痛的机器学习/001-梯度下降是门手艺活/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"466785978","text":"from typing import TYPE_CHECKING, Union, List, Optional\nfrom pyduckdb.spark.sql.types import StructType\n\nPrimitiveType = Union[bool, float, int, str]\nOptionalPrimitiveType = Optional[PrimitiveType]\n\nif TYPE_CHECKING:\n\tfrom pyduckdb.spark.sql.dataframe import DataFrame\n\tfrom pyduckdb.spark.sql.session import SparkSession\n\nclass DataFrameWriter:\n\tdef __init__(self, dataframe: \"DataFrame\"):\n\t\tself.dataframe = dataframe\n\n\tdef saveAsTable(self, table_name: str) -> None:\n\t\trelation = self.dataframe.relation\n\t\trelation.create(table_name)\n\nclass DataFrameReader:\n\tdef __init__(self, session: \"SparkSession\"):\n\t\traise NotImplementedError\n\t\tself.session = session\n\n\tdef load(self, path: Union[str, List[str], None] = None, format: Optional[str] = None, schema: Union[StructType, str, None] = None, **options: OptionalPrimitiveType) -> \"DataFrame\":\n\t\tfrom pyduckdb.spark.sql.dataframe import DataFrame\n\t\traise NotImplementedError\n\n__all__ = [\n\t\"DataFrameWriter\",\n\t\"DataFrameReader\"\n]\n","sub_path":"tools/pythonpkg/pyduckdb/spark/sql/readwriter.py","file_name":"readwriter.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"13459895","text":"#!/usr/bin/python\n\nimport sys\n\n'''\nso the problem here is that we need to find all the permutations of the possible plays of rock, paper, scissors for each player there is. The more players, three times more plays. This leads to an O(3^n) solution.\n\nn = 1\n\n[[\"rock\"], [\"paper\"], [\"scissors\"]] -- we append in its own array, each of the possible plays for 1 player. \n\nn = 2\n\n[\n    [\"rock\", \"rock\"], [\"rock\", \"paper\"], [\"rock\", \"scissors\"],\n    [\"paper\", \"rock\"], [\"paper\", \"paper\"], [\"paper\", \"scissors\"],\n    [\"scissors\", \"rock\"], [\"scissors\", \"paper\"], [\"scissors\", \"scissors\"]\n]\n\n-- Here we see that we have triple the amount of permutations. We use the n = 1 array to add each of those items to each index of the mutated array to create a new permutation, until we have exhausted all the plays. 
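# --- Editor's aside (not part of demo1.py): with step=1 the update above maps
# x: 5 -> -3 -> 5 -> ... and never converges, because f(x) = (x - 1)**2 has a
# gradient with Lipschitz constant L = 2, so plain gradient descent needs
# step < 2/L = 1. A minimal convergent run under that assumption:

def gd_converging(x_start, step=0.9, iters=100, tol=1e-6):
    """Gradient descent on f(x) = (x - 1)**2; the minimum is x = 1."""
    x = x_start
    for _ in range(iters):
        grad = 2 * x - 2  # f'(x)
        x -= step * grad
        if abs(grad) < tol:
            break
    return x

# gd_converging(5) -> approximately 1.0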
\n\n\n\n'''\ndef rock_paper_scissors(n):\n\n choices = [\"rock\", \"paper\", \"scissors\"]\n possible_hands = []\n \n #helper func\n def find_results(num_rounds_left, result=[]):\n if num_rounds_left == 0:\n possible_hands.append(result)\n return\n for choice in choices:\n new_result = result + [choice]\n find_results(num_rounds_left - 1, new_result)\n \n find_results(n, [])\n return possible_hands\n \n\"\"\"\nIterative implementation or rps\n\"\"\"\n# def rock_paper_scissors_iterative(n):\n# output = []\n# possible_plays = ['scissors', 'paper', 'rock']\n\n# stack = []\n# stack.append([])\n\n# while len(stack) > 0:\n# hand = stack.pop()\n\n# if n == 0 or len(hand) == n:\n# output.append(hand)\n# else:\n# for play in possible_plays:\n# stack.append(hand + [play])\n\n# return output\n\n\n\n \n\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n num_plays = int(sys.argv[1])\n print(rock_paper_scissors(num_plays))\n else:\n print('Usage: rps.py [num_plays]')","sub_path":"4-rock_paper_scissors/rps.py","file_name":"rps.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"443064589","text":"#!/usr/bin/env python\n\nimport os\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext.webapp import template\n\nclass ComingSoonHandler(webapp.RequestHandler):\n\tdef get(self):\n\t\tcontent = \"Coming soon\"\n\t\tcontent += '
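# --- Editor's aside (not part of rps.py): the recursion above enumerates exactly
# the 3**n hands that itertools.product yields; a stdlib-only cross-check. Note
# also that find_results uses a mutable default (result=[]) -- harmless here
# because every call passes result explicitly, but usually worth avoiding.
import itertools

def rps_product(n):
    return [list(hand) for hand in itertools.product(["rock", "paper", "scissors"], repeat=n)]

# sorted(rps_product(2)) == sorted(rock_paper_scissors(2))  -> True (9 hands)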
<a href=\"%s\">admin login</a>' % users.create_login_url(\"/\") \n\n\t\ttemplate_values = { 'content': content }\n\n\t\tpath = os.path.join(os.path.dirname(__file__), 'comingsoon.html')\n\t\tself.response.out.write(template.render(path, template_values))\n\n\napplication = webapp.WSGIApplication([('/comingsoon', ComingSoonHandler)],\n\t\t debug=True)\n\ndef main():\n    run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"comingsoon.py","file_name":"comingsoon.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"218923518","text":"import os, sys, pygame, os, random\nfrom pygame.locals import *\nfrom pygame import constants\nfrom pgu import gui\n\nfrom constants import *\nfrom base import Scene, Rural, Suburb, GameObject\n\nclass TestScene(Scene):\n    title = \"Having a House\"\n    prompt = \"How would you like to care for your home?\"\n\n    def render(self, screen):\n        super().render(screen)\n        house_img = pygame.image.load(os.path.join(BASE_PATH, 'data', 'rural_base_blue.png'))\n        house_img = pygame.transform.scale(house_img, (200, 250))\n        screen.blit(house_img, [400,300])\n        for image, coords, scale in self.gameobject.visualizations:\n            image = pygame.image.load(os.path.join(BASE_PATH, 'data', image))\n            image = pygame.transform.scale(image, scale)\n            screen.blit(image, coords)\n\n\nclass TitleScene(Scene):\n    def __init__(self, gameobject):\n        self.font_l = pygame.font.SysFont(None, 42)\n        self.font_m = pygame.font.SysFont(None, 26)\n        self.font_s = pygame.font.SysFont(None, 24)\n        self.title = self.font_l.render(TITLE, True, WHITE)\n        self.author = self.font_m.render('By ips', True, WHITE)\n        self.instructions = [\n            self.font_s.render(\"> Press 'Space Bar' for Start Game\", True, WHITE),\n            self.font_s.render(\"> Press 'Escape' to Exit\", True, WHITE),\n            # self.font_s.render(\"> Press 'O' to Load Game \", True, WHITE),\n            # self.font_s.render(\"> Press 'I' for instructions\", True, WHITE)\n        ]\n        super().__init__(gameobject)\n\n    def update(self):\n        pass\n\n    def handle_events(self, events):\n        for e in events:\n            if e.type == KEYDOWN and e.key == K_ESCAPE:\n                terminate()\n            if e.type == KEYDOWN and e.key == K_SPACE:\n                self.manager.go_to(StartScene(self.gameobject))\n\n    def render(self, screen):\n        screen.fill(BLACK)\n        image = pygame.image.load(os.path.join(BASE_PATH, 'data', 'background_image.png'))\n        image = pygame.transform.scale(image, (SCREEN_WIDTH, SCREEN_HEIGHT))\n        screen.blit(image, [0,0])\n        screen.blit(self.title, (20, 25))\n        screen.blit(self.author, (20, 70))\n        accumulated_height = 120\n        for instruction in self.instructions:\n            screen.blit(instruction, (25, accumulated_height))\n            accumulated_height += 35\n\n\nclass StartScene(Scene):\n    title = ''\n    prompt = 'Choose your home type'\n    background = 'background_image.png'\n\n    def __init__(self, gameobject):\n        super().__init__(gameobject)\n        self.instructions = [\n            self.font_s.render(\"> A. 2BD/2BA in Suburbia\", True, WHITE),\n            self.font_s.render(\"> B. 
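# --- Editor's aside (not part of the original main.py): TestScene.render above
# reloads and rescales its images on every frame. A load-once cache sketch,
# assuming the same BASE_PATH/'data' layout:
import os
import pygame

_image_cache = {}

def load_scaled(base_path, name, size):
    """Load an image once per (name, size) and reuse the cached Surface."""
    key = (name, size)
    if key not in _image_cache:
        surface = pygame.image.load(os.path.join(base_path, 'data', name))
        _image_cache[key] = pygame.transform.scale(surface, size)
    return _image_cache[key]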
2BD/2BA Rural Property\", True, WHITE)\n ]\n\n def update(self):\n pass\n\n def render(self, screen):\n screen.fill(BLACK)\n if self.background:\n image = pygame.image.load(os.path.join(BASE_PATH, 'data', self.background))\n image = pygame.transform.scale(image, (SCREEN_WIDTH, SCREEN_HEIGHT))\n screen.blit(image, [0, 0])\n title = self.font_l.render(self.title, True, WHITE)\n screen.blit(title, XY_TITLE)\n prompt = self.font_m.render(self.prompt, True, WHITE)\n screen.blit(prompt, XY_PROMPT)\n\n instruction_height = 100\n for instruction in self.instructions:\n screen.blit(instruction, (45, instruction_height))\n instruction_height += 25\n\n def handle_events(self, events):\n for e in events:\n if e.type == KEYDOWN and e.key == K_a:\n self.gameobject.house = Suburb()\n self.manager.go_to(TestScene(self.gameobject))\n if e.type == KEYDOWN and e.key == K_b:\n self.gameobject.house = Rural()\n self.manager.go_to(TestScene(self.gameobject))\n\n\nclass SceneManager:\n def __init__(self):\n self.scene = None\n gameobject = GameObject()\n self.go_to(TitleScene(gameobject))\n\n def go_to(self, scene):\n self.scene = scene\n self.scene.manager = self\n\n\ndef main():\n pygame.init()\n screen = pygame.display.set_mode(DISPLAY, FLAGS, DEPTH)\n pygame.display.set_caption(TITLE)\n timer = pygame.time.Clock()\n running = True\n manager = SceneManager()\n\n while running:\n timer.tick(FPS)\n if pygame.event.get(QUIT):\n running = False\n return\n\n screen = pygame.display.get_surface()\n manager.scene.handle_events(pygame.event.get())\n manager.scene.update()\n manager.scene.render(screen)\n pygame.display.flip()\n\n\ndef terminate():\n pygame.quit()\n sys.exit()\n\n\nif __name__ == '__main__': main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"398442118","text":"#!/usr/bin/env python3\n# Submission for CPSC526 Assignment 3\n# Authors: Ethan Hamman, 10125341\n# Kyle Sutherland, 10120910\n# Tutorial Section 1\n\nimport os\nimport socket\nimport sys\nimport random\nimport string\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives import padding\nfrom datetime import datetime\nfrom time import sleep\nbackend = default_backend()\n\n#Read arguments\ncommand = sys.argv[1]\nfilename = sys.argv[2]\nHOST = sys.argv[3].split(\":\")[0]\nPORT = int(sys.argv[3].split(\":\")[1])\nargcipher = sys.argv[4]\nif(argcipher == \"none\"):\n key = \"\"\n IV = b\"\"\nelse:\n key = sys.argv[5]\n IV = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for i in range(16))\n IV = bytes(IV.encode('utf-8'))\n\n#Will format the key to the appropriate length\ndef formatKey():\n global key\n if(argcipher == \"none\"):\n return\n #Using MD5 hash to generate 128bit key, I know its depracated\n key = key.encode(\"UTF-8\") #String to bytes\n if(argcipher==\"aes128\"):\n digest = digest = hashes.Hash(hashes.MD5(), backend=default_backend())\n digest.update(key)\n key = digest.finalize()\n elif(argcipher==\"aes256\"):\n digest = hashes.Hash(hashes.SHA256(), backend=default_backend())\n digest.update(key)\n key = digest.finalize()\n\n#Takes bytes, returns encrypted bytes\n#https://cryptography.io/en/latest/hazmat/primitives/symmetric-encryption/\ndef encryptData(data):\n padder = 
padding.PKCS7(128).padder()\n paddedData = padder.update(data) + padder.finalize()\n if(argcipher == \"none\"):\n return data\n elif(argcipher == \"aes128\" or argcipher==\"aes256\"): #Key will be properly formatted, do the same thing for both types\n cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)\n encryptor = cipher.encryptor()\n return encryptor.update(paddedData) + encryptor.finalize()\n else:\n print(\"Cipher is not known\")\n sys.exit(0)\n\n#Takes bytes, returns encrypted bytes\ndef decryptData(data):\n if(argcipher == \"none\"):\n return data\n elif(argcipher == \"aes128\" or argcipher==\"aes256\"): #Key will be properly formatted, do the same thing for both types\n cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)\n decryptor = cipher.decryptor()\n padded_data = decryptor.update(data) + decryptor.finalize()\n unpadder = padding.PKCS7(128).unpadder()\n return unpadder.update(padded_data) + unpadder.finalize()\n else:\n print(\"Cipher is not known\")\n sys.exit(0)\n\n#Reads from stdio, then sends it\ndef sendfile(sendSoc):\n dat = sys.stdin.buffer.read()\n sendSoc.sendall(encryptData(dat))\n print(\"OK\")\n\n#Recieves the file, then writes it\ndef recvfile(inSoc):\n #if the file is encrypted, write data to tmp file instead (more efficent than entireData += partOfData)\n if(argcipher != \"none\"):\n recFile = filename + \".tmp\"\n else:\n recFile = filename\n\n #open file\n try:\n f = open(recFile, 'wb')\n except:\n print(\"Error opening file\")\n sys.exit(0)\n\n #receive the data and write it to the file\n while 1:\n data = inSoc.recv(1024)\n if(not data): break\n f.write(data)\n #if(len(data)<1024): break\n\n f.close()\n\n #If the file is encrypted, then decrypt using the secret key\n processFile(recFile)\n\n#Processes a received encrypted temp file\ndef processFile(fileIn):\n try:\n fIn = open(fileIn, 'rb')\n except:\n print(\"Error opening file\")\n sys.exit(0)\n\n data = fIn.read()\n if(not data):\n sys.exit(\"Error: got NOFILE from server\")\n try:\n plaintext = decryptData(data)\n except:\n sys.exit('Error: cannot decrypt')\n sys.stdout.buffer.write(plaintext)\n\n fIn.close()\n\n os.remove(fileIn)\n\n\ndef main():\n formatKey()\n outboundSoc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #Initialize socket that connects to server\n outboundSoc.connect((HOST, PORT))\n #print(\"Connected to \" + HOST + \" on port \" + str(PORT)) #Connect to server\n outboundSoc.sendall(argcipher.encode(\"UTF-8\")) #send the cipher to be used and the initialization vector\n sleep(0.01)\n outboundSoc.sendall(IV)\n outboundSoc.sendall(encryptData((command+\":\"+filename).encode(\"UTF-8\"))) #Send encrypted command and filename to server\n try:\n response = decryptData(outboundSoc.recv(1024))\n except:\n sys.exit(\"Error: Wrong key.\")\n\n if(response != b'good'):\n sys.exit('Error: File Not Found')\n\n if(command==\"write\"):\n sendfile(outboundSoc)\n elif(command=='read'):\n recvfile(outboundSoc)\n else:\n print('Error: command not found.')\n sys.exit(0)\n\nmain()\n","sub_path":"Ass3/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"624703526","text":"import os, os.path\nimport cherrypy\n\nfrom cherrypy.process.plugins import Daemonizer\n\nfrom app import Root\nfrom app import UniverseSeed\nfrom app import UniverseUpdate\nfrom app import Display\nfrom app import Parameters\nfrom app import Seed\nfrom app import Info\n\nif 
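# --- Editor's aside (not part of client.py): a self-contained round-trip of the
# same PKCS7 + AES-CBC construction, handy for checking a derived key in
# isolation. Uses only the 'cryptography' package already imported above.
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

def aes_cbc_roundtrip(plaintext, key):
    """Encrypt then decrypt; asserts the round-trip and returns the ciphertext."""
    iv = os.urandom(16)
    padder = padding.PKCS7(128).padder()
    padded = padder.update(plaintext) + padder.finalize()
    encryptor = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend()).encryptor()
    ct = encryptor.update(padded) + encryptor.finalize()
    decryptor = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend()).decryptor()
    unpadder = padding.PKCS7(128).unpadder()
    recovered = unpadder.update(decryptor.update(ct) + decryptor.finalize()) + unpadder.finalize()
    assert recovered == plaintext
    return ct

# aes_cbc_roundtrip(b"hello", os.urandom(32))  # a 32-byte key selects AES-256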
__name__=='__main__':\n    root = Root()\n    root.api = Root()\n    root.api.universe = Root()\n    root.api.universe.seed = UniverseSeed()\n    root.api.universe.update = UniverseUpdate()\n    root.api.display = Display()\n    root.api.parameters = Parameters()\n    root.seed = Seed()\n    root.info = Info()\n\n    conf = {\n        '/api': {\n            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),\n            'tools.sessions.on': True,\n            'tools.proxy.on': True,\n            'tools.response_headers.on': True,\n            'tools.response_headers.headers': [\n                ('Content-Type', 'text/plain')],\n        },\n        '/seed': {\n            'tools.sessions.on': True,\n        },\n        '/info': {\n            'tools.sessions.on': True,\n        },\n    }\n\n    local_dir = os.path.abspath(os.getcwd())\n    cherrypy.config.update({\n        'server.socket_port': 8081,\n        'error_page.404': os.path.join(local_dir, './app/static/html/404.html'),\n    })\n\n    d = Daemonizer(cherrypy.engine)\n    d.subscribe()\n    \n    # run the application using CherryPy's HTTP Web Server\n    cherrypy.quickstart(root, '/', conf)\n","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"592817441","text":"\"\"\"\nPython defines functions with the def keyword.\nA Python function does not declare a return type;\nwhen a function has no return statement, its return value is None.\n\nFunction names here use lowerCamelCase, and the indentation inside a function must stay consistent.\n\nPython functions take these kinds of parameters:\n1. positional  2. keyword  3. default  4. collecting (*args)  5. keyword-collecting (**kwargs)\n\n\"\"\"\n\n\n# Define a function\ndef func(aaa, bbb):\n    print(\"func run...{0}...{1}\".format(aaa, bbb))\n\n\n# Call the function\nfunc(\"abc\", \"xyz\")  # positional arguments\nfunc(bbb=\"xyz\", aaa=\"abc\")  # keyword arguments: passed by name, so the function matches them by name rather than by position\n\n# Assign the function to a variable\nfund = func\nfund(\"xxx\", \"yyy\")\n\n\n# A function with a return value\ndef sumfunc(x, y):\n    return x + y\n\n\nsumxy = sumfunc(1, 2)\nprint(sumxy)\n\n\n# Default parameters\n# A non-default parameter may not follow a default one\n# p1 and p2 are default parameters\ndef func_name(a, p1=\"abc\", p2=\"bcd\"):\n    print(a, p1, p2)\n\n\nfunc_name(1)\nfunc_name(1, 2)\nfunc_name(1, 2, 3)\nfunc_name(a=1, p2=3, p1=2)\n\n\n# Collecting parameters\n# A collecting parameter accepts arguments that were not declared; its name is led by * and its type is tuple\ndef funcArgus(a, b, *arg):\n    print(a, b, arg)\n    print(type(arg))\n    for item in arg:\n        print(item)\n\n\nfuncArgus(\"valuea\", \"valueb\", \"dsfsdf\", \"fsdfdsf\", \"fsdfsdf\")\n\n\n# Keyword-collecting parameters\n# A keyword-collecting parameter accepts keyword arguments that were not declared; its name is led by ** and its type is dict\ndef funcKeywordArgus(a, **kwargs):\n    print(a, kwargs)\n    print(type(kwargs))\n    for k, v in kwargs.items():\n        print(k, \"......\", v)\n\n\nfuncKeywordArgus(\"aaa\", p1=\"bbb\", p2=\"ccc\")\n\n\n# Mixing every kind of parameter\n# Declare them in this order: positional --> collecting --> keyword --> keyword-collecting\ndef stu(name, age, *args, hobby=\"none\", **kwargs):\n    print(\"My name is {0}, I am {1} years old\".format(name, age))\n    if hobby == \"none\":\n        print(\"No hobbies\")\n    else:\n        print(\"Hobby is {0}\".format(hobby))\n    print(\"*\" * 10)\n    for i in args:\n        print(i)\n    print(\"#\" * 10)\n\n    for k, v in kwargs.items():\n        print(k, \"......\", v)\n\n\nstu(\"Zhang San\", 1, \"collected arg 1\", \"collected arg 2\", \"collected arg 3\", hobby=\"singing\", p1=\"value1\", p2=\"value2\")\n\n\n# Function docstrings\ndef funcStu(name, age):\n    \"\"\"\n    This is the docstring content\n    :param name: student name\n    :param age: student age\n    :return this function returns nothing\n    \"\"\"\n    print(\"....\")\n\n\n# Inspect the docstring\nhelp(funcStu)\nprint(\"###########\")\nprint(funcStu.__doc__)\n\n\n# Recursive functions\n# A function that calls itself directly or indirectly; there is a recursion depth limit, and exceeding it raises an error\n# Mind the termination condition when writing recursion\n\n# Fibonacci sequence 1, 1, 2, 3, 5, 8 ...\ndef fib(n):\n    if n == 1:\n        return 1\n    if n == 2:\n        return 1\n\n    return fib(n - 1) + fib(n - 2)\n\n\nidx = 10\nprint(\"Fibonacci term {0} is\".format(idx), fib(idx))\n\n\n# Tower of Hanoi\ndef hannota(n, a, b, c):\n    if n == 1:\n        print(a, \"--->\", c)\n        return\n    if n == 2:\n        print(a, \"--->\", b)\n        print(a, \"--->\", c)\n        print(b, \"--->\", c)\n        return\n    # Move n-1 disks from tower a to b, using c as the spare\n    hannota(n - 1, a, c, b)\n    print(a, 
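# --- Editor's aside (not part of the original tutorial): the mirror image of
# collecting parameters is unpacking arguments at the call site with * and **:
def stu_demo(name, age, *args, **kwargs):
    print(name, age, args, kwargs)

positional = ["Zhang San", 18]
extras = {"hobby": "singing", "city": "Beijing"}
stu_demo(*positional, "note1", "note2", **extras)
# -> Zhang San 18 ('note1', 'note2') {'hobby': 'singing', 'city': 'Beijing'}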
\"--->\", c)\n # 把n-1个盘子 从 b 塔借助 a 移到 c 上\n hannota(n - 1, b, a, c)\n\n\na = \"A\"\nb = \"B\"\nc = \"C\"\nn = 1\n\nhannota(n, a, b, c)\n\n# 变量的唯一编号,用 id()函数可打印出来\nm = 100\nn = m\nprint(\"打印变量 m,n 的id\", id(m), id(n)) # 此时两个变量的id 相同\nm = 101\nprint(m) # 101\nprint(n) # 100\nprint(\"为m赋值后,再次打印变量 m,n 的id\", id(m), id(n)) # 改变m的值,n的值不会改变,此时两个变量的 id 不相同\n\nlist1 = [1, 2, 3, 4, 5, 6]\nlist2 = list1\nlist1[2] = 100\nprint(list1)\nprint(list2)\nprint(\"打印 list1 和 list2 的 id\", id(list1), id(list2)) # 两个变量 id 相同,list1值改变后 list2 的值改变了\n","sub_path":"01_helloworld/04_function.py","file_name":"04_function.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"80831909","text":"#!/usr/bin/env python3\nimport sys\n\ndef solution(n, a):\n check = len(a) / 2 + 1\n counter = {}\n valid = True\n for number in a:\n if number not in counter:\n counter[number] = 0\n counter[number] += 1\n if counter[number] > check:\n valid = False\n break\n if valid:\n print('YES')\n else:\n print('NO')\n\n# input_str = \"\"\n\n# for line in sys.stdin:\n# input_str += line\n\ninput_str = '3\\n1 1 2'\n\nparsed = input_str.split('\\n')\nn = parsed[0]\na = parsed[1].split(' ')\n\nprint(n, a)\n\nsolution(n, a)\n","sub_path":"viblo-code/E/changing-seats/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"454297199","text":"#Rock_paper_program\nimport random\nl=[\"rock\",\"paper\",\"scissor\"]\ni=input(\"press y to play \")\nwhile(i==\"y\"):\n a=input(\"choose r:for rock, p:for paper or s:for scissor \")\n b=random.choice(l)\n print(\"computer chose\",b)\n print(\"your choice is\",a)\n if a==\"r\":\n if b==l[0]:\n print(\"its a tie\")\n elif b==l[1]: \n print(\"computer wins\")\n elif b==l[2]: \n print(\"you win\")\n elif a==\"p\":\n if b==l[1]: \n print(\"its a tie\")\n elif b==l[2]: \n print(\"computer wins\")\n elif b==l[0]:\n print(\"you win\")\n elif a==\"s\":\n if b==l[2]: \n print(\"its a tie\")\n elif b==l[0]: \n print(\"computer wins\")\n elif b==l[1]: \n print(\"you win\")\n else:\n print(\"please choose\")\n c=input(\"press q to quit and p to play \")\n if c==\"q\":\n print(\"thank you\")\n break\n elif c==\"p\":\n print(\"continue the game\")\n \n","sub_path":"Rock_paper_program.py","file_name":"Rock_paper_program.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"154440330","text":"import numpy as np\r\n\r\nN = 7\r\n\r\nM = 10\r\n\r\nA = np.random.randint(0, 100, (N, M))\r\n\r\nprint(A)\r\n\r\nAverage_line = A.mean(axis=1)\r\n\r\nAverage_column = A.mean(axis=0)\r\n\r\nAverage_line = Average_line[: , np.newaxis]\r\n\r\nA = np.hstack((A, Average_line))\r\n\r\nAverage_column = np.hstack((Average_column, [0.]))\r\n\r\nA = np.vstack((A, Average_column))\r\n\r\nprint(\"Новая матрица:\\n\" + str(A))","sub_path":"№5.py","file_name":"№5.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"325887384","text":"# -*- encoding: utf-8 -*-\nfrom django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom django.utils import timezone\nimport uuid\n\n\n# Create your models here.\nclass UserProfile(AbstractUser):\n ROLE_ADMIN = 'Admin'\n ROLE_USER = 'User'\n\n ROLE_CHOICES = (\n (ROLE_ADMIN, 'Admin'),\n 
(ROLE_USER, 'User'),\n    )\n    role = models.CharField(choices=ROLE_CHOICES, default='User', max_length=10, blank=True, verbose_name='Role')\n\n    class Meta:\n        ordering = ['username']\n        verbose_name = \"User\"\n        verbose_name_plural = verbose_name\n        db_table=\"userprofile\"\n\n    def __str__(self):\n        return self.username\n\n\nclass TaskList(models.Model):\n    STATUS = (\n        ('0x01', 'waiting'),\n        ('0x02', 'success'),\n        ('0x03', 'failure')\n    )\n    ENVIRONMENT = (\n        ('TEST', 'test'),\n        ('PROD', 'prod')\n    )\n    id = models.UUIDField(default=uuid.uuid4, primary_key=True)\n    projects = models.CharField(max_length=500, verbose_name='project_name')\n    modules = models.CharField(max_length=500, verbose_name='module_name')\n    time = models.DateTimeField(default=timezone.now, verbose_name='start_time')\n    status = models.CharField(choices=STATUS, max_length=10, verbose_name='status')\n    comment = models.CharField(max_length=500, verbose_name='comment')\n    sponsor = models.CharField(max_length=20, verbose_name='sponsor')\n    env = models.CharField(choices=ENVIRONMENT, max_length=10, verbose_name='env')\n    sql_file = models.FileField(upload_to=\"upload/%Y%m\", max_length=100, null=True,blank=True)\n    \n    class Meta: \n        ordering = ['time']\n        verbose_name = \"User\"\n        verbose_name_plural = verbose_name\n        db_table=\"tasklist\"\n\n\nclass Tasklog(models.Model):\n    tasklist = models.ForeignKey(TaskList,verbose_name=\"id\", on_delete=models.CASCADE)\n    log = models.TextField(verbose_name='log')\n\n    class Meta:\n        verbose_name = \"User\"\n        verbose_name_plural = verbose_name\n        db_table=\"tasklog\"\n","sub_path":"release/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"507282233","text":"#!/usr/bin/python\r\n# -*- coding: UTF-8 -*-\r\nimport re,requests,time,sys,argparse\r\nfrom bs4 import BeautifulSoup #also require lxml module\r\n\r\ndef getID(inputsym):\r\n    # For a given gene symbol return all IDs about this gene, filtering for organism Homo sapiens and experiment types including RNA-seq\r\n    symbol = inputsym\r\n    url = ''.join(['https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=gds&term=',symbol,'%5BAll%20Fields%5D%20AND%20%28\"Homo%20sapiens\"%5BOrganism%5D%20AND%20%28\"Expression%20profiling%20by%20high%20throughput%20sequencing\"%5BFilter%5D%20OR%20\"Expression%20profiling%20by%20array\"%5BFilter%5D%29%29&cmd=DetailsSearch&retmax=500'])\r\n    header = {\r\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',\r\n        'Connection': 'keep-alive'\r\n    }\r\n    web = requests.get(url,headers=header)\r\n    rprtid = re.findall('<Id>\d*</Id>', web.text)\r\n    ID = []\r\n    for i in range(rprtid.__len__()):\r\n        ID.append(str(rprtid[i]).rstrip('</Id>
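# --- Editor's aside (hypothetical usage, not part of release/models.py): with
# these models migrated in a configured Django project, rows would be created
# and queried roughly like this:
#
#     task = TaskList.objects.create(
#         projects="demo", modules="core", status="0x01",
#         comment="initial run", sponsor="ops", env="TEST",
#     )
#     Tasklog.objects.create(tasklist=task, log="queued")
#     waiting = TaskList.objects.filter(status="0x01").count()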
').lstrip('<Id>'))\r\n    return ID\r\n\r\ndef transtoGSE(ID):\r\n    # For each ID of GSE data return the GSE number\r\n    url = ''.join(['https://www.ncbi.nlm.nih.gov/gds/?term=',ID])\r\n    header = {\r\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',\r\n        'Connection': 'keep-alive'\r\n    }\r\n    web = requests.get(url,headers=header)\r\n    GSEnum = re.search('GSE\d*', web.text).group()\r\n    return GSEnum\r\n\r\ndef GetInfo(GSEnum):\r\n    # For each GSE number return a list that includes GSE number, status, title, organism, experiment type, summary and overall design\r\n    url = ''.join(['https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=', GSEnum])\r\n    header = {\r\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',\r\n        'Connection': 'keep-alive'\r\n    }\r\n    web = requests.get(url,headers=header)\r\n    soup = BeautifulSoup(web.text, 'lxml')\r\n    if re.search('[Kk]nock(ed)?[\s-]?[Dd]own',web.text) != None:\r\n        stat = 'KD'\r\n    elif re.search('[Kk]nock(ed)?[\s-]?[Oo]ut',web.text) != None:\r\n        stat = 'KO'\r\n    elif re.search('[Oo]ver[\s-]?[Ee]xpress',web.text) != None:\r\n        stat = 'OE'\r\n    else:\r\n        stat = 'Unknown'\r\n    status = soup.select(\r\n        'table > tr > td > table:nth-of-type(6) > tr:nth-of-type(3) > td:nth-of-type(2) > table > tr > td > table > tr > td > table:nth-of-type(2) > tr > td > table:nth-of-type(1) > tr:nth-of-type(2) > td:nth-of-type(2)')[0]\r\n    title = soup.select(\r\n        'table > tr > td > table:nth-of-type(6) > tr:nth-of-type(3) > td:nth-of-type(2) > table > tr > td > table > tr > td > table:nth-of-type(2) > tr > td > table:nth-of-type(1) > tr:nth-of-type(3) > td:nth-of-type(2)')[0]\r\n    organism = soup.select(\r\n        'table > tr > td > table:nth-of-type(6) > tr:nth-of-type(3) > td:nth-of-type(2) > table > tr > td > table > tr > td > table:nth-of-type(2) > tr > td > table:nth-of-type(1) > tr:nth-of-type(4) > td:nth-of-type(2)')[0]\r\n    experiment = soup.select(\r\n        'table > tr > td > table:nth-of-type(6) > tr:nth-of-type(3) > td:nth-of-type(2) > table > tr > td > table > tr > td > table:nth-of-type(2) > tr > td > table:nth-of-type(1) > tr:nth-of-type(5) > td:nth-of-type(2)')[0]\r\n    summary = soup.select(\r\n        'table > tr > td > table:nth-of-type(6) > tr:nth-of-type(3) > td:nth-of-type(2) > table > tr > td > table > tr > td > table:nth-of-type(2) > tr > td > table:nth-of-type(1) > tr:nth-of-type(6) > td:nth-of-type(2)')[0]\r\n    design = soup.select(\r\n        'table > tr > td > table:nth-of-type(6) > tr:nth-of-type(3) > td:nth-of-type(2) > table > tr > td > table > tr > td > table:nth-of-type(2) > tr > td > table:nth-of-type(1) > tr:nth-of-type(8) > td:nth-of-type(2)')[0]\r\n    pattern = re.compile(r'<[^>]+>', re.S)\r\n    status = pattern.sub('', str(status))\r\n    title = pattern.sub('', str(title))\r\n    organism = pattern.sub('', str(organism))\r\n    experiment = str(experiment).replace('<br>
    ','; ', 1)\r\n experiment = pattern.sub('', experiment)\r\n summary = pattern.sub('', str(summary))\r\n design = pattern.sub('', str(design))\r\n # delete html label\r\n res = [GSEnum,status,title,organism,experiment,summary,design,str(stat)]\r\n return res\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='Get Information From GEO dataset.')\r\n parser.add_argument('-i','--input', action='store', help='the path of input file, and you should import a gene list file separated by \"\\n\".')\r\n parser.add_argument('-o','--output', action='store', help='the path of output file.')\r\n parser.add_argument('--header', action='store_true', help='decide if the output file has a header.')\r\n args = parser.parse_args()\r\n print('Your input file is '+ args.input + ' and output file is ' + args.output + ' with header = ' + str(args.header) + '.\\nProcess is going, please wait...')\r\n file = open(args.input,'r') # You should import a gene list file separated by '\\n'\r\n gene = []\r\n for i in file.readlines():\r\n gene.append(i.rstrip('\\n'))\r\n file.close()\r\n with open(args.output,'a+',encoding='utf-8') as output:\r\n if args.header:\r\n output.write('\\t'.join(['Symbol','GSEnumber','Status','Title','Organism','Experiment','Summary','Design','Type'])+'\\n')\r\n for symbol in gene:\r\n ID = getID(symbol)\r\n print('There are '+str(ID.__len__())+' IDs found in gene '+str(symbol)+'.')\r\n cnt = 0\r\n for id in ID:\r\n print('In '+str(symbol)+', left '+ str(ID.__len__()-cnt) +' to go.')\r\n cnt += 1\r\n GSE = transtoGSE(id)\r\n time.sleep(4)\r\n output.write(symbol+'\\t'+'\\t'.join(GetInfo(GSE))+'\\n')\r\n print('Finished!')\r\n","sub_path":"GetInfoFromGEO/GetInfoFromGEO.py","file_name":"GetInfoFromGEO.py","file_ext":"py","file_size_in_byte":5926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"607895876","text":"from Products.Formulator.Field import ZMIField\nfrom Products.Formulator import Widget\nfrom Products.Formulator.DummyField import fields\nfrom Products.Formulator import Validator\nfrom zLOG import LOG, ERROR\n\nclass GadgetWidget(Widget.TextWidget):\n \"\"\"\n A widget that displays a renderjs gadget\n \"\"\"\n property_names = Widget.TextWidget.property_names + \\\n ['gadget_url', 'js_sandbox']\n\n gadget_url = fields.StringField('gadget_url',\n title='Gadget Url',\n description=(\"The url of the html page containing the \\\n gadget\"),\n default='',\n required=0)\n\n js_sandbox = fields.StringField('js_sandbox',\n title='Gadget Sandbox',\n description=(\"Gadget sandbox\"),\n default='',\n required=0)\n\n def render(self, field, key, value, REQUEST, render_prefix=None):\n return self.render_view(field, value, REQUEST, render_prefix, key)\n\n def render_view(self, field, value, REQUEST=None, render_prefix=None, key=None):\n kw = {}\n kw['data-gadget-url'] = field.get_value('gadget_url')\n kw['data-gadget-scope'] = field.id\n if key is not None:\n kw['data-gadget-editable'] = key\n kw['class'] = \"gadget\"\n kw['data-gadget-value'] = value\n kw['data-gadget-sandbox'] = field.get_value('js_sandbox')\n return Widget.render_element(\"div\",\n **kw)\n\n def get_javascript_list(self, field, REQUEST=None):\n \"\"\"\n Returns list of javascript needed by the widget\n \"\"\"\n js_list = ['rsvp.js', 'renderjs.js', 'erp5_gadgetfield.js',\n 'jio_sha256.amd.js', 'jio.js']\n result = []\n try:\n for js_file in js_list:\n result.append(field.restrictedTraverse(js_file).absolute_url()) \n except KeyError:\n 
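# --- Editor's aside (not part of GetInfoFromGEO.py): the KD/KO/OE branch above,
# pulled into a testable helper with the same regexes (stdlib re only):
import re

def classify_perturbation(text):
    t = text.lower()
    if re.search(r'knock(ed)?[\s-]?down', t):
        return 'KD'
    if re.search(r'knock(ed)?[\s-]?out', t):
        return 'KO'
    if re.search(r'over[\s-]?express', t):
        return 'OE'
    return 'Unknown'

# classify_perturbation("shRNA knock-down of TP53") -> 'KD'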
LOG('keyError:', ERROR, 'Error Value: %s' % js_file)\n return []\n\n return result\n\n\nGadgetWidgetInstance = GadgetWidget()\n\nclass GadgetField(ZMIField):\n meta_type = \"GadgetField\"\n\n widget = GadgetWidgetInstance\n validator = Validator.FileValidatorInstance\n","sub_path":"product/ERP5Form/GadgetField.py","file_name":"GadgetField.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"487147155","text":"\"\"\"\nFunctional tests for auth module\n\"\"\"\nfrom django.contrib.auth import get_user_model\nfrom django.test import LiveServerTestCase\n\nimport configurations\nconfigurations.setup()\n\n\nclass HomePageTestCase(LiveServerTestCase):\n \"\"\"\n Functional tests for admin page\n \"\"\"\n serialized_rollback = True\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n user = get_user_model()\n user.objects.create(email='admin@test.test')\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n user = get_user_model()\n user.objects.filter(email='admin@test.test').delete()\n\n def test_admin_account(self):\n \"\"\" Test home page \"\"\"\n user = get_user_model()\n user.objects.filter(email='admin@test.test')\n assert user.objects.count()\n","sub_path":"accounts/tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"55141895","text":"import unittest\n\nfrom Battleship.battleship import Battleship as b\n\n\nclass TestBattleshipLogic(unittest.TestCase):\n @staticmethod\n def place_all_ships(game, player):\n b.place_ship(game, player, (0, 0), True)\n b.place_ship(game, player, (9, 0))\n b.place_ship(game, player, (7, 9), True)\n b.place_ship(game, player, (0, 9), True)\n b.place_ship(game, player, (0, 3))\n b.place_ship(game, player, (2, 3), True)\n b.place_ship(game, player, (2, 5), True)\n b.place_ship(game, player, (5, 3), True)\n b.place_ship(game, player, (7, 3), True)\n b.place_ship(game, player, (6, 7), True)\n\n @staticmethod\n def win(player):\n win = (\n (0, 0), (1, 0), (2, 0), (3, 0), (9, 0), (9, 1), (9, 2), (7, 9), (8, 9), (9, 9), (0, 9), (1, 9), (0, 3),\n (0, 4), (2, 3), (3, 3), (2, 5), (5, 3), (7, 3), (6, 7))\n g = b.new_game(10, 10, [[4, 1], [3, 2], [2, 3], [1, 4]])\n TestBattleshipLogic.place_all_ships(g, 0)\n TestBattleshipLogic.place_all_ships(g, 1)\n if player == 0:\n for i in range(len(win)):\n b.do_move(g, win[i])\n if i < len(win) - 1:\n b.do_move(g, (i % 10, i // 10))\n else:\n for i in range(len(win)):\n b.do_move(g, (i % 10, i // 10))\n b.do_move(g, win[i])\n return g\n\n def test_new_game_keys(self):\n g = b.new_game()\n if g[\"width\"] is None or g[\"height\"] is None or g[\"ships\"] is None or g[\"board\"] is None or g[\"state\"] is None:\n self.fail(\"some of the game state keys are missing\")\n\n def test_new_game_board(self):\n g = b.new_game()\n self.assertEqual(len(g[\"board\"]), 2)\n self.assertEqual(len(g[\"board\"][0]), g[\"height\"])\n self.assertEqual(len(g[\"board\"][1]), g[\"height\"])\n for i in range(g[\"height\"]):\n self.assertEqual(len(g[\"board\"][0][i]), g[\"width\"])\n self.assertEqual(len(g[\"board\"][1][i]), g[\"width\"])\n for y in range(g[\"height\"]):\n for x in range(g[\"width\"]):\n self.assertEqual(g[\"board\"][0][x][y], None)\n self.assertEqual(g[\"board\"][1][x][y], None)\n\n def test_new_game_state(self):\n g = b.new_game(10, 10, [[4, 1], [3, 2], [2, 3], [1, 
4]])\n self.assertEqual(g[\"state\"], [\"placing ships\", [0, 0]])\n\n def test_player0_place_ships(self):\n g = b.new_game(10, 10, [[4, 1], [3, 2], [2, 3], [1, 4]])\n TestBattleshipLogic.place_all_ships(g, 0)\n self.assertEqual(g[\"state\"], ['placing ships', [10, 0]])\n\n def test_player1_place_ships(self):\n g = b.new_game(10, 10, [[4, 1], [3, 2], [2, 3], [1, 4]])\n TestBattleshipLogic.place_all_ships(g, 1)\n self.assertEqual(g[\"state\"], ['placing ships', [0, 10]])\n\n def test_all_ships_placed(self):\n g = b.new_game(10, 10, [[4, 1], [3, 2], [2, 3], [1, 4]])\n TestBattleshipLogic.place_all_ships(g, 0)\n TestBattleshipLogic.place_all_ships(g, 1)\n self.assertEqual(g[\"state\"], ['playing', 0])\n\n def test_player0_win(self):\n g = TestBattleshipLogic.win(0)\n self.assertEqual(g[\"state\"], [\"end\", 0])\n\n def test_player1_win(self):\n g = TestBattleshipLogic.win(1)\n self.assertEqual(g[\"state\"], [\"end\", 1])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"Battleship/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"172827829","text":"import angr\nimport avatar2\nimport claripy\nimport os\n\nfrom angr_targets import AvatarGDBConcreteTarget\n\n\nbinary_x64 = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n os.path.join('..', '..', 'binaries', 'tests', 'x86_64',\n 'windows', 'not_packed_pe64.exe'))\n\n\nGDB_SERVER_IP = '192.168.59.164'\nGDB_SERVER_PORT = 9999\n\nSTARTING_DECISION_ADDRESS = 0x401786\nDROP_V1 = 0x4017FC\nDROP_V2 = 0x401827\nMALWARE_EXECUTION_END = 0x401863\nFAKE_CC = 0x40184D\nVENV_DETECTED = 0x401835\n\n\navatar_gdb = None\n\n\ndef setup_x64():\n print(\"Configure a windows machine with a static IP %s. 
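# --- Editor's aside (not part of the original tests): the filler moves above walk
# the whole board via (i % 10, i // 10), i.e. column = i mod width, row = i div width:
def index_to_coord(i, width=10):
    return (i % width, i // width)

# [index_to_coord(i) for i in (0, 1, 9, 10, 95)] -> [(0, 0), (1, 0), (9, 0), (0, 1), (5, 9)]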
\"\n \"Check windows firewall configurations to be sure that the connections to %s:%s are not blocked\\n\"\n \"Install gdbserver on the machine, b\"\n \"e careful the architecture (x86 or x64) of gdbserver should be the same as the debugged binary.\\n\"\n \"Currently using Cygwin for 32 bit gdbserver and Cygwin for 64 bit gdbserver\" % (GDB_SERVER_IP,\n GDB_SERVER_IP,\n GDB_SERVER_PORT))\n\n print(\"On windows machine execute gdbserver %s:%s path/to/simple_crackme.exe\" % (GDB_SERVER_IP, GDB_SERVER_PORT))\n input(\"Press enter when gdbserver has been executed\")\n\n\ndef teardown():\n global avatar_gdb\n if avatar_gdb:\n avatar_gdb.exit()\n\n\ndef test_concrete_engine_windows_x64_no_simprocedures():\n print(\"test_concrete_engine_windows_x64_no_simprocedures\")\n global avatar_gdb\n try:\n # pylint: disable=no-member\n avatar_gdb = AvatarGDBConcreteTarget(avatar2.archs.x86.X86_64, GDB_SERVER_IP, GDB_SERVER_PORT)\n p = angr.Project(binary_x64, concrete_target=avatar_gdb, use_sim_procedures=False,\n page_size=0x1000)\n entry_state = p.factory.entry_state()\n solv_concrete_engine_windows_x64(p, entry_state)\n except ValueError:\n print(\"Failing executing test\")\n\n\ndef test_concrete_engine_windows_x64_simprocedures():\n print(\"test_concrete_engine_windows_x64_simprocedures\")\n global avatar_gdb\n try:\n # pylint: disable=no-member\n avatar_gdb = AvatarGDBConcreteTarget(avatar2.archs.x86.X86_64, GDB_SERVER_IP, GDB_SERVER_PORT)\n p = angr.Project(binary_x64, concrete_target=avatar_gdb, use_sim_procedures=True,\n page_size=0x1000)\n entry_state = p.factory.entry_state()\n solv_concrete_engine_windows_x64(p, entry_state)\n except ValueError:\n print(\"Failing executing test\")\n\n\ndef test_concrete_engine_windows_x64_unicorn_no_simprocedures():\n print(\"test_concrete_engine_windows_x64_unicorn_no_simprocedures\")\n global avatar_gdb\n try:\n # pylint: disable=no-member\n avatar_gdb = AvatarGDBConcreteTarget(avatar2.archs.x86.X86_64, GDB_SERVER_IP, GDB_SERVER_PORT)\n p = angr.Project(binary_x64, concrete_target=avatar_gdb, use_sim_procedures=False,\n page_size=0x1000)\n entry_state = p.factory.entry_state(add_options=angr.options.unicorn)\n solv_concrete_engine_windows_x64(p, entry_state)\n except ValueError:\n print(\"Failing executing test\")\n\n\ndef test_concrete_engine_windows_x64_unicorn_simprocedures():\n print(\"test_concrete_engine_windows_x64_unicorn_simprocedures\")\n global avatar_gdb\n try:\n # pylint: disable=no-member\n avatar_gdb = AvatarGDBConcreteTarget(avatar2.archs.x86.X86_64, GDB_SERVER_IP, GDB_SERVER_PORT)\n p = angr.Project(binary_x64, concrete_target=avatar_gdb, use_sim_procedures=True,\n page_size=0x1000)\n entry_state = p.factory.entry_state(add_options=angr.options.unicorn)\n solv_concrete_engine_windows_x64(p, entry_state)\n except ValueError:\n print(\"Failing executing test\")\n\n\ndef execute_concretly(p, state, address, concretize):\n simgr = p.factory.simgr(state)\n simgr.use_technique(angr.exploration_techniques.Symbion(find=[address], concretize=concretize))\n exploration = simgr.run()\n return exploration.stashes['found'][0]\n\n\ndef solv_concrete_engine_windows_x64(p, entry_state):\n print(\"[1]Executing malware concretely until address: \" + hex(STARTING_DECISION_ADDRESS))\n new_concrete_state = execute_concretly(p, entry_state, STARTING_DECISION_ADDRESS, [])\n\n # declaring symbolic buffer\n arg0 = claripy.BVS('arg0', 8 * 32)\n symbolic_buffer_address = new_concrete_state.regs.rbp - 0x60\n 
new_concrete_state.memory.store(new_concrete_state.solver.eval(symbolic_buffer_address), arg0)\n\n print(\"[2]Symbolically executing malware to find dropping of second stage [ address: \" + hex(DROP_V1) + \" ]\")\n simgr = p.factory.simgr(new_concrete_state)\n exploration = simgr.explore(find=DROP_V1, avoid=[FAKE_CC, DROP_V2, VENV_DETECTED])\n new_symbolic_state = exploration.stashes['found'][0]\n\n print(\"[3]Executing malware concretely with solution found until the end \" + hex(MALWARE_EXECUTION_END))\n execute_concretly(p, new_symbolic_state, MALWARE_EXECUTION_END, [(symbolic_buffer_address, arg0)])\n\n print(\"[4]Malware execution ends, the configuration value downloaded from C&C is: \" + hex(\n new_symbolic_state.solver.eval(arg0, cast_to=int)))\n","sub_path":"tests/manual_concrete_not_packed_pe64.py","file_name":"manual_concrete_not_packed_pe64.py","file_ext":"py","file_size_in_byte":5361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"160851114","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/noval/python/interpreter/gerneralconfiguration.py\n# Compiled at: 2019-10-17 01:45:10\n# Size of source mod 2**32: 10826 bytes\nfrom noval import _, GetApp\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import messagebox\nimport noval.consts as consts, noval.util.utils as utils, noval.python.interpreter.interpretermanager as interpretermanager, noval.python.parser.intellisence as intellisence, noval.util.apputils as sysutilslib, noval.util.fileutils as fileutils, noval.python.parser.run as pythonrun, os, noval.ui_utils as ui_utils\n\nclass InterpreterGeneralConfigurationPanel(ui_utils.BaseConfigurationPanel):\n __doc__ = 'description of class'\n\n def __init__(self, parent):\n ui_utils.BaseConfigurationPanel.__init__(self, parent)\n self._showBuiltinInterpreterWindowVar = tk.IntVar(value=utils.profile_get_int(consts.PYTHON_INTERPRETER_VIEW_NAME + 'ViewVisible', False))\n showBuiltinInterpreterWindowCheckBox = ttk.Checkbutton(self, text=_('Show the builtin interpreter window'), variable=self._showBuiltinInterpreterWindowVar)\n showBuiltinInterpreterWindowCheckBox.pack(padx=consts.DEFAUT_CONTRL_PAD_X, fill='x', pady=(consts.DEFAUT_CONTRL_PAD_Y, 0))\n self._createIntellienseDatabaseVar = tk.IntVar(value=utils.profile_get_int('AutoGenerateDatabase', True))\n createIntellienseDatabaseCheckBox = ttk.Checkbutton(self, text=_('Automatically generate intellisence database when add interpreter'), variable=self._createIntellienseDatabaseVar)\n createIntellienseDatabaseCheckBox.pack(padx=consts.DEFAUT_CONTRL_PAD_X, fill='x')\n self._embedInterpreterVar = tk.IntVar(value=utils.profile_get_int('EmbedInterpreterInterminator', True))\n embedInterpreterCheckBox = ttk.Checkbutton(self, text=_('Embed the interpreter when open terminal'), variable=self._embedInterpreterVar)\n embedInterpreterCheckBox.pack(padx=consts.DEFAUT_CONTRL_PAD_X, fill='x')\n self._rember_pipsourceVar = tk.IntVar(value=utils.profile_get_int('RemberPipsource', True))\n RemberPipsourceVarCheckBox = ttk.Checkbutton(self, text=_('Rember the pip source last used'), variable=self._rember_pipsourceVar)\n RemberPipsourceVarCheckBox.pack(padx=consts.DEFAUT_CONTRL_PAD_X, fill='x')\n box_frame = ttk.LabelFrame(self, text=_('Intellisence database update interval'))\n box_frame.pack(padx=consts.DEFAUT_CONTRL_PAD_X, fill='x', 
pady=(consts.DEFAUT_CONTRL_PAD_Y, 0))\n        self._update_interval_var = tk.IntVar(value=utils.profile_get_int('DatabaseUpdateInterval', consts.UPDATE_ONCE_DAY))\n        updateEveryStartupRadioBtn = ttk.Radiobutton(box_frame, text=_('Once when startup'), value=consts.UPDATE_ONCE_STARTUP, variable=self._update_interval_var)\n        updateEveryStartupRadioBtn.pack(padx=consts.DEFAUT_CONTRL_PAD_X, fill='x')\n        updateEveryDayRadioBtn = ttk.Radiobutton(box_frame, text=_('Once a day'), value=consts.UPDATE_ONCE_DAY, variable=self._update_interval_var)\n        updateEveryDayRadioBtn.pack(padx=consts.DEFAUT_CONTRL_PAD_X, fill='x')\n        updateEveryWeekRadioBtn = ttk.Radiobutton(box_frame, text=_('Once a week'), value=consts.UPDATE_ONCE_WEEK, variable=self._update_interval_var)\n        updateEveryWeekRadioBtn.pack(padx=consts.DEFAUT_CONTRL_PAD_X, fill='x')\n        updateEveryMonthRadioBtn = ttk.Radiobutton(box_frame, text=_('Once a month'), value=consts.UPDATE_ONCE_MONTH, variable=self._update_interval_var)\n        updateEveryMonthRadioBtn.pack(padx=consts.DEFAUT_CONTRL_PAD_X, fill='x')\n        neverUpdateRadioBtn = ttk.Radiobutton(box_frame, text=_('Never'), value=consts.NEVER_UPDATE_ONCE, variable=self._update_interval_var)\n        neverUpdateRadioBtn.pack(padx=consts.DEFAUT_CONTRL_PAD_X, fill='x')\n        if GetApp().GetDebug():\n            sbox = ttk.LabelFrame(self, text=_('Intellisence database location'))\n            sbox.pack(padx=consts.DEFAUT_CONTRL_PAD_X, fill='x', pady=(consts.DEFAUT_CONTRL_PAD_Y, 0))\n            row = ttk.Frame(sbox)\n            interpreterLabelText = ttk.Label(row, text=_('Interpreter:'))\n            interpreterLabelText.pack(side=tk.LEFT, pady=(consts.DEFAUT_CONTRL_PAD_Y, 0))\n            choices, default_selection = interpretermanager.InterpreterManager().GetChoices()\n            self.interpreterCombo = ttk.Combobox(row, values=choices)\n            self.interpreterCombo.state(['readonly'])\n            if len(choices) > 0:\n                self.interpreterCombo.current(default_selection)\n            self.interpreterCombo.bind('<<ComboboxSelected>>', self.OnSelectInterpreter)\n            self.interpreterCombo.pack(side=tk.LEFT, fill='x', pady=(consts.DEFAUT_CONTRL_PAD_Y, 0))\n            row.pack(padx=consts.DEFAUT_CONTRL_PAD_X, fill='x', pady=(0, consts.DEFAUT_CONTRL_PAD_Y))\n            row = ttk.Frame(sbox)\n            locationLabelText = ttk.Label(row, text=_('Database Location:'))\n            locationLabelText.pack(side=tk.LEFT, fill='x')\n            self.location_var = tk.StringVar()\n            locationControl = ttk.Entry(row, textvariable=self.location_var)\n            locationControl['state'] = tk.DISABLED\n            locationControl.pack(side=tk.LEFT, fill='x', expand=1)\n            row.pack(padx=consts.DEFAUT_CONTRL_PAD_X, fill='x', pady=(0, consts.DEFAUT_CONTRL_PAD_Y))\n            row = ttk.Frame(sbox)\n            into_file_explower_btn = ttk.Button(row, text=_('Into file explorer'), command=self.IntoFileExplorer)\n            into_file_explower_btn.pack(side=tk.LEFT, fill='x', padx=(0, consts.DEFAUT_CONTRL_PAD_X))\n            copy_path_btn = ttk.Button(row, text=_('Copy path'), command=self.CopyDatabasePath)\n            copy_path_btn.pack(side=tk.LEFT, fill='x', padx=(0, consts.DEFAUT_CONTRL_PAD_X))\n            database_version_btn = ttk.Button(row, text=_('Database version'), command=self.GetDatabaseVersion)\n            database_version_btn.pack(side=tk.LEFT, fill='x', padx=(0, consts.DEFAUT_CONTRL_PAD_X))\n            last_update_btn = ttk.Button(row, text=_('Last update time'), command=self.GetLastUpdateTime)\n            last_update_btn.pack(side=tk.LEFT, fill='x', padx=(0, consts.DEFAUT_CONTRL_PAD_X))\n            clear_data_btn = ttk.Button(row, text=_('Clear data'), command=self.ClearIntellisenceData)\n            clear_data_btn.pack(side=tk.LEFT, fill='x', padx=(0, consts.DEFAUT_CONTRL_PAD_X))\n            row.pack(padx=consts.DEFAUT_CONTRL_PAD_X, fill='x', pady=(0, 
consts.DEFAUT_CONTRL_PAD_Y))\n            self.OnSelectInterpreter()\n\n    def IntoFileExplorer(self):\n        location = self.location_var.get()\n        fileutils.safe_open_file_directory(location)\n\n    def CopyDatabasePath(self):\n        path = self.location_var.get()\n        sysutilslib.CopyToClipboard(path)\n        messagebox.showinfo('', _('Copied to clipboard'))\n\n    def GetDatabaseVersion(self):\n        interpreter = self.GetCurrentInterpreter()\n        if interpreter is None:\n            return\n        try:\n            intellisence_data_path = intellisence.IntellisenceManager().GetInterpreterIntellisenceDataPath(interpreter)\n            database_version = pythonrun.LoadDatabaseVersion(intellisence_data_path)\n            messagebox.showinfo('', database_version)\n        except Exception as e:\n            messagebox.showerror('', str(e))\n\n    def GetLastUpdateTime(self):\n        interpreter = self.GetCurrentInterpreter()\n        if interpreter is None:\n            return\n        try:\n            intellisence_data_path = intellisence.IntellisenceManager().GetInterpreterIntellisenceDataPath(interpreter)\n            last_update_time = intellisence.IntellisenceManager().GetLastUpdateTime(intellisence_data_path)\n            messagebox.showinfo('', last_update_time)\n        except Exception as e:\n            messagebox.showerror('', str(e))\n\n    def ClearIntellisenceData(self):\n        interpreter = self.GetCurrentInterpreter()\n        if interpreter is None:\n            return\n        intellisence_data_path = intellisence.IntellisenceManager().GetInterpreterIntellisenceDataPath(interpreter)\n        for f in os.listdir(intellisence_data_path):\n            file_path = os.path.join(intellisence_data_path, f)\n            os.remove(file_path)\n\n    def GetCurrentInterpreter(self):\n        selection = self.interpreterCombo.current()\n        if -1 == selection:\n            return\n        interpreter = interpretermanager.InterpreterManager().interpreters[selection]\n        return interpreter\n\n    def OnSelectInterpreter(self, event=None):\n        interpreter = self.GetCurrentInterpreter()\n        if interpreter is None:\n            return\n        database_path = intellisence.IntellisenceManager().GetInterpreterDatabasePath(interpreter)\n        self.location_var.set(database_path)\n\n    def GetUpdateIntervalOption(self):\n        return self._update_interval_var.get()\n\n    def SetUpdateIntervalOption(self):\n        update_interval_option = utils.profile_get_int('DatabaseUpdateInterval', consts.UPDATE_ONCE_STARTUP)\n        if update_interval_option == consts.UPDATE_ONCE_DAY:\n            self.updateEveryDayRadioBtn.SetValue(True)\n        else:\n            if update_interval_option == consts.UPDATE_ONCE_MONTH:\n                self.updateEveryMonthRadioBtn.SetValue(True)\n            else:\n                if update_interval_option == consts.UPDATE_ONCE_WEEK:\n                    self.updateEveryWeekRadioBtn.SetValue(True)\n                else:\n                    if update_interval_option == consts.UPDATE_ONCE_STARTUP:\n                        self.updateEveryStartupRadioBtn.SetValue(True)\n                    else:\n                        self.neverUpdateRadioBtn.SetValue(True)\n\n    def OnOK(self, optionsDialog):\n        GetApp().MainFrame.ShowView(consts.PYTHON_INTERPRETER_VIEW_NAME, hidden=not self._showBuiltinInterpreterWindowVar.get(), toogle_visibility_flag=True)\n        utils.profile_set(consts.PYTHON_INTERPRETER_VIEW_NAME + 'ViewVisible', int(self._showBuiltinInterpreterWindowVar.get()))\n        utils.profile_set('DatabaseUpdateInterval', self.GetUpdateIntervalOption())\n        utils.profile_set('AutoGenerateDatabase', int(self._createIntellienseDatabaseVar.get()))\n        utils.profile_set('EmbedInterpreterInterminator', self._embedInterpreterVar.get())\n        utils.profile_set('RemberPipsource', self._rember_pipsourceVar.get())\n        return True\n\n    def IsAutoGenerateDatabase(self):\n        return 
self._createIntellienseDatabaseVar.get()","sub_path":"pycfiles/NovalIDE-1.1.8-py3.5/gerneralconfiguration.cpython-35.py","file_name":"gerneralconfiguration.cpython-35.py","file_ext":"py","file_size_in_byte":10540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"329673844","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[51]:\n\n\nimport database_functions as db\nimport pandas as pd\nfrom datetime import datetime\nimport discord as ds\nfrom discord import Webhook, RequestsWebhookAdapter\nimport requests\nimport time\nimport os\nimport csv\n\n\n# In[52]:\n\n\n#datatest = db.contractmakerfordb()\n#datatest = pd.read_csv('usethis.csv', header = 0, names = ['market_name', 'market_id', 'contract_best_buy_no_price','market_name', 'market_id', 'fine'])\n#datatest.head(10)\n\n\n# In[ ]:\n\n\n\n\n\n# In[53]:\n\n\ndef riskless_algo():\n runtime_start = datetime.now()\n data = db.contractmakerfordb()\n #data = datatest\n goodmarkets = []\n idlist = []\n marketids = data.market_id.unique()\n for market in marketids:\n datalist = data[data.market_id == market].contract_best_buy_no_price\n prices1 = datalist.dropna()\n sum_list = prices1.sum()\n if sum_list < len(prices1):\n \n prices = prices1.tolist()\n prices.sort()\n marketname = data[data.market_id == market].market_name.drop_duplicates()\n \n finalprice = [1] * (len(prices) - 1)\n finalprice.append(0)\n \n grossprofit = []\n for i in range(len(prices)-1):\n grossprofit.append(finalprice[i] - prices[i])\n grossprofit[i] = round(grossprofit[i] * 0.9, 3)\n grossprofit.append(0)\n addprin = []\n for i in range(len(prices)):\n addprin.append(prices[i] + grossprofit[i])\n addprin[len(prices) - 1] = 0\n risk = (sum(addprin) - round(sum(prices),3))\n if risk > 0.01:\n goodmarkets.append([market, marketname.to_string(index=False), round(risk*1000)/1000, prices])\n idlist.append(market)\n #add conditional - check the API again to see if prices changed (recursive)\n else:\n None\n debuglist = goodmarkets\n runtime_end = datetime.now()\n total_runtime = runtime_end-runtime_start\n return goodmarkets, debuglist, idlist, total_runtime\n\n\n# In[54]:\n\n\ndef riskless_internal(debug_file):\n now= datetime.now()\n nowstring = now.hour\n webhook = Webhook.partial(674056380772253697, \n 'RTuPvf30qDTtx-WM3s_bfiqxtEJ29KRHadOOqW-2glW-zfCW1Q8NHFIRu1px7qU2RqFi', \n adapter=RequestsWebhookAdapter())\n previouslist = []\n prev_ids = []\n while nowstring != 3:\n marketlist, debuglist, ids, runtime = riskless_algo()\n# print(marketlist)\n# print(debuglist)\n# print(ids)\n if debuglist == []:\n if previouslist != []:\n webhook.send('Markets are no longer riskless!')\n else:\n None\n else:\n n = 0\n m = 0\n for prev_id in prev_ids:\n n = n+1\n if prev_ids == []:\n None\n elif prev_id not in ids:\n webhook.send('Market no longer riskless! 
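# --- Editor's aside (not part of the original notebook): a worked instance of the
# riskless test above. In a market where exactly one contract resolves YES, buying
# one NO share of every contract leaves n - 1 winning shares:
prices_no = [0.30, 0.40, 0.45]     # ask price of one NO share per contract
cost = sum(prices_no)              # 1.15 to cover the whole market
worst_payout = len(prices_no) - 1  # at least 2 NO shares pay $1 each
# worst_payout (2.00) > cost (1.15), which is why sum(prices) < len(prices) flags
# a candidate; the script then discounts each winner's profit by PredictIt's 10% fee.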
Market ID: '+str(prev_id)+', Market Name: '+\n previouslist[n-1][1])\n else:\n None\n for good_id in ids:\n m = m+1\n if good_id in prev_ids:\n None\n else:\n webhook.send('Market ID: '+str(good_id)+', Market Name: '+\n debuglist[m-1][1]+', Expected Profit per Share: '+ str(debuglist[m-1][2])\n +', Link to Market: https://www.predictit.org/markets/detail/'+str(good_id)+\n ' , Prices of Contracts: ' + str(debuglist[m-1][3]))\n previouslist = debuglist\n prev_ids = ids\n with open(debug_file, 'a') as file:\n writer = csv.writer(file)\n line = debuglist\n insertion = ([datetime.now()]+line)\n writer.writerow(insertion)\n file.close()\n time.sleep(60)\n now= datetime.now()\n nowstring = now.hour\n else:\n time.sleep(60)\n now= datetime.now()\n nowstring = now.hour\n riskless_internal(debug_file)\n\n\n# In[55]:\n\n\ndef riskless_loop():\n debug_file = 'riskless_debug_file.csv'\n if os.path.exists(debug_file) == True:\n os.remove(debug_file)\n try: \n riskless_internal(debug_file)\n except: \n time.sleep(60)\n riskless_loop()\n else:\n try:\n riskless_internal(debug_file)\n except:\n time.sleep(60)\n riskless_loop()\n\n\n# In[56]:\n\n\nriskless_loop()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Riskless/Riskless_Analysis_v1.py","file_name":"Riskless_Analysis_v1.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"222669496","text":"\"\"\"Example 001: Creating a clickwrap\"\"\"\n\nfrom os import path\nimport json\n\nfrom docusign_click.client.api_exception import ApiException\nfrom flask import render_template, current_app, Blueprint, session\n\nfrom .controller import Eg001Controller\nfrom app.docusign import authenticate\nfrom app.ds_config import DS_CONFIG\nfrom app.error_handlers import process_error\n\neg = \"eg001\" # Reference (and URL) for this example\neg001 = Blueprint(\"eg001\", __name__)\n\n\n@eg001.route(\"/eg001\", methods=[\"POST\"])\n@authenticate(eg=eg)\ndef create_clickwrap():\n \"\"\"\n 1. Get required arguments\n 2. Call the worker method\n 3. Render the response\n \"\"\"\n # 1. Get required arguments\n args = Eg001Controller.get_args()\n\n try:\n # 2. Call the worker method to create a new clickwrap\n results = Eg001Controller.worker(args)\n clickwrap_id = results.clickwrap_id\n clickwrap_name = args['clickwrap_name']\n current_app.logger.info(\n f\"\"\"The clickwrap \"{clickwrap_name}\" has been created!\"\"\"\n )\n except ApiException as err:\n return process_error(err)\n\n # Save for use by other examples which need a clickwrap parameter.\n session[\"clickwrap_id\"] = clickwrap_id\n session[\"clickwrap_name\"] = clickwrap_name\n session[\"clickwrap_is_active\"] = False\n\n # 3. 
Render the response\n return render_template(\n \"example_done.html\",\n title=\"Creating a new clickwrap\",\n h1=\"Creating a new clickwrap\",\n message=f\"\"\"The clickwrap \"{args['clickwrap_name']}\" has been created!\"\"\",\n json=json.dumps(json.dumps(results.to_dict(), default=str))\n )\n\n\n@eg001.route(\"/eg001\", methods=[\"GET\"])\n@authenticate(eg=eg)\ndef get_view():\n \"\"\"responds with the form for the example\"\"\"\n return render_template(\n \"eg001_create_clickwrap.html\",\n title=\"Creating a new clickwrap\",\n source_file=path.basename(path.dirname(__file__)) + \"/controller.py\",\n source_url=DS_CONFIG[\"github_example_url\"] + path.basename(\n path.dirname(__file__)) + \"/controller.py\",\n )\n","sub_path":"app/click/examples/eg001_create_clickwrap/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"68201961","text":"import requests\nimport os\nfrom bs4 import BeautifulSoup\nimport reader\nimport re\nimport json\nfrom link import Link\n\ns = requests.session()\nr = s.get('https://globenewswire.com/Rss/orgclass/1')\nsoup = BeautifulSoup(r.content)\n\ntitles = soup.select('item title')\ndescriptions = soup.select('item description')\nlinks = soup.select('item link')\n\nclass GlobeNewswire:\n\tdef __init__(self):\n\t\tself.url = 'https://globenewswire.com/Rss/orgclass/1'\n\n\tdef getLinks(self):\n\t\ts = requests.Session()\n\t\tr = s.get(self.url)\n\t\tsoup = BeautifulSoup(r.content)\n\t\tlinksArray = []\n\n\t\titems = soup.findAll('item')\n\t\t\n\t\tindex = 0;\n\n\t\tfor item in items:\n\t\t\ttitle = soup.select('item title')\n\t\t\tdescription = soup.select('item description')\n\t\t\tlink = soup.select('item link')\n\t\t\t\n\t\t\tmatch = re.search(\"[(]\\s?nasdaq(:| :|: | :|)\\s?(?P[a-z][a-z][a-z][a-z]?)\\s?[)]\",item.getText().lower())\n\t\t\tif match:\n\t\t\t\tif match.group(\"symbol\"):\n\t\t\t\t\tfor symbol in reader.requestArray:\n\t\t\t\t\t\tif symbol[0].lower() == match.group(\"symbol\"):\n\t\t\t\t\t\t\tnewLink = Link()\n\t\t\t\t\t\t\tnewLink.symbol = symbol[0]\n\t\t\t\t\t\t\tnewLink.url = link[index].text\n\t\t\t\t\t\t\tnewLink.text = description[index].text\n\t\t\t\t\t\t\tnewLink.linkText = title[index].text\n\t\t\t\t\t\t\t#newLink.date = dates[index].text.strip()\n\t\t\t\t\t\t\tnewLink.source = \"GlobeNewswire\"\n\t\t\t\t\t\t\tlinksArray.append(newLink)\n\t\t\tindex= index+1\n\n\t\treturn linksArray","sub_path":"Src/globeNewswire.py","file_name":"globeNewswire.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"187642807","text":"import argparse\n\nimport torch\nfrom torch import optim\n\nfrom loader import get_loader\nfrom models import Discriminator, Generator\nfrom Etrainer3 import Trainer\nfrom utils import PlotHelper\nfrom wgan64x64 import GoodGenerator, GoodDiscriminator, Encoder\n\n\ndef main():\n torch.random.manual_seed(1256)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--root', type=str, default='/data/weishizheng/x-ray/rsna-data/train')\n parser.add_argument('--batch-size', type=int, default=64)\n parser.add_argument('--lr', type=float, default=2e-4)\n parser.add_argument('--workers', type=int, default=0)\n parser.add_argument('--epochs', type=int, default=20)\n config = parser.parse_args()\n device = 
torch.device('cuda:0')\n\n    # networks\n    net_g = GoodGenerator().to(device)\n    net_d = GoodDiscriminator().to(device)\n    net_e = Encoder(64, 64).to(device)\n\n    # Load models\n    name = 'xray_model_10'\n    net_e.load_state_dict(torch.load('./models/3rd/enc3_' + name + '.pt'))\n    net_g.load_state_dict(torch.load('./models/3rd/gen_' + name + '.pt'))\n    net_d.load_state_dict(torch.load('./models/3rd/dis_' + name + '.pt'))\n\n    # print(net_g)\n    # print(net_d)\n\n    # optimizer\n    optimizer_e = optim.Adam(net_e.parameters(), lr=config.lr, betas=(0.0, 0.9))\n\n    print(optimizer_e)\n\n    # data loader\n    dataloader = get_loader(config.root, config.batch_size, config.workers)\n    testdataloader = get_loader('/data/weishizheng/x-ray/rsna-data/test', config.batch_size, config.workers)\n\n    trainer = Trainer(net_g, net_d, net_e, optimizer_e, dataloader, testdataloader, device)\n    plotter = PlotHelper('samples/loss.html')\n    for epoch in range(config.epochs):\n        loss_e = trainer.train()\n        trainer.test()\n        print('Train epoch: {}/{},'.format(epoch + 1, config.epochs),\n              'loss e: {:.6f}.'.format(loss_e))\n\n        trainer.save_sample('reconstruction3/sample_{:02d}.jpg'.format(epoch + 1))\n\n    trainer.save_reconstruct('reconstruction3/reconstruct_')\n\n    # Save models\n    name = 'xray_model_10'\n    torch.save(trainer.net_e.state_dict(), './models/3rd/enc3_' + name + '.pt')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"f-anoGAN/encode3.py","file_name":"encode3.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"196078838","text":"# Given a read only array of n + 1 integers between 1 and n, find one number that repeats in linear time using less than O(n) space and traversing the stream sequentially O(1) times.\n\n# Sample Input:\n\n# [3 4 1 4 1]\n# Sample Output:\n\n# 1\n# If there are multiple possible answers ( like in the sample case above ), output any one.\n\n# If there is no duplicate, output -1\nclass Solution:\n    # @param A : tuple of integers\n    # @return an integer\n    def repeatedNumber(self, A):\n        #Treat array as a linked list\n        #Slow moves 1 step\n        slow = A[0]\n        #Fast moves 2 steps\n        fast = A[A[0]]\n        while slow != fast:\n            slow = A[slow]\n            fast = A[A[fast]]\n        fast = 0\n        while slow!=fast:\n            slow = A[slow]\n            fast = A[fast]\n        if slow == 0:\n            return -1\n        return slow\n","sub_path":"InterviewBit/Arrays/find-duplicate-in-array.py","file_name":"find-duplicate-in-array.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
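The repeated-number record above is Floyd's tortoise-and-hare cycle detection in disguise. A minimal standalone sketch of the same idea (the name `find_duplicate` and the assert are illustrative additions, not part of the record):

```python
def find_duplicate(nums):
    """Find a repeated value in an array of n + 1 integers drawn from 1..n."""
    # Treat i -> nums[i] as a linked list; a duplicate value is the entry of a cycle.
    slow = nums[0]          # slow moves 1 step per iteration
    fast = nums[nums[0]]    # fast moves 2 steps per iteration
    while slow != fast:
        slow = nums[slow]
        fast = nums[nums[fast]]
    # Restart one pointer at 0; the next meeting point is the cycle entry,
    # i.e. the repeated value.
    fast = 0
    while slow != fast:
        slow = nums[slow]
        fast = nums[fast]
    return slow if slow != 0 else -1

assert find_duplicate([3, 4, 1, 4, 1]) == 4   # 4 is one of the repeated values
```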
{"seq_id":"99683931","text":"import json\r\nimport smtplib\r\nimport time\r\nimport urllib2\r\n\r\ndef getStreamStatus(users):\r\n\tonline = []\r\n\r\n\tfor user in users:\r\n\t\tresponse = urllib2.urlopen('https://api.twitch.tv/kraken/streams/' + user)\r\n\t\tstream = json.loads(response.read())\r\n\t\tif not (stream['stream'] is None):\r\n\t\t\tonline.append(user)\r\n\t\ttime.sleep(1)\r\n\treturn online\r\n\r\ndef sendEmail(msg):\r\n\t# Gmail account where emails are sent from\r\n\tUSER = 'ur.email@gmail.com'\r\n\t# Gmail account password\r\n\tPASSWORD = 'ur.password'\r\n\t# Email address displayed as the sender\r\n\tFROM = 'ur.email@gmail.com'\r\n\t# List of recipient email addresses\r\n\tTO = ['foo@sample.com', 'bar@sample.com']\r\n\tMSG = '\\r\\n'.join([\r\n\t\t'From: ' + FROM,\r\n\t\t'To: ' + ', '.join(TO),\r\n\t\t'Subject: Twitch users are streaming ...',\r\n\t\t'',\r\n\t\tmsg\r\n\t])\r\n\r\n\tserver = smtplib.SMTP('smtp.gmail.com', 587)\r\n\tserver.ehlo()\r\n\tserver.starttls()\r\n\tserver.login(USER, PASSWORD)\r\n\tserver.sendmail(FROM, TO, MSG)\r\n\tserver.close()\r\n\treturn\r\n\r\ndef init():\r\n\t# List of Twitch usernames to check \r\n\tUSERS = [\r\n\t\t'username'\r\n\t]\r\n\r\n\tonline = getStreamStatus(USERS)\r\n\tif len(online) > 0:\r\n\t\tmsg = '\\r\\n'.join(online)\r\n\t\tsendEmail(msg)\r\n\treturn\r\n\r\ninit()","sub_path":"twitch-notifier.py","file_name":"twitch-notifier.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"330786391","text":"'''\nLeetCode problem 94\nInorder traversal of a binary tree\nInput: [1,null,2,3]\n   1\n    \\\n     2\n    /\n   3\n\nOutput: [1,3,2]\n\nApproach:\nRecursive version: recurse into the left child first, then append the node's own value, then recurse into the right child\n\nIterative version: use a stack; walk down the left subtree pushing each node, then for every node popped off the stack, visit its right subtree\n'''\n# Definition for a binary tree node.\n\nfrom typing import List\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n'''\nRecursive version\n'''\n# class Solution:\n#     def inorderTraversal(self, root: TreeNode) -> List[int]:\n#         res = []\n#         self.helper(root, res)\n#         return res\n#\n#     def helper(self,node,res):\n#         if node:\n#             self.helper(node.left, res)\n#             res.append(node.val)\n#             self.helper(node.right, res)\n\n\n\n'''\nIterative version:\n\n'''\nclass Solution:\n    def inorderTraversal(self, root: TreeNode) -> List[int]:\n        res = []\n        stack =[]\n        node = root\n        while True:\n            while node:\n                stack.append(node)\n                node = node.left\n            #Note: the emptiness check on the stack must happen right here; if it came later, the function would return as soon as the root's left subtree had been traversed\n            if stack == []:\n                return res\n            node = stack.pop()\n            res.append(node.val)\n            node = node.right\n\n","sub_path":"src/StackAndQueue/94BinaryTreeInorderTraversal.py","file_name":"94BinaryTreeInorderTraversal.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"213185732","text":"import re\n\n#Match: check whether the string starts with the regex pattern\ndef reMatch():\n    global resobj\n    pattern = \"[Pp]ython\"\n    str1 = \"天不生python,世间万古如长夜\"\n    str2 = \"python一出谁与争锋\"\n    str3 = \"Python一出谁与争锋\"\n    resobj = re.match(pattern, str1)\n    resobj = re.match(pattern, str2)\n    resobj = re.match(pattern, str3)\n    print(resobj)\n\n#Search: check whether the string contains the regex pattern; only the first occurrence is found\ndef reSearch():\n    global resobj\n    pattern = \"[Pp]ython\"\n    str1 = \"天不生python,世间万古如长夜\"\n    str2 = \"python一出谁与争锋\"\n    str3 = \"天不生python,世间万古如长夜,农不务Python,地头一篇黄草丛,劳资不学派生,劳资吃什么!\"\n    resobj = re.search(pattern, str1)\n    resobj = re.search(pattern, str2)\n    resobj = re.search(pattern, str3)\n    print(resobj)\n\n#Find all substrings that match the regex pattern and return them as a list\ndef reFindall():\n    pattern = \"[Pp]ython|派生\"\n    str1 = \"天不生python,世间万古如长夜\"\n    str2 = \"Python一出谁与争锋\"\n    str3 = \"天不生python,世间万古如长夜,农不务Python,地头一篇黄草丛,劳资不学派生,劳资吃什么!\"\n    reslist = re.findall(pattern, str3)\n    print(reslist)\n\n#Replace every substring that matches the regex pattern with the given string and return the new string\ndef reSub():\n    pattern = \"[Pp]ython|派生\"\n    str1 = \"天不生python,世间万古如长夜,农不务Python,地头一篇黄草丛,劳资不学派生,劳资吃什么!\"\n    # Replace every substring of str1 that matches pattern with 【大派生】 and return the resulting string\n    resstr = re.sub(pattern, \"【大派生】\", str1)\n    print(resstr)\n\n\nif __name__ == '__main__':\n    # reMatch()\n    # reSearch()\n    # reFindall()\n    # reSub()\n    pass\n\n\n\n\n\n\n\n\n\n\n","sub_path":"01HelloRegexExpression.py","file_name":"01HelloRegexExpression.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
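To see the iterative traversal above at work, the sample tree [1,null,2,3] can be rebuilt by hand. This little driver is illustrative only and assumes the TreeNode and Solution classes from the record:

```python
#   1
#    \
#     2
#    /
#   3
root = TreeNode(1)
root.right = TreeNode(2)
root.right.left = TreeNode(3)
assert Solution().inorderTraversal(root) == [1, 3, 2]
```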
{"seq_id":"640853633","text":"import csv\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\n\n# Get dates, highs, and lows from the tucson_1980.csv file.\nfilename = \"tucson_1980.csv\"\nwith open(filename) as f:\n    reader = csv.reader(f)\n    header_row = next(reader)\n\n    dates, highs, lows = [], [], []\n    for row in reader:\n        # Only process rows from Tucson Int'l Airport.\n        if row[1] == \"TUCSON INTERNATIONAL AIRPORT, AZ US\":\n            # Will create variables current_date, high, and low.\n            try:\n                current_date = datetime.strptime(row[2], \"%Y-%m-%d\")\n                high = int(row[3])\n                low = int(row[4])\n            # Exception runs if the above can't be done.\n            except ValueError:\n                print(row[2], 'missing data')\n            # If all is present, variables are appended to their lists.\n            else:\n                dates.append(current_date)\n                highs.append(high)\n                lows.append(low)\n\n# Plotting data.\nfig = plt.figure(dpi=128, figsize=(10, 6))\nplt.plot(dates, highs, c='red', alpha=0.5, label=\"High\")\nplt.plot(dates, lows, c='blue', alpha=0.5, label=\"Low\")\nplt.fill_between(dates, highs, lows, facecolor=\"blue\", alpha=0.1)\n\n# Formatting plot.\nplt.title(\"High and low temps, Tucson AZ\\nJune 1980\", fontsize=18)\nplt.xlabel(\"\", fontsize=12)\nfig.autofmt_xdate()\nplt.ylabel(\"Temperature (F)\", fontsize=12)\nplt.tick_params(axis=\"both\", which=\"major\", labelsize=12)\nplt.legend()\n\nplt.savefig('tucson_temps_6-1980.png')\n","sub_path":"Project 2 - Data Visualization/Chapter 16 - Downloading Data/16-1 Tucson.py","file_name":"16-1 Tucson.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"584535026","text":"#!/usr/bin/env python\nfrom ctypes import *\nimport ConfigParser\n\ndll = cdll.LoadLibrary(\"./cnntext/cnn_predictor.so\")\ndll.load_model.restype = c_void_p\ndll.predict.restype = POINTER(c_float)\n\nconfig = ConfigParser.ConfigParser()\nconfig.read(\"./conf/cnntext.conf\")\n\nmodel_path = config.get('path','model_path')\nsetting_path = config.get('path','setting_path')\nhyfile_path = config.get('path','hyfile_path')\n\nnum_of_class = config.get('para','num_of_class')\ntop = config.get('para','top')\n\n\nclass cnn:\n    \" CNN Predictor \"\n    def __init__(self):\n        self.Loadmodel(model_path,setting_path,hyfile_path,num_of_class,top)\n    \n    def Loadmodel(self, model_filename, setting_filename,hyfile ,num_of_class,top):\n        self.__model = dll.load_model(c_char_p(model_filename), c_char_p(setting_filename))\n        self.__dim = int(num_of_class)\n        self.__top = int(top)\n        self.__hy = []\n        file = open(hyfile)\n        for line in file:\n            self.__hy.append(line.strip().split(' ')[1]) \n\n    def Predict(self, text):\n        res = dll.predict(c_void_p(self.__model), c_char_p(text))\n        # max_prob = 0\n        # imax = -1\n        index = sorted(range(0,self.__dim), key=lambda i: res[i],reverse=True)\n        top_index = index[0:self.__top]\n        prob = [str(res[i]) for i in top_index]\n        label = [self.__hy[i] for i in top_index]\n        result = [0] * 2 * self.__top\n        result[0::2] = label\n        result[1::2] = prob\n        return ' '.join(result)\n\n#if __name__ == '__main__':\n    #from cnn import CNNPredictor\n    #model,line = cnn(),open(\"/home/work/tonghan/LR_Model/test_data/test\",\"r\").readline()\n    #print model.Predict(line)\n\n","sub_path":"SinaMod/cnntext/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
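The CNN predictor above follows the standard ctypes recipe: declare `restype`/`argtypes` for the exported C symbols, then shuttle `c_char_p`/`c_void_p` handles across the boundary. A generic sketch of that pattern (the library path and symbol names here are placeholders, not the record's actual cnn_predictor.so API):

```python
from ctypes import cdll, c_char_p, c_void_p, c_float, POINTER

lib = cdll.LoadLibrary("./libexample.so")   # placeholder shared library

# Declare C signatures so pointer-sized returns are not truncated to int.
lib.load_model.restype = c_void_p
lib.load_model.argtypes = [c_char_p]
lib.predict.restype = POINTER(c_float)
lib.predict.argtypes = [c_void_p, c_char_p]

model = lib.load_model(b"./model.bin")           # bytes map to char*
scores = lib.predict(model, b"some input text")  # opaque handle + text in
top3 = [scores[i] for i in range(3)]             # index into the returned float*
```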
yaptınız\"))\n\nödeme = a*b\nprint(\"Ödemeniz gereken tutar:{}tl\".format(ödeme))\n\n\n\n\n#yakan_miktar = float(input(\"Kilometrede yakan miktar:\"))\n\n#kilometre = int(input(\"Kaç km yol yaptınız:\"))\n\n#print(\"Tutar: {} tl\".format(yakan_miktar * kilometre))\n#Kilometrede yakan miktar:0.22\n#Kaç km yol yaptınız:430\n#Tutar: 94.6 tl","sub_path":"EXP/03exercise.py","file_name":"03exercise.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"526818108","text":"import queue\r\n\r\n\r\ndef topoSort(adj, indegree):\r\n zero_indegree = queue.PriorityQueue()\r\n topoSorted = []\r\n\r\n for i in range(n):\r\n if indegree[i] == 0:\r\n zero_indegree.put(i)\r\n\r\n while not zero_indegree.empty():\r\n u = zero_indegree.get()\r\n topoSorted.append(u)\r\n for v in adj[u]:\r\n indegree[v] -= 1\r\n if indegree[v] == 0:\r\n zero_indegree.put(v)\r\n\r\n return topoSorted\r\n\r\n\r\ndef main():\r\n global n, m\r\n n, m = map(int, input().split())\r\n adj = [[] for _ in range(n)]\r\n indegree = [0 for _ in range(n)]\r\n\r\n for i in range(m):\r\n u, v = map(int, input().split())\r\n u -= 1\r\n v -= 1\r\n adj[u].append(v)\r\n indegree[v] += 1\r\n\r\n res = topoSort(adj, indegree)\r\n if (len(res) < n):\r\n print(\"Sandro fails.\")\r\n return 0\r\n\r\n for i in range(n):\r\n print(\"{} \".format(res[i] + 1), end=\"\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Orange/Lecture 01/Pro01.py","file_name":"Pro01.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"27669248","text":"from time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n# init driver\ndriver = webdriver.Chrome()\n# driver = webdriver.Chrome(executable_path=r'./drivers/chromedriver')\n\n# init driver\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument(\"--incognito\")\ndriver = webdriver.Chrome(chrome_options=chrome_options)\n\n# open the url\ndriver.get('https://www.amazon.com/')\nsleep(1)\n\nsearch_button = driver.find_element(By.XPATH, \"//span[@class='nav-line-2'][text()='& Orders']\")\nsearch_button.click()\n\n# verify\nassert 'Sign-In' in driver.find_element(By.XPATH, \"//div[@class='a-section']//h1[@class='a-spacing-small']\").text\n\nsleep(1)\ndriver.quit()","sub_path":"hw_2/HW2_Amazon_TC2.py","file_name":"HW2_Amazon_TC2.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"636577848","text":"#Updated by Christopher Snell\n#Portfolio\n# 15 September 2019\n\n# Run command -- python3 showstocks.py > mystocks.txt\n#\n#\n# Perform command -- cat mystocks.txt | grep -B 1 -A 6 \"\\Symbol:KO\\n\"\n# This will show one line before and 6 lines after\n# This will display the stock information appropriately\n\nfrom pprint import pprint\n \nimport robin_stocks as r\nimport requests\nimport wget\nimport json\nimport stocks as s\n'''\nRobinhood portfolio script\n\n'''\n\n#!!! 
Fill out username and password\nusername = ''\npassword = ''\n#!!!\n\nlogin = r.login(username,password)\n\nmypos = r.get_current_positions()\n\n\nfor counter in mypos:\n mystring = counter['instrument']\n myr = requests.get(mystring)\n try:\n jdata = (json.loads(myr.content))\n except ValueError as e:\n #print(\" \")\n continue\n except:\n continue\n\n\n try:\n print(\"Name:\" + str(jdata['simple_name']))\n except ValueError as f:\n continue\n except:\n continue\n print(\"Symbol:\" + jdata['symbol'])\n print(\"Shares:\" + counter['quantity'])\n bvalue = str(jdata['symbol'])\n try:\n cvalue = list(s.get_latest_price(bvalue))\n except ValueError as g:\n continue\n except: \n continue\n\n \n if len(cvalue) != 0:\n print(\"Price:\" + str(cvalue[0]))\n print(\"Average Cost:\" + counter['average_buy_price']) \n #bvalue = str(jdata['symbol'])\n #cvalue = s.get_latest_price(bvalue)\n #print(\"Price:\" + str(cvalue))\n\t #cvalue = str(get_quotes(bvalue))\n #print(cvalue)\n\t #print(tSymbol)\n\t #print(get_latest_price(jdata['symbol']))\n\t #myprice = get_latest_price(jdata['symbol'])\n\t #print(\"Average Cost:\" + counter['average_buy_price'])\n print(\"Total Return:\" + str(float(cvalue[0])*float(counter['quantity']) - float(counter['average_buy_price'])*float(counter['quantity']))) #Need active price of stock to calculate\n print(\"Equity:\" + str(float(cvalue[0])*float(counter['quantity']))) #Need active price of stock to calculate\n print(\"\\n\")\n\n","sub_path":"robin_stocks/showstocks.py","file_name":"showstocks.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"110176516","text":"import numpy as np\nfrom layers import Dense, Dropout, Flatten\n\nclass NeuralNetwork:\n\n def __init__(self, layers):\n self.layers = layers\n self.nb_layers = len(layers)\n\n def initialization(self, X, y):\n # Initialize layer's dimensions and weights\n A = X\n for l in range(self.nb_layers):\n input_dim = A.shape[-1]\n self.layers[l].initialize(input_dim)\n A = self.layers[l].forward(A)\n\n for l in range(self.nb_layers):\n if(type(self.layers[l]) != Dropout):\n self.layers[l].predict = self.layers[l].forward\n\n def forward(self, X):\n # Forward propagation\n A = X\n for l in range(self.nb_layers):\n A = self.layers[l].forward(A)\n out = A\n return out\n\n #def backward\n\n def train(self, X_train, y_train, opt, nb_epochs, batch_size, learning_rate, cost):\n self.initialization(X_train[:2], y_train[:2])\n opt.train(X_train, y_train, self, nb_epochs, batch_size, learning_rate, cost)\n\n def predict(self, X_test):\n A = X_test\n for l in range(self.nb_layers):\n A = self.layers[l].predict(A)\n return A\n","sub_path":"neural_net_from_scratch/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"297034472","text":"import dataclasses\nimport io\nfrom typing import List, Tuple, Dict, Any, Union, cast\n\nimport torch\n\nfrom torch.distributed._shard._utils import narrow_tensor_by_index\nfrom torch.distributed._shard.sharded_tensor import ShardedTensor\n\n\nfrom .planner import (\n SavePlanner,\n LoadPlanner,\n SavePlan,\n LoadPlan,\n ReadItem,\n WriteItem,\n WriteItemType,\n)\n\nfrom .metadata import (\n BytesStorageMetadata,\n TensorStorageMetadata,\n MetadataIndex,\n Metadata,\n STATE_DICT_TYPE,\n STORAGE_TYPES\n)\n\nfrom .planner_helpers import (\n _create_read_items,\n 
_create_write_items,\n _create_default_metadata_only_plan\n)\n\nfrom .utils import (\n find_state_dict_object\n)\n\nclass DefaultSavePlanner(SavePlanner):\n def init(self, state_dict: Dict[str, Any], is_coordinator: bool) -> None:\n self.state_dict = state_dict\n self.is_coordinator = is_coordinator\n\n def create_local_plan(self) -> SavePlan:\n self.plan = create_default_local_save_plan(self.state_dict, self.is_coordinator)\n return self.plan\n\n def create_global_plan(self, all_plans: List[SavePlan]) -> Tuple[List[SavePlan], Metadata]:\n self.global_plan, self.metadata = create_default_global_save_plan(all_plans)\n return self.global_plan, self.metadata\n\n def finish_plan(self, new_plan: SavePlan) -> SavePlan:\n self.plan = new_plan\n return new_plan\n\n def resolve_data(self, write_item: WriteItem) -> Union[torch.Tensor, io.BytesIO]:\n object = self.lookup_object(write_item.index)\n return self.transform_object(write_item, object)\n\n def lookup_object(self, index: MetadataIndex) -> Any:\n \"\"\"\n This is an extension from the planner interface to make it easy to extend the default planner\n \"\"\"\n return find_state_dict_object(self.state_dict, index)\n\n def transform_object(self, write_item: WriteItem, object: Any):\n \"\"\"\n This is an extension from the planner interface to make it easy to extend the default planner\n \"\"\"\n if write_item.type == WriteItemType.BYTE_IO:\n bytes = io.BytesIO()\n torch.save(object, bytes)\n object = bytes\n return object\n\n\nclass DefaultLoadPlanner(LoadPlanner):\n def init(self, state_dict: STATE_DICT_TYPE, metadata: Metadata, is_coordinator: bool) -> None:\n self.state_dict = state_dict\n self.metadata = metadata\n self.is_coordinator = is_coordinator\n\n def create_local_plan(self) -> LoadPlan:\n return create_default_local_load_plan(self.state_dict, self.metadata)\n\n def create_global_plan(self, global_plan: List[LoadPlan]) -> List[LoadPlan]:\n return create_default_global_load_plan(global_plan)\n\n def finish_plan(self, new_plan: LoadPlan) -> LoadPlan:\n return new_plan\n\n def load_bytes(self, read_item: ReadItem, value: io.BytesIO) -> None:\n self.state_dict[read_item.dest_index.fqn] = torch.load(value)\n\n def resolve_tensor(self, read_item: ReadItem):\n tensor = self.lookup_tensor(read_item.dest_index)\n return self.transform_tensor(read_item, tensor)\n\n def commit_tensor(self, read_item: ReadItem, tensor: torch.Tensor) -> None:\n pass\n\n def lookup_tensor(self, index: MetadataIndex) -> torch.Tensor:\n \"\"\"\n This is an extension from the planner interface to make it easy to extend the default planner\n \"\"\"\n return find_state_dict_object(self.state_dict, index)\n\n def transform_tensor(self, read_item: ReadItem, tensor: torch.Tensor):\n \"\"\"\n This is an extension from the planner interface to make it easy to extend the default planner\n \"\"\"\n return narrow_tensor_by_index(tensor, read_item.dest_offsets, read_item.lengths)\n\n\ndef create_default_local_load_plan(\n state_dict: Dict[str, Any],\n metadata: Metadata,\n) -> LoadPlan:\n requests = []\n \"\"\"\n Create the ``LoadPlan`` used by DefaultLoadPlanner.\n\n It produces one read item per value in ``state_dict`` using the metadata in ``metadata``.\n\n The default behavior is to match key exactly between state_dict and metadata.\n It handles resharding by issuing multiple read requests against storage in order to match\n load requirements.\n \"\"\"\n for fqn, obj in state_dict.items():\n md = metadata.state_dict_metadata[fqn]\n requests += _create_read_items(fqn, md, 
obj)\n\n return LoadPlan(requests)\n\ndef create_default_global_load_plan(all_plans: List[LoadPlan]) -> List[LoadPlan]:\n \"\"\"\n Create global load plan used by DefaultLoadPlanner.\n\n The default load behavior involved no global coordination and this function\n currently doesn't change the local plans.\n \"\"\"\n return all_plans\n\ndef create_default_local_save_plan(state_dict: Dict[str, Any], is_coordinator: bool) -> SavePlan:\n \"\"\"\n Create the ``SavePlan`` used by DefaultSavePlanner.\n\n On non-coordinator ranks, this function ignores tensors and non-tensor objects,\n only producing writes for ShardedTensor objects.\n\n On the coordinator rank, produce writes for all values.\n \"\"\"\n requests = []\n for fqn, obj in state_dict.items():\n if isinstance(obj, ShardedTensor) or is_coordinator:\n requests += _create_write_items(fqn, obj)\n return SavePlan(requests)\n\ndef create_default_global_save_plan(all_plans: List[SavePlan]) -> Tuple[List[SavePlan], Metadata]:\n \"\"\"\n Create the global plan and metadata used by DefaultSavePlanner.\n\n Metadata is produced by concatenating the metadata of all ``WriteItem`` from the supplied plans.\n\n The only global planning change is to update index hints in all ``MetadataIndex`` objects.\n \"\"\"\n md: Dict[str, STORAGE_TYPES] = {}\n new_plans = []\n for plan in all_plans:\n new_items = []\n for item in plan.items:\n if not item.type == WriteItemType.SHARD:\n assert item.index.fqn not in md\n\n if item.type == WriteItemType.BYTE_IO:\n md[item.index.fqn] = BytesStorageMetadata()\n new_items.append(item)\n else:\n assert item.tensor_data is not None\n tensor_md = cast(\n TensorStorageMetadata,\n md.setdefault(item.index.fqn, TensorStorageMetadata(\n properties=item.tensor_data.properties,\n size=item.tensor_data.size,\n chunks=[],\n ))\n )\n new_index = dataclasses.replace(item.index, index=len(tensor_md.chunks))\n new_item = dataclasses.replace(item, index=new_index)\n new_items.append(new_item)\n\n assert item.tensor_data.chunk is not None, f\"Cannot create MD for tensor without bounds. 
FQN: {item.index.fqn}\"\n                tensor_md.chunks.append(item.tensor_data.chunk)\n        new_plans.append(dataclasses.replace(plan, items=new_items))\n    return (new_plans, Metadata(md))\n\ndef _create_default_local_metadata(state_dict: STATE_DICT_TYPE) -> Metadata:\n    \"\"\"\n    Return the ``Metadata`` if DefaultSavePlanner was used to checkpoint ``state_dict``.\n    \"\"\"\n    plan = _create_default_metadata_only_plan(state_dict)\n    _, md = create_default_global_save_plan([plan])\n    return md\n","sub_path":"torch/distributed/_shard/checkpoint/default_planner.py","file_name":"default_planner.py","file_ext":"py","file_size_in_byte":7300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
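The global-save planning above relies on `dataclasses.replace` to rewrite immutable plan items with fresh, per-tensor chunk indices. A self-contained illustration of that reindexing pattern, using simplified stand-in types rather than the actual torch planner classes:

```python
from dataclasses import dataclass, replace

@dataclass(frozen=True)
class Index:
    fqn: str
    index: int

@dataclass(frozen=True)
class Item:
    index: Index

items = [Item(Index("w", 0)), Item(Index("w", 0)), Item(Index("b", 0))]
chunks_per_fqn = {}
new_items = []
for item in items:
    # give each shard of the same tensor a unique, contiguous index
    n = chunks_per_fqn.setdefault(item.index.fqn, 0)
    chunks_per_fqn[item.index.fqn] = n + 1
    new_items.append(replace(item, index=replace(item.index, index=n)))

assert [i.index.index for i in new_items] == [0, 1, 0]
```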
{"seq_id":"141620900","text":"# -*- coding: UTF-8 -*-\nimport os\nimport os.path\nimport logging\nimport logging.handlers\nimport io\nimport sys\nimport configparser\nimport time\nimport openpyxl\n\n\n\ndef make_loger():\n\tglobal loger\n\tglobal myname\n\tglobal mydir\n\t# Create the top-level logger\n\tlogFileName = os.path.join( mydir, 'get_price_'+ orgName +'.log')\n\tloger= logging.getLogger(myname)\n\tsubject = 'Subj_for_alert_mail, ' + myname \n\tloger.setLevel(logging.DEBUG)\n\thandlr1\t =logging.handlers.RotatingFileHandler(logFileName, 'a', 128000, 5)\n\thandlr2 =logging.handlers.SMTPHandler(('192.168.10.3',25), 'docn@meijin.ru', 'kissupport@meijin.ru', subject, credentials=None, secure=None)\n\thandlr2.setLevel(logging.CRITICAL) \n\tloger.addHandler(handlr1)\n\tloger.addHandler(handlr2)\n\t#loger.propagate = True\n\t# Define the formats for the handlers\n\tfmt1 = logging.Formatter('%(asctime)s %(levelname)-7s %(message)s')\n\thandlr1.setFormatter(fmt1)\n\thandlr2.setFormatter(fmt1)\n\n\ndef config_read():\n\tglobal loger\n\tglobal SheetName\n\tglobal FilenameIn\n\tglobal FilenameOut\n\tglobal out_columns_names\n\tglobal out_columns_j\n\tglobal in_columns_j\n\tglobal colGrp\n\tglobal colSGrp\n\tglobal GrpFonti\n\tglobal SubGrpFonti\n\tglobal HeaderFonti\n\tglobal RegularFontSize\n\tglobal SubGrpBackgroundColor\n\tglobal GrpBackgroundColor\n\tglobal strHeader\n\tglobal SubGrpFontSize\n\tglobal GrpFontSize\n\n\tcfgFName = os.path.join( mydir, myname + '.cfg')\n\tloger.debug('Begin config_read ' + cfgFName )\n\t\n\tconfig = configparser.ConfigParser()\n\tif os.path.exists(cfgFName):\t config.read( cfgFName)\n\telse : loger.debug('Configuration file not found.')\n\n\t# the [cols_in] section holds the list of columns we care about and their column numbers in the source file\n\tin_columns_names = config.options('cols_in')\n\tin_columns_j = {}\n\tfor vName in in_columns_names :\n\t\tif ('' != config.get('cols_in', vName)) :\n\t\t\tin_columns_j[vName] = config.getint('cols_in', vName) \n\t\n    # from the [cols_out] section, build the list of output columns and the header line of the resulting CSV file\n\ttemp_list = config.options('cols_out')\n\ttemp_list.sort()\n\n\tout_columns_names = []\n\tfor vName in temp_list :\n\t\tif ('' != config.get('cols_out', vName)) :\n\t\t\tout_columns_names.append(vName)\n\t\n\tout_columns_j = {}\n\tfor vName in out_columns_names :\n\t\ttName = config.get('cols_out', vName)\n\t\tif tName in in_columns_j :\n\t\t\tout_columns_j[vName] = in_columns_j[tName]\n\tprint('-----------------------------------')\n\tfor vName in out_columns_j :\n\t\tprint(vName, '\\t', out_columns_j[vName])\t\n\tprint('-----------------------------------')\n\tstrHeader = ','.join(out_columns_names) +',группа,подгруппа,'\n\tprint('HEAD =', strHeader)\n\n\t# read the file names and the sheet name\n\tFilenameIn = config.get('input','Filename_in' )\n\tSheetName = config.get('input','SheetName' ) \n\tFilenameOut = config.get('input','Filename_out')\n\tprint('SHEET=', SheetName)\n\t\n\t# read the group and subgroup attributes\n\tif ('' != config.get('grp_properties', 'группа')) :\n\t\tcolGrp = config.getint('grp_properties', 'группа')\n\tif ('' != config.get('grp_properties', 'подгруппа')) :\n\t\tcolSGrp = config.getint('grp_properties', 'подгруппа')\n\tif ('' != config.get('grp_properties', 'GrpFonti')) :\n\t\tGrpFonti = config.getint('grp_properties', 'GrpFonti')\n\tif ('' != config.get('grp_properties', 'SubGrpFonti')) :\n\t\tSubGrpFonti = config.getint('grp_properties','SubGrpFonti')\n\tif ('' != config.get('grp_properties', 'HeaderFonti')) :\n\t\tHeaderFonti = config.getint('grp_properties','HeaderFonti')\n\tif ('' != config.get('grp_properties', 'RegularFontSize')) :\n\t\tRegularFontSize = config.getint('grp_properties','RegularFontSize')\n\tif ('' != config.get('grp_properties', 'SubGrpFontSize')): \n\t    SubGrpFontSize = config.getint('grp_properties','SubGrpFontSize')\n\tif ('' != config.get('grp_properties', 'GrpFontSize')) :\n\t    GrpFontSize = config.getint('grp_properties', 'GrpFontSize')\n\tif ('' != config.get('grp_properties', 'SubGrpBackgroundColor')) :\n\t\tSubGrpBackgroundColor= config.getint('grp_properties','SubGrpBackgroundColor')\n\tif ('' != config.get('grp_properties', 'GrpBackgroundColor')) :\n\t\tGrpBackgroundColor = config.getint('grp_properties', 'GrpBackgroundColor')\n\tsubgrpfontbold = config.get('grp_properties','subgrpfontbold')\n\tgrpfontbold = config.get('grp_properties', 'grpfontbold')\n\treturn FilenameIn\n\n\n\ndef quoted(sss):\n\tif (',' in sss) or ('\"' in sss) : # and not(sss[0]=='\"' and sss[-1]=='\"') :\n\t\tsss = '\"'+sss.replace('\"','\"\"')+'\"'\n\treturn sss\n\n\n\ndef main( ):\n\t# Create the logger (loger)\n\tmake_loger()\n\tloger.debug('Begin main')\n\n\t# Read the configuration from the file\n\tff = config_read()\n\tloger.debug('Opening file '+ FilenameIn)\n\tbook = openpyxl.load_workbook(filename = os.path.join( mydir, FilenameIn), read_only=False, keep_vba=False, data_only=False, use_iterators=False)\n\t\n\tloger.debug('Setting sheet '+ SheetName)\n\tprint(SheetName)\n\tsh = book[SheetName]\n\n\tssss = []\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# loop over the rows of the file\n\tprint( 'The sheet has %d rows' % book[SheetName].max_row)\n\tfor i in range(book[SheetName].min_row, book[SheetName].max_row+1) :\n\t\ti_last = i\n\t\ttry:\n\t\t\tccc = sh.cell(row=i, column=colGrp)\n\t\t\tif ccc.value == None :\n\t\t\t\tprint (i, colGrp, 'Empty!!!')\n\t\t\t\tcontinue\n\t\t\t'''\t # Font attributes, for tuning the config\n\t\t\tprint( 'Row', i, ccc.value,)\n\t\t\tprint( 'font=', ccc.font.name,)\n\t\t\tprint( 'bold=', ccc.font.bold,)\n\t\t\tprint( 'italic=', ccc.font.italic,)\n\t\t\tprint( 'size=', ccc.font.size)\n\t\t\tprint( 'colour=', ccc.font.color.rgb)\n\t\t\tprint( 'background=',ccc.fill.fill_type)\n\t\t\tprint( 'backgroundColor1=', ccc.fill.start_color)\n\t\t\tprint( 'backgroundColor2=', ccc.fill.end_color)\n\t\t\t'''\n#\t\t\tprint( 'Row', i, 'column', colGrp, 'value', ccc.value)\n\t\t\tif GrpFontSize == ccc.font.size : \t\t\t\t# Group\n\t\t\t\tgrpName = quoted(sh.cell(row=i, column=colGrp).value)\n\t\t\t\tsubGrpName = ''\n\t\t\t\tprint('group', grpName)\n\t\n\t\t\telif SubGrpFontSize == ccc.font.size :\t\t\t # Subgroup\n\t\t\t\tsubGrpName = quoted(sh.cell(row=i,column=colSGrp).value)\n\t\n\t\t\telif True == ccc.font.bold :\t \t\t\t\t\t\t# Table header
\t\t\t\t\t\t# Заголовок таблицы\n\t\t\t\tpass\n\t\n\t\t\telif ('' == sh.cell(row=i, column=out_columns_j['код']).value) :\t# Пустая строка\n\t\t\t\tpass\n\t\t\t\tprint( 'Пустая строка:', sh.cell(row=i, column=out_columns_j['код']).value )\n\t\n\t\t\telif RegularFontSize == ccc.font.size :\t\t\t\t\t# Информационная строка\n\t\t\t\tccc = sh.cell(row=i, column=out_columns_j['код'])\n\t\t\t\tcode = ccc.value\n\t\t\t\tsss = []\t\t\t\t\t\t\t\t\t# формируемая строка для вывода в файл\n\t\t\t\tfor strname in out_columns_names :\n\t\t\t\t\tif strname in out_columns_j :\n\t\t\t\t\t\t# берем значение из соответствующей ячейки файла\n\t\t\t\t\t\tj = out_columns_j[strname] \n\t\t\t\t\t\tccc = sh.cell(row=i, column=j)\n\t\t\t\t\t\tcellType = ccc.data_type\n\t\t\t\t\t\tcellValue = ccc.value\n#\t\t\t\t\t\tprint (cellType, cellValue)\n\t\t\t\t\t\tif cellValue == None : \n\t\t\t\t\t\t\tss = ''\n\t\t\t\t\t\telif cellType in ('n') : # numeric\n\t\t\t\t\t\t\tif int(cellValue) == cellValue:\n\t\t\t\t\t\t\t\tss = str(int(cellValue))\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tss = str(cellValue)\n\t\t\t\t\t\telif strname in ('закупка','продажа','цена1', 'цена2') :\n\t\t\t\t\t\t\tss = '0' \n\t\t\t\t\t\telif cellType == 's' :\n\t\t\t\t\t\t\tss = quoted(cellValue ) \n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tss = ''\n\t\t\t\t\telse : \n\t\t\t\t\t # вычисляемое поле\n\t\t\t\t\t\ts1 = sh.cell(row=i, column=in_columns_j['склад1']).value\n\t\t\t\t\t\ts2 = sh.cell(row=i, column=in_columns_j['склад2']).value\n\t\t\t\t\t\ts3 = sh.cell(row=i, column=in_columns_j['склад3']).value\n\t\t\t\t\t\tif s1 == '' :\n\t\t\t\t\t\t\ts1 = '0'\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\ts1 = str(int( s1 ))\n\t\t\t\t\t\tif s2 == '' : \n\t\t\t\t\t\t\ts2 = '0'\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\ts2 = str(int( s2 ))\n\t\t\t\t\t\tif s3 == '' : \n\t\t\t\t\t\t\ts3 = '0'\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\ts3 = str(int( s3 ))\n\t\t\t\t\t\tss = s1 + '/' + s2 + '/' + s3\n\t\t\t\t\t\tpass\n\t\t\t\t\tsss.append(ss)\n\t\n\t\t\t\tsss.append(grpName)\n\t\t\t\tsss.append(subGrpName)\n\t\t\t\tssss.append(','.join(sss))\n\t\t\telse :\n\t\t\t\tloger.debug('Нераспознана строка: <' + sh.cell(row=i, column=out_columns_j['код']).value + '>' )\n\t\texcept Exception as e:\n\t\t\tloger.debug('Exception: <' + str(e) + '> при обработке строки ' + str(i) +'<' + '>' )\n\t\t\traise e\n\t\n\tloger.info('Обработано %s строк прайса.' 
\n\n\tf2 = open( os.path.join( mydir, FilenameOut), 'w')\n\tdata = ',\\n'.join(ssss) +','\n\tf2.write(strHeader+'\\n')\n\tf2.write(data)\n\tf2.close()\n\n\nif __name__ == '__main__':\n\tglobal myname\n\tglobal mydir\n\tglobal orgName\n\tmyname = os.path.basename(os.path.splitext(sys.argv[0])[0])\n\tmydir = os.path.dirname (sys.argv[0])\n\torgName = myname[:-5]\n\tprint('myname =', myname)\n\tprint('mydir =', mydir)\n\tprint('myname.ext=' + os.path.basename(sys.argv[0]))\n\tprint('orgName =', orgName)\n\tmain( )\n","sub_path":"network_laboratory_auto.py","file_name":"network_laboratory_auto.py","file_ext":"py","file_size_in_byte":9433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"333479082","text":"from packages import distance\nfrom geojson import FeatureCollection, Point\n# Takes a bounding box and a cell depth and returns\n# a set of {@link Point|points} in a grid.\n#\n# @name pointGrid\n# @param {Array} bbox extent in [minX, minY, maxX, maxY] order\n# @param {number} cellSize the distance across each cell\n# @param {string} [units=kilometers] used in calculating cellSize,\n# can be degrees, radians, miles, or kilometers\n# @return {FeatureCollection} grid of points\n# @example\n# var extent = [-70.823364, -33.553984, -70.473175, -33.302986];\n# var cellSize = 3;\n# var units = 'miles';\n#\n# var grid = turf.pointGrid(extent, cellSize, units);\n#\n# //=grid\ndef point_grid(bbox, cellSize, units):\n    fc = FeatureCollection([])\n    x_fraction = cellSize / distance(Point((bbox[0], bbox[1])),\n                                     Point((bbox[2], bbox[1])), units)\n    cell_width = x_fraction * (bbox[2] - bbox[0])\n    y_fraction = cellSize / distance(Point((bbox[0], bbox[1])),\n                                     Point((bbox[0], bbox[3])), units)\n    cell_height = y_fraction * (bbox[3] - bbox[1])\n\n    current_x = bbox[0]\n    while current_x <= bbox[2]:\n        current_y = bbox[1]\n        while current_y <= bbox[3]:\n            fc[\"features\"].append(Point((current_x, current_y)))\n\n            current_y += cell_height\n        current_x += cell_width\n\n    return fc\n","sub_path":"packages/turf_point_grid/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"362806906","text":"from django.shortcuts import render\nfrom .models import Commentdetails\n\ndef Showindex(request):\n    value = request.session.get(\"username\", \"\")\n    if value == \"\":\n        return render(request,\"comment.html\")\n    else:\n        return render(request,\"index.html\",{\"message\":\"comment is already given\"})\n\ndef index(request):\n    return render(request,\"index.html\")\n\ndef comment(request):\n    name = request.POST.get(\"t1\")\n    contact = request.POST.get(\"t2\")\n    comnt = request.POST.get(\"t3\")\n\n    cd = Commentdetails(name=name,contact=contact,comment=comnt)\n    cd.save()\n\n    request.session[\"username\"] = name\n\n    return render(request,\"index.html\",{\"mess\":\"comment saved\"})\n\n","sub_path":"ex_session/appex_session/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
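The Django views above gate the comment form on a session key. The same one-comment-per-session guard, condensed into a single hypothetical view that assumes the record's templates:

```python
from django.shortcuts import render

def show_comment_form(request):
    # request.session behaves like a dict; .get avoids a KeyError on first visit
    if request.session.get("username"):
        return render(request, "index.html",
                      {"message": "comment is already given"})
    return render(request, "comment.html")
```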
{"seq_id":"277185678","text":"#!/usr/bin/env python3\n# Name: Justin Juang (jjuang)\n# Group Members: None\n\n'''\nProgram sequenceAnalysis.py is essentially a toolkit that includes modules that are helpful\nin analysis of biological sequences, including DNA, RNA, and protein sequences. \n\nModules include:\n    OrfFinder: finds and stores open reading frames in a sequence.\n    NucParams: the module counts the number of nucleotides, codons, and amino acids of a given \n    sequence and stores the information in respective dictionaries. \n    ProteinParams: the module analyzes a given protein sequence, and returns the information.\n    FastAReader: the module opens and reads a FastA file.\n\nPersonal Note:\nLab 3: ProteinParams\nLab 4: NucParams + FastAReader\nLab 5: OrfFinder\n'''\n\n################################################################################################################################################\n\nclass OrfFinder():\n    '''Creates class OrfFinder that finds ORFs in a given sequence.'''\n\n    def __init__(self, seq):\n        '''\n        Init method that creates objects for later use.\n        orfList is the list of ORFs found.\n        complementStrand is the complement strand sequence. \n        '''\n        \n        self.seq = seq #no need for upper() since file came with capitalized sequence\n\n        self.orfList = [] #list to store found orfs\n        \n        self.startCodons = ['ATG'] #list of start codons (no extra credit)\n        self.stopCodons = ['TGA', 'TAG', 'TAA'] #list of stop codons\n        \n        #personal note: str.maketrans and .translate() go together. \n        nucComplements = str.maketrans('ATGC', 'TACG') #cool function I found, str.maketrans, that makes a translation table\n        self.complementStrand = self.seq.translate(nucComplements)[::-1] #.translate is the function that actually translates
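        #A standalone illustration of the two-step idiom above (not part of this class):
        #    complement = str.maketrans('ATGC', 'TACG')    #one-time table: A<->T, G<->C
        #    'ATGCC'.translate(complement)[::-1]           #gives 'GGCAT', the reverse complement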
\n\n    def forwardFrame(self):
        '''Finds ORFs in the forward frame.'''\n\n        startList = [] #list of all found start codon positions\n        startFound = 0 #indicates if start codon is found \n        codonFound = 0 #indicates if a codon is found \n\n        for frame in range(0, 3): #for each frame (3 nucleotides at a time) \n            \n            startFound = 0\n            codonFound = 0 #indicators initialized at 0\n            startList = [] #start codon list cleared\n            \n            for nucleotide in range(frame, len(self.seq), 3): #for each nucleotide (position) in that frame\n                \n                codon = self.seq[nucleotide : nucleotide + 3] #a codon is three nucleotides\n                \n                #if the codon is in start codons (or ATG)\n                if codon in self.startCodons: \n\n                    startList.append(nucleotide) #adds the position of the nucleotide to the start list (\"A\" pos)\n                    startFound = 1 #indicates that a start codon is found \n                    codonFound = 1\n\n                #if codon is in stop codons and startFound = 1 (normal orf)\n                if codon in self.stopCodons and startFound == 1:\n\n                    startPos = startList[0] + 1 - frame #the start pos of this orf is the first position in the list \n                    stopPos = nucleotide + 3 #and stop pos is at the nucleotide position + 3 (for codon) \n                    orfLength = stopPos - startPos + 1 #and orf length is just pos of stop - start\n                    \n                    orf = [(frame%3) + 1, startPos, stopPos, orfLength] #define orf\n                    self.orfList.append(orf) #and append it \n                    \n                    startList = [] #clears start list \n                    startFound = 0 \n                    codonFound = 1 \n\n                #dangling stop: no start codon found (lonely stop dangling there without start)\n                if codonFound == 0 and codon in self.stopCodons: \n                    startPos = 1 #the start pos would be the start of sequence (so 1)\n                    stopPos = nucleotide + 3 #and end pos would be the pos of stop codon + 3 \n                    orfLength = stopPos - startPos + 1 #orf length\n                    \n                    orf = [(frame%3) + 1, startPos, stopPos, orfLength] #define orf\n                    self.orfList.append(orf) #and append it\n\n                    startList = [] #clear start list \n                    codonFound = 1\n\n            #dangling start: no stop found (but start found)\n            if startFound == 1:\n                startPos = startList[0] + 1 #then start pos is the pos of start codon\n                stopPos = len(self.seq) #and stop pos is the end of sequence (no stop)\n                orfLength = stopPos - startPos + 1\n                orf = [(frame%3) + 1, startPos, stopPos, orfLength]\n                self.orfList.append(orf)\n\n        return self.orfList #returns orf list with found orfs \n\n    def reverseFrame(self): \n        '''Finds ORFs in the reverse frame on the complement strand.''' \n        \n        #basically same as forward frame... with minor differences\n        startList = [] #empty start codon list\n        startFound = 0\n        codonFound = 0\n\n        for frame in range(0, 3): #iterates through each frame (3 nucleotides)\n            startFound = 0 #all initialized at 0\n            codonFound = 0\n            startList = [] \n\n            for nucleotide in range(frame, len(self.complementStrand), 3): #iterates first position at each frame\n                \n                codon = self.complementStrand[nucleotide : nucleotide + 3] #creates codon\n\n                #if start codon is found \n                if codon in self.startCodons: \n                    \n                    startList.append(nucleotide) #append the start codon position\n                    startFound = 1\n                    codonFound = 1\n\n                #if stop codon is found and there's a start codon\n                if codon in self.stopCodons and startFound == 1: \n                    \n                    stopPos = len(self.complementStrand) - startList[0] #gets stop position\n                    startPos = len(self.complementStrand) - (nucleotide + 2) #gets start position\n                    if frame == 1: \n                        stopPos += 1\n                    elif frame == 2: \n                        stopPos += 2\n                    \n                    orfLength = stopPos - startPos + 1 #gets orf length\n                    orf = [-1 * ((frame%3) + 1), startPos, stopPos, orfLength] #creates orf\n                    self.orfList.append(orf) #append to orf list \n\n                    startList = [] #resets start list\n                    startFound = 0\n                    codonFound = 1\n\n                #dangling stop: no start codon found \n                if codonFound == 0 and codon in self.stopCodons: \n                    startPos = len(self.complementStrand) - nucleotide - 2 #gets start position\n                    stopPos = len(self.complementStrand) #gets stop position (length of complement strand)\n                    \n                    orfLength = stopPos - startPos + 1 #gets orf length\n                    orf = [-1 * ((frame%3) + 1), startPos, stopPos, orfLength] #creates orf \n                    self.orfList.append(orf) #appends orf\n\n                    startList = []\n                    codonFound = 1\n\n            #dangling start: no stop found (but start found)\n            if startFound == 1: \n                startPos = startList[0] + 1 #get start pos \n                stopPos = 1 #stop is just the beginning of sequence\n                orfLength = stopPos - startPos + 1 #gets length\n                orf = [-1 * ((frame%3) + 1), startPos, stopPos, orfLength] #creates orf\n                self.orfList.append(orf) #append orf \n\n        return self.orfList\n\n################################################################################################################################################\n\nclass NucParams:\n    '''Create class NucParams that stores the number of amino acids, codons, and nucleotides in dictionaries '''\n    rnaCodonTable = {\n    # RNA codon table\n    # U\n    'UUU': 'F', 'UCU': 'S', 'UAU': 'Y', 'UGU': 'C',  # UxU\n    'UUC': 'F', 'UCC': 'S', 'UAC': 'Y', 'UGC': 'C',  # UxC\n    'UUA': 'L', 'UCA': 'S', 'UAA': '-', 'UGA': '-',  # UxA\n    'UUG': 'L', 'UCG': 'S', 'UAG': '-', 'UGG': 'W',  # UxG\n    # C\n    'CUU': 'L', 'CCU': 'P', 'CAU': 'H', 'CGU': 'R',  # CxU\n    'CUC': 'L', 'CCC': 'P', 'CAC': 'H', 'CGC': 'R',  # CxC\n    'CUA': 'L', 'CCA': 'P', 'CAA': 'Q', 'CGA': 'R',  # CxA\n    'CUG': 'L', 'CCG': 'P', 'CAG': 'Q', 'CGG': 'R',  # CxG\n    # A\n    'AUU': 'I', 'ACU': 'T', 'AAU': 'N', 'AGU': 'S',  # AxU\n    'AUC': 'I', 'ACC': 'T', 'AAC': 'N', 'AGC': 'S',  # AxC\n    'AUA': 'I', 'ACA': 'T', 'AAA': 'K', 'AGA': 'R',  # AxA\n    'AUG': 'M', 'ACG': 'T', 'AAG': 'K', 'AGG': 'R',  # AxG\n    # G\n    'GUU': 'V', 'GCU': 'A', 'GAU': 'D', 'GGU': 'G',  # GxU\n    'GUC': 'V', 'GCC': 'A', 'GAC': 'D', 'GGC': 'G',  # GxC\n    'GUA': 'V', 'GCA': 'A', 'GAA': 
'E', 'GGA': 'G', # GxA\n 'GUG': 'V', 'GCG': 'A', 'GAG': 'E', 'GGG': 'G' # GxG\n }\n dnaCodonTable = {key.replace('U','T'): value for key, value in rnaCodonTable.items()} #dna codon table\n\n def __init__ (self, inString=''):\n '''\n Instantiates the amino acid composition, nucleotide composition, and RNA codon composition \n dictionaries. \n '''\n self.aaCompDict = { #amino acid composition dictionary \n 'A': 0, 'G': 0, 'M': 0, 'S': 0, 'C': 0, \n 'H': 0, 'N': 0, 'T': 0, 'D': 0, 'I': 0,\n 'P': 0, 'V': 0, 'E': 0, 'K': 0, 'Q': 0,\n 'W': 0, 'F': 0, 'L': 0, 'R': 0, 'Y': 0,\n '-': 0\n }\n \n self.nucCompDict = {'A': 0, 'T': 0, 'G': 0, 'C': 0, 'U': 0, 'N': 0} #nucleotide composition dictionary\n\n self.rnaCodonCompDict = { #RNA codon composition dictionary\n 'UUU': 0, 'UCU': 0, 'UAU': 0, 'UGU': 0, \n 'UUC': 0, 'UCC': 0, 'UAC': 0, 'UGC': 0, \n 'UUA': 0, 'UCA': 0, 'UAA': 0, 'UGA': 0, \n 'UUG': 0, 'UCG': 0, 'UAG': 0, 'UGG': 0, \n \n 'CUU': 0, 'CCU': 0, 'CAU': 0, 'CGU': 0, \n 'CUC': 0, 'CCC': 0, 'CAC': 0, 'CGC': 0, \n 'CUA': 0, 'CCA': 0, 'CAA': 0, 'CGA': 0, \n 'CUG': 0, 'CCG': 0, 'CAG': 0, 'CGG': 0, \n \n 'AUU': 0, 'ACU': 0, 'AAU': 0, 'AGU': 0, \n 'AUC': 0, 'ACC': 0, 'AAC': 0, 'AGC': 0, \n 'AUA': 0, 'ACA': 0, 'AAA': 0, 'AGA': 0, \n 'AUG': 0, 'ACG': 0, 'AAG': 0, 'AGG': 0, \n \n 'GUU': 0, 'GCU': 0, 'GAU': 0, 'GGU': 0, \n 'GUC': 0, 'GCC': 0, 'GAC': 0, 'GGC': 0, \n 'GUA': 0, 'GCA': 0, 'GAA': 0, 'GGA': 0, \n 'GUG': 0, 'GCG': 0, 'GAG': 0, 'GGG': 0, \n }\n\n self.dnaCodonCompDict = {key.replace('U','T'): value for key, value in self.rnaCodonCompDict.items()}\n \n def addSequence (self, inSeq):\n '''Accepts new sequences and stores nucleotide and RNA codon info in respective dictionaries.'''\n \n upperInSeq = inSeq.upper() #capitalizes the input string.\n\n #counts the nucleotides.\n for nucleotides in upperInSeq: \n if nucleotides in self.nucCompDict:\n self.nucCompDict[nucleotides] += 1\n\n #counts RNA codons.\n for nucleotide in range(0, len(upperInSeq), 3): \n codon = upperInSeq[nucleotide: nucleotide + 3] #defining codons\n rnaCodon = codon.replace('T','U')\n if rnaCodon in self.rnaCodonTable.keys(): \n self.rnaCodonCompDict[rnaCodon] += 1 #update in rnaCompDict\n self.aaCompDict[self.rnaCodonTable[rnaCodon]] += 1 #update in aaCompDict \n\n def aaComposition(self):\n '''Returns the amino acid composition dictionary.'''\n return self.aaCompDict\n def nucComposition(self):\n '''Returns the nucleotide composition dictionary.'''\n return self.nucCompDict\n def codonComposition(self):\n '''Returns the codon composition dictionary.'''\n return self.rnaCodonCompDict\n def nucCount(self):\n '''Returns the sum of all values in the nucleotide composition dictionary (Length of sequence).'''\n return sum(self.nucCompDict.values())\n\n\n################################################################################################################################################\n\nclass ProteinParam :\n '''Create class ProteinParam that includes methods that analyze the user input protein sequence.'''\n \n aa2mw = { #molecular weight of each amino acid. 
\n    'A': 89.093, 'G': 75.067, 'M': 149.211, 'S': 105.093, 'C': 121.158,\n    'H': 155.155, 'N': 132.118, 'T': 119.119, 'D': 133.103, 'I': 131.173,\n    'P': 115.131, 'V': 117.146, 'E': 147.129, 'K': 146.188, 'Q': 146.145,\n    'W': 204.225, 'F': 165.189, 'L': 131.173, 'R': 174.201, 'Y': 181.189\n    }\n\n    mwH2O = 18.015 #molecular weight of H2O\n    aa2abs280= {'Y':1490, 'W': 5500, 'C': 125} #absorbance at 280 nm.\n\n    aa2chargePos = {'K': 10.5, 'R':12.4, 'H':6} #pKa of positively charged Amino Acids\n    aa2chargeNeg = {'D': 3.86, 'E': 4.25, 'C': 8.33, 'Y': 10} #pKa of negatively charged Amino acids.\n    \n    aaNterm = 9.69 #pKa of the N terminus.\n    aaCterm = 2.34 #pKa of the C terminus.\n\n    def __init__ (self, protein):\n        '''Creates an amino acid composition dictionary and computes the number of each amino acid.'''\n        upperProtein = protein.upper() #Upper cased sequence. \n\n        #Dictionary for every amino acid, starts with 0\n        self.aaCompDict = {\n            'A': 0, 'G': 0, 'M': 0, 'S': 0, 'C': 0, \n            'H': 0, 'N': 0, 'T': 0, 'D': 0, 'I': 0,\n            'P': 0, 'V': 0, 'E': 0, 'K': 0, 'Q': 0,\n            'W': 0, 'F': 0, 'L': 0, 'R': 0, 'Y': 0\n        }\n        \n        #For every character in upperProtein (input), if that character is in the dictionary, plus 1.\n        #This counts how many valid characters there are in the sequence + disregards invalid characters. \n        for aa in upperProtein: \n            if aa in self.aaCompDict: \n                self.aaCompDict[aa] += 1 \n\n    def aaCount (self):\n        '''Computes and returns the length of the aa sequence by summing all values in the aa dictionary.'''\n        aaSeqLenth = sum(self.aaCompDict.values()) #all values in aa dictionary = total length of aa sequence. \n        return aaSeqLenth \n    \n    def pI (self):\n        '''Calculates and returns the theoretical isoelectric point, which is the pH that yields a neutral net charge.'''\n        currentCharge = 999\n        currentpH = 999\n        \n        for pH in range(0 ,1400): #iterates every pH in range 0-14\n            pH = pH/100 \n            newCharge = self._charge_(pH) #gets the charge value from _charge_ method\n            if newCharge < currentCharge: #if charge used in the charge method is less than current charge,\n                if newCharge > 0: #and above 0,\n                    currentCharge = newCharge #then the value will be current charge\n                    currentpH = pH #and the pH used in the charge method is the current pH \n\n        return currentpH\n\n    def aaComposition (self) :\n        '''Returns all values in the aa composition dictionary.'''\n        return self.aaCompDict\n\n    def _charge_ (self, pH):\n        '''Calculates and returns the net charge on the protein at a specific pH.'''\n        \n        #Calculates net Positive charge. \n        netPosCharge = 0 #Initialized at 0.\n        for aa in ProteinParam.aa2chargePos.keys(): #adding \n            netPosCharge += self.aaCompDict.get(aa) * ((10 ** ProteinParam.aa2chargePos.get(aa))\\\n            / (10 ** ProteinParam.aa2chargePos.get(aa) + 10 ** pH))\n        netPosCharge += 10 ** ProteinParam.aaNterm / (10 ** ProteinParam.aaNterm + 10 ** pH)\n\n        #Calculates net Negative charge.\n        netNegCharge = 0\n        for aa in ProteinParam.aa2chargeNeg.keys():\n            netNegCharge += self.aaCompDict.get(aa) * ((10 ** pH)\\\n            / (10 ** ProteinParam.aa2chargeNeg.get(aa) + 10 ** pH))\n        netNegCharge += 10 ** pH / (10 ** ProteinParam.aaCterm + 10 ** pH)\n\n        #Calculates and returns total net charge.\n        netCharge = netPosCharge - netNegCharge\n        return netCharge
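    #Quick hand-computed sanity check for the summation above (not from the original file):
    #a free N-terminus with pKa 9.69 is still almost fully protonated at pH 7.0, since
    #    10 ** 9.69 / (10 ** 9.69 + 10 ** 7.0) = 0.998 (about +1 of charge)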
\n\n    def molarExtinction (self): 
        '''Calculates and returns the molar extinction coefficient.''' \n\n        #molar extinction coefficient equation: the value of Y,W,C in the aa dict times the absorbance value at 280nm. \n        molarExtinct = (self.aaCompDict.get('Y') * ProteinParam.aa2abs280.get('Y'))\\\n        + (self.aaCompDict.get('W') * ProteinParam.aa2abs280.get('W'))\\\n        + (self.aaCompDict.get('C') * ProteinParam.aa2abs280.get('C')) \n        return molarExtinct\n\n    def massExtinction (self):\n        '''Calculates and returns the Mass extinction coefficient.'''\n        myMW = self.molecularWeight()\n        return self.molarExtinction() / myMW if myMW else 0.0 \n\n    def molecularWeight (self):\n        '''Calculates and returns the molecular weight of the protein sequence.'''\n        aaMolarWeight = 0 #mw initialized at 0 \n        \n        #for every amino acid in the aa dictionary, the value of each aa is multiplied with the mw of the aa\n        #minus the mw of water that is released with peptide bond formation. \n        for aa in self.aaCompDict.keys(): \n            aaMolarWeight += self.aaCompDict[aa] * (ProteinParam.aa2mw[aa] - ProteinParam.mwH2O) \n        \n        finalMolarWeight = ProteinParam.mwH2O + aaMolarWeight\n        return finalMolarWeight\n\n################################################################################################################################################\n\nimport sys\nclass FastAreader :\n    ''' \n    Define objects to read FastA files.\n    \n    instantiation: \n    thisReader = FastAreader ('testTiny.fa')\n    usage:\n    for head, seq in thisReader.readFasta():\n        print (head,seq)\n    '''\n    def __init__ (self, fname=''):\n        '''constructor: saves attribute fname '''\n        self.fname = fname\n            \n    def doOpen (self):\n        ''' Handle file opens, allowing STDIN.'''\n        if self.fname == '':\n            return sys.stdin\n        else:\n            return open(self.fname)\n        \n    def readFasta (self):\n        ''' Read an entire FastA record and return the sequence header/sequence'''\n        header = ''\n        sequence = ''\n        \n        with self.doOpen() as fileH:\n            \n            header = ''\n            sequence = ''\n            \n            # skip to first fasta header\n            line = fileH.readline()\n\n            while not line.startswith('>') :\n                if not line: # we are at EOF\n                    return header, sequence\n                line = fileH.readline()\n\n            header = line[1:].rstrip()\n\n            for line in fileH:\n                if line.startswith ('>'):\n                    yield header,sequence\n                    header = line[1:].rstrip()\n                    sequence = ''\n                else :\n                    sequence += ''.join(line.rstrip().split()).upper()\n\n        yield header,sequence\n\n","sub_path":"sequenceAnalysis.py","file_name":"sequenceAnalysis.py","file_ext":"py","file_size_in_byte":20128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
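FastAreader above yields (header, sequence) pairs lazily, so callers can stream arbitrarily large FastA files. A typical driver loop, illustrative only and assuming a file named test.fa exists:

```python
reader = FastAreader('test.fa')   # or FastAreader() to read from STDIN
for header, sequence in reader.readFasta():
    print(header, len(sequence))
```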
def contents():\n return render_contents(context._locals(__M_locals))\n form = context.get('form', UNDEFINED)\n request = context.get('request', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'top_banner'):\n context['self'].top_banner(**pageargs)\n \n\n __M_writer('\\r\\n\\r\\n\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'contents'):\n context['self'].contents(**pageargs)\n \n\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_top_banner(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def top_banner():\n return render_top_banner(context)\n __M_writer = context.writer()\n __M_writer('\\r\\n\\t
    Edit Rental
    \\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_contents(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n rental = context.get('rental', UNDEFINED)\n def contents():\n return render_contents(context)\n form = context.get('form', UNDEFINED)\n request = context.get('request', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n\\r\\n')\n if request.user.has_perm('home.agent'):\n __M_writer('\\r\\n\\t\\t
    \\r\\n\\r\\n\\t\\t\\t\\r\\n\\r\\n\\t\\t\\t\\t')\n __M_writer(str( form ))\n __M_writer('\\r\\n\\r\\n\\r\\n\\t\\t\\t
    \\r\\n\\t\\t\\t
    \\r\\n\\t\\t\\t\\t\\r\\n\\t\\t\\t\\tEdit Fee\\r\\n\\t\\t\\t
    \\r\\n\\t\\t
    \\r\\n\\r\\n')\n else:\n __M_writer('\\t

    Down for maintenance

    \\r\\n')\n __M_writer('\\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"source_encoding\": \"ascii\", \"filename\": \"C:\\\\Python34\\\\Projects\\\\colonial\\\\rental\\\\templates/rentals.edit.html\", \"uri\": \"rentals.edit.html\", \"line_map\": {\"66\": 8, \"39\": 1, \"44\": 5, \"75\": 8, \"76\": 10, \"77\": 11, \"78\": 16, \"79\": 16, \"80\": 22, \"81\": 22, \"82\": 26, \"83\": 27, \"84\": 29, \"54\": 3, \"90\": 84, \"27\": 0, \"60\": 3}}\n__M_END_METADATA\n\"\"\"\n","sub_path":"rental/cached_templates/templates/rentals.edit.html.py","file_name":"rentals.edit.html.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"265174992","text":"import torch\nfrom tqdm import tqdm\nimport torch.nn.functional as F\nfrom utils.setup import get_device\nfrom collections import defaultdict\n\ndef test(\n model, device, \n test_loader, \n criterion,\n epoch,\n lr_scheduler=None\n):\n model.eval()\n \n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n y_pred = model(data)\n test_loss += criterion(y_pred, target)\n pred = y_pred.argmax(dim=1, keepdim=True) \n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n test_loss = test_loss.item()\n test_acc = 100. * correct / len(test_loader.dataset)\n \n print(\n f'TEST \\\n Loss:{test_loss:.4f} \\\n Acc:{test_acc:.2f} \\\n [{correct} / {len(test_loader.dataset)}]'\n )\n \n return test_loss, test_acc\n\n\ndef get_sample_predictions(trainer, correct_samples=20, mistake_samples=20):\n device = get_device()\n selected_preds = defaultdict(lambda : defaultdict(list))\n with torch.no_grad():\n for (data, target), (data_n, _) in tqdm(\n zip(trainer.test_loader, trainer.test_loader_unnormalized),\n desc='Generating sample predictions'\n ):\n data, target = data.to(device), target.to(device)\n y_pred = trainer.net(data)\n pred = y_pred.argmax(dim=1, keepdim=True)\n correctness = pred.eq(target.view_as(pred))\n\n for n, correct in enumerate(correctness):\n actual_class = target[n].item()\n pred_class = pred[n].item()\n scores = y_pred[n].cpu().numpy()\n\n temp_content = {\n 'pred_class': pred_class,\n 'scores': scores,\n 'data': data[n].cpu(),\n 'data_unnormalized': data_n[n].cpu(),\n 'actual_class': actual_class,\n 'pred_class': pred_class, \n }\n\n if correct[0].item():\n if len(selected_preds['correct'][actual_class]) >= correct_samples:\n continue\n selected_preds['correct'][actual_class].append(temp_content)\n else:\n if len(selected_preds['mistakes'][actual_class]) >= mistake_samples:\n continue\n selected_preds['mistakes'][actual_class].append(temp_content)\n return selected_preds","sub_path":"utils/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"374708240","text":"import streamlit as st\nfrom PIL import Image\nimport requests\nimport base64\nimport os\n\ndef get_binary_file_downloader_html(bin_file, file_label='File'):\n with open(bin_file, 'rb') as f:\n data = f.read()\n bin_str = base64.b64encode(data).decode()\n href = f'[ Download Image ]'\n return href\n\n\ndef enlarge_image():\n image_file = st.file_uploader(\"Upload Image\", type = ['jpg','png','jpeg'])\n\n if image_file is None:\n st.warning(\"Please Upload an Image\")\n\n if 
image_file is not None:\n        image1 = Image.open(image_file)\n        rgb_im = image1.convert('RGB')\n        image = rgb_im.save(\"saved_image.jpg\")\n        image_path = \"saved_image.jpg\"\n        st.image(image1,width = 400)\n\n    if st.sidebar.button(\"Enhance Resolution 🔍\"):\n        if image_file is not None:\n            st.warning(\"Please wait ⌛... artistic work in progress 🎨🎭👨‍🎨\")\n            r = requests.post(\n                \"https://api.deepai.org/api/waifu2x\",\n                files={\n                    'image': open('saved_image.jpg', 'rb'),\n                },\n                headers={'api-key': '8f0499fe-bf0f-455d-9f4b-3acb442b49c4'}\n            )\n\n            color_image_url = r.json()[\"output_url\"]\n\n            img_data = requests.get(color_image_url).content\n            with open('color_image.jpg', 'wb') as handler:\n                handler.write(img_data)\n            st.success(\"Image Enhancement Successful 🙌🥳🎉\")\n            color_image = Image.open('color_image.jpg')\n\n            st.subheader(\"Enhanced Image\")\n            st.image(color_image, width=400)\n\n            st.markdown(get_binary_file_downloader_html('color_image.jpg', 'Picture'), unsafe_allow_html=True)\n\n        else:\n            st.error(\"Please Upload Image!!!\")\n","sub_path":"app/super_resolution/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"126069008","text":"import os\nimport pandas as pd\n\nfrom compute_uptake import compute_uptake\nfrom groups import at_risk_groups\nfrom custom_operations import invert_df\n\n\ndemographic_cols = [\n    \"age_band\",\n    \"sex\",\n    #\"ethnicity\",\n    \"high_level_ethnicity\",\n    \"imd_band\",\n]\n#at_risk_cols = [\"atrisk_group\"] #+ list(at_risk_groups)\nother_cols = [\"preg_group\", \"sevment_group\", \"learndis_group\", \"immuno_group\"]\n\ncols = demographic_cols + other_cols #+ at_risk_cols\n\n\ndef run(input_path=\"output/cohort.pickle\", output_dir=\"output\"):\n    backend = os.getenv(\"OPENSAFELY_BACKEND\", \"expectations\")\n    base_path = f\"{output_dir}/{backend}/cumulative_coverage\"\n    cohort = pd.read_pickle(input_path)\n\n    for event_col, key in [\n        (\"vacc1_dat\", \"dose_1\"),\n        (\"vacc_any_record_dat\", \"any_vaccine_record\"),\n        (\"decl_dat\", \"declined\"),\n        #(\"cov2not_dat\", \"vaccine_not_done\"),\n\t    #(\"cov1decl_acc_dat\", \"declined_accepted\"),\n    ]:\n\n        # Compute uptake by wave\n        dir_path = f\"{base_path}/all/{key}\"\n        os.makedirs(dir_path, exist_ok=True)\n        uptake = compute_uptake(cohort, event_col, \"wave\")\n        uptake.to_csv(f\"{dir_path}/all_{key}_by_group.csv\")\n\n        # Compute uptake by broader waves (1-3)\n        uptake_w2 = compute_uptake(cohort, event_col, \"wave2\")\n        uptake_w2.to_csv(f\"{dir_path}/all_{key}_by_group2.csv\")\n\n        # for \"any vaccine record\" calculate the inverse, i.e. 
no of patients with NO vaccine related record\n if event_col == \"vacc_any_record_dat\":\n uptake_inv = invert_df(uptake)\n uptake2_inv = invert_df(uptake_w2)\n out_path = f\"{base_path}/all/unreached\"\n os.makedirs(out_path, exist_ok=True)\n uptake_inv.to_csv(f\"{out_path}/all_unreached_by_group.csv\")\n uptake2_inv.to_csv(f\"{out_path}/all_unreached_by_group2.csv\")\n\n # For each wave, compute uptake by column\n for wave in range(1, 9 + 1):\n group_type=\"\"\n compute_uptake_for_wave(cohort, wave, cols, event_col, key, group_type, base_path, dir_path)\n for wave2 in range(1, 3 + 1):\n group_type = \"2\"\n compute_uptake_for_wave(cohort, wave2, cols, event_col, key, group_type, base_path, dir_path)\n\n\ndef compute_uptake_for_wave(cohort, wave, cols, event_col, key, group_type, base_path, dir_path):\n os.makedirs(dir_path, exist_ok=True)\n wave_cohort = cohort[cohort[f\"wave{group_type}\"] == wave]\n\n for col in cols:\n dir_path = f\"{base_path}/group{group_type}_{wave}/{key}\"\n os.makedirs(dir_path, exist_ok=True)\n uptake = compute_uptake(wave_cohort, event_col, col)\n if uptake is None:\n continue\n uptake.to_csv(f\"{dir_path}/group_{wave}_{key}_by_{col}.csv\")\n\n if event_col == \"vacc_any_record_dat\":\n uptake2 = invert_df(uptake)\n out_path = f\"{base_path}/group{group_type}_{wave}/unreached\"\n os.makedirs(out_path, exist_ok=True)\n uptake2.to_csv(f\"{out_path}/group_{wave}_unreached_by_{col}.csv\")\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"analysis/compute_uptake_for_paper.py","file_name":"compute_uptake_for_paper.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"188987618","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.test import TestCase\nfrom django.test import Client\nfrom django.urls import reverse\nfrom django.utils.http import urlencode\n\n# Create your tests here.\nclass TestPropertyViews(TestCase):\n fixtures = ['property-testdata.json']\n \n def test_LookupView(self):\n client = Client()\n query = urlencode({'search':'House'})\n url = reverse('properties:lookup') + '?' + query\n response = client.get(url)\n result = response.context['results']\n expected_count = 2\n results_count = len(result)\n self.assertEqual(results_count, expected_count)\n\n def test_DistanceView(self):\n client = Client()\n nmhu = urlencode({'address':'1009 Diamond St, Las Vegas, NM'})\n distance = urlencode({'distance':'100'})\n url = reverse('properties:distance') + '?' + nmhu + '&' + distance\n response = client.get(url)\n result = response.context['results']\n expected_count = 2\n results_count = len(result)\n self.assertEqual(results_count, expected_count)","sub_path":"properties/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"650389830","text":"# Example 16-8 from Fluent Python\n\nclass DemoException(Exception):\n '''Test exception'''\n\ndef demo_handler():\n print('-> coroutine started')\n while True:\n try:\n x = yield\n except DemoException:\n print('*** DemoException handled. 
Continuing...')\n else:\n print('-> coroutine received {!r}'.format(x))\n\n","sub_path":"ch16_coroutines/exc_demo.py","file_name":"exc_demo.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"415870940","text":"#!/usr/bin/env/ python\n\nimport sqlite3\nimport psycopg2 as pg\nimport csv\nimport pandas as pd\n\n\n# Loading up the CSV in order to edit some of the troublesome names\n# Namely eliminating apostrophes\n\ndef f(x):\n if isinstance(x, str):\n return (#x.replace(' ', '_')\n # .replace('/', '_')\n #.replace(')','')\n # .replace('(','')\n x.replace(\"'\", ''))\n #.replace(\".\", \"\"))\n else:\n return x\ndf = pd.read_csv('titanic.csv').rename(columns=f).applymap(f)\ndf.to_csv(\"titanic_edit.csv\", index=False)\nsl_conn = sqlite3.connect('../joshdsolis/titanic.db')\n\n\ncurs = sl_conn.cursor()\ncurs.execute(\"\"\"CREATE TABLE t (\n Survived int, \n Pclass int, \n Name varchar(500), \n Sex varchar(30), \n Age int, \n Siblings_Spouses_Aboard int, \n Parents_Children_Aboard int, \n Fare float\n);\"\"\")\n\n\nwith open('titanic_edit.csv', 'rt') as fin:\n dr = csv.DictReader(fin)\n to_db = [(i['Survived'], i['Pclass'], i['Name'], i['Sex'],\n i['Age'], i['Siblings/Spouses Aboard'], i['Parents/Children Aboard'],\n i['Fare']) for i in dr]\n\ncurs.executemany(\"\"\"INSERT INTO t (\n Survived, \n Pclass, \n Name, \n Sex, \n Age, \n Siblings_Spouses_Aboard, \n Parents_Children_Aboard, \n Fare) VALUES (?,?,?,?,?,?,?,?);\"\"\", to_db)\n\nsl_conn.commit()\nsl_conn.close()\n\n\ndbname = 'hplkjeta'\nuser ='hplkjeta'\npassword='oZIHUw7MLlfv7STG_kigAzstwcYwUE2z'\nhost = 'stampy.db.elephantsql.com'\n\n \npg_conn = pg.connect(dbname=dbname, user=user,\n password=password, host=host)\n\nsl_conn = sqlite3.connect('../joshdsolis/titanic.db')\nresults = sl_conn.execute('SELECT * FROM t;').fetchall()\n\n\ndef make_and_populate_character_table():\n pg_curs = pg_conn.cursor()\n\n pg_curs.execute(\"\"\"CREATE TABLE titanic2 (\n Survived int, \n Pclass int, \n Name varchar(500), \n Sex varchar(30), \n Age int, \n Siblings_Spouses_Aboard int, \n Parents_Children_Aboard int, \n Fare float\n );\"\"\")\n\n for result in results:\n insert_result = \"\"\"INSERT INTO titanic2\n (Survived, Pclass, Name, Sex, Age, Siblings_Spouses_Aboard, Parents_Children_Aboard, Fare)\n VALUES\"\"\" + str(result)\n pg_curs.execute(insert_result)\n \n pg_conn.commit()\n","sub_path":"module2-sql-for-analysis/insert_titanic.py","file_name":"insert_titanic.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"220921071","text":"from selenium import webdriver\r\nfrom time import sleep\r\n\r\nbrowser = webdriver.Chrome()\r\n\r\ndef imdb():\r\n # Full-Screen Mode.\r\n browser.maximize_window()\r\n browser.get(\"https://www.imdb.com/chart/top/\")\r\n sleep(1)\r\n browser.find_element_by_xpath(\"//*[@id='main']/div/span/div/div/div[3]/table/tbody/tr[47]/td[2]/a\").click()\r\n sleep(1)\r\n # Saves a screenshot.\r\n browser.save_screenshot(\"movie(47).png\")\r\n sleep(1)\r\n # Returns to the previous page.\r\n browser.back()\r\n sleep(1)\r\n browser.find_element_by_xpath(\"//*[@id='main']/div/span/div/div/div[3]/table/tbody/tr[1]/td[2]/a\").click()\r\n sleep(1)\r\n browser.save_screenshot(\"movie(1).png\")\r\n sleep(1)\r\n # Closes the current tab.\r\n 
browser.close()\r\n\r\nimdb()","sub_path":"src/Selenium/Explorer.py","file_name":"Explorer.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"91703393","text":"# import os\n# 0 = all messages are logged (default behavior)\n# 1 = INFO messages are not printed\n# 2 = INFO and WARNING messages are not printed\n# 3 = INFO, WARNING, and ERROR messages are not printed\n# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'\n# os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '0'\nfrom transformers import BertTokenizer\nimport tensorflow as tf\nfrom absl import logging\nfrom absl import flags\nfrom absl import app\nimport logging as logger\nimport google.cloud.logging\n\nimport sys\n\nFLAGS = flags.FLAGS\nflags.DEFINE_enum('verbosity_level', 'INFO', ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'FATAL'], 'verbosity in the logfile')\n\n\ndef main(argv):\n\n # set level of verbosity\n if FLAGS.verbosity_level == 'DEBUG':\n logging.set_verbosity(logging.DEBUG)\n print('logging.DEBUG')\n elif FLAGS.verbosity_level == 'INFO':\n logging.set_verbosity(logging.INFO)\n print('logging.INFO')\n elif FLAGS.verbosity_level == 'WARNING':\n logging.set_verbosity(logging.WARNING)\n print('logging.WARNING')\n elif FLAGS.verbosity_level == 'ERROR':\n logging.set_verbosity(logging.ERROR)\n print('logging.ERROR')\n elif FLAGS.verbosity_level == 'FATAL':\n logging.set_verbosity(logging.FATAL)\n print('logging.FATAL')\n else:\n logging.set_verbosity(logging.INFO)\n print('logging.DEFAULT -> INFO')\n\n # logging.get_absl_handler().python_handler.stream = sys.stdout\n\n # Instantiates a client\n client = google.cloud.logging.Client()\n\n # Connects the logger to the root logging handler; by default this captures\n # all logs at INFO level and higher\n client.setup_logging()\n\n fmt = \"[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s\"\n formatter = logger.Formatter(fmt)\n logging.get_absl_handler().setFormatter(formatter)\n\n # set level of verbosity\n # logging.set_verbosity(logging.DEBUG)\n\n # logging.set_stderrthreshold(logging.WARNING)\n # logging._warn_preinit_stderr = True\n # loggers = [logger.getLogger()] # get the root logger\n\n # for handler in loggers:\n # print(\"handler \", handler)\n # print(\" handler.level--> \", handler.level)\n # print(\" handler.name--> \", handler.name)\n # print(\" handler.propagate--> \", handler.propagate)\n # print(\" handler.parent--> \", handler.parent )\n # print(dir(handler))\n # level_log = 'INFO'\n # root_logger = logger.getLogger()\n # root_logger.handlers=[handler for handler in root_logger.handlers if isinstance(handler, (CloudLoggingHandler, ContainerEngineHandler, logging.ABSLHandler))]\n #\n # for handler in root_logger.handlers:\n # print(\"----- handler \", handler)\n # print(\"---------class \", handler.__class__)\n # if handler.__class__ == logging.ABSLHandler:\n # handler.python_handler.stream = sys.stdout\n # #handler.handler.setStream(sys.stdout)\n tf.get_logger().propagate = False\n root_logger = logger.getLogger()\n print(' root_logger :', root_logger)\n print(' root_logger.handlers :', root_logger.handlers)\n print(' len(root_logger) :', len(root_logger.handlers))\n for h in root_logger.handlers:\n print('handlers:', h)\n print(\"---------class \", h.__class__)\n if h.__class__ == logging.ABSLHandler:\n print('++logging.ABSLHandler')\n h.python_handler.stream = sys.stdout\n h.setLevel(logger.INFO)\n if h.__class__ == 
google.cloud.logging.handlers.handlers.CloudLoggingHandler:\n print('++CloudLoggingHandler')\n h.setLevel(logger.CRITICAL)\n h.setStream(sys.stdout)\n logger.getLogger().addHandler(h)\n if h.__class__ == logger.StreamHandler:\n print('++logging.StreamHandler')\n h.setLevel(logger.CRITICAL)\n h.setStream(sys.stdout)\n logger.getLogger().addHandler(h)\n\n logging.set_stderrthreshold(logging.WARNING)\n # handler = client.get_default_handler()\n # print('hhh', handler)\n # logger.getLogger().setLevel(logger.INFO)\n # logger.getLogger().addHandler(handler)\n\n # handler = logger.StreamHandler(sys.stderr)\n # handler.setLevel(logger.CRITICAL)\n # logger.getLogger().addHandler(handler)\n\n # handler = logger.StreamHandler(sys.stdout)\n # handler.setLevel(logger.CRITICAL)\n # logger.getLogger().addHandler(handler)\n\n print(' 0 print --- ')\n logging.info(' 1 logging:')\n logging.info(' 2 logging:')\n\n print(' 3 print --- ')\n logging.debug(' 4 logging-test-debug')\n logging.info(' 5 logging-test-info')\n logging.warning(' 6 logging-test-warning')\n logging.error(' 7 logging test-error')\n print(' 8 print --- ')\n _ = BertTokenizer.from_pretrained('bert-base-uncased')\n print(' 9 print --- ')\n _ = tf.distribute.MirroredStrategy()\n print('10 print --- ')\n\n\nif __name__ == '__main__':\n app.run(main)\n","sub_path":"src/model/test_log/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"9921448","text":"#!/usr/bin/env python3\n\nfrom logging import error, warning, info, INFO, basicConfig\nfrom os import listdir\n\ndependencies = {}\nbase = 'glibc/localedata/locales/'\n#basicConfig(level=INFO)\n\nfor filename in sorted(listdir(base)):\n path = base + filename\n info('Processing locale file {}...'.format(path))\n with open(path) as locale:\n for line in locale:\n line = line[:-1]\n if line.startswith('copy'):\n # next line also supports tab characters\n dependency = ' '.join(line.split()).split(' ')[1]\n if dependency == '':\n error('Could not find dependency in locale {} for line {}'\n .format(filename, line))\n exit(1)\n if dependency[0] == '\"' and dependency[-1] == '\"':\n dependency = dependency[1:-1]\n if filename not in dependencies:\n dependencies[filename] = set()\n dependencies[filename].add(dependency)\n\nreported = [] # prevent reporting reverse direction\nfor destination, sources in sorted(dependencies.items()):\n for source in sources:\n if source in dependencies and destination in dependencies[source] and (source, destination) not in reported:\n warning('Cyclic dependencies via copy found between locales {} and {}'\n .format(destination, source))\n reported.append((destination, source))","sub_path":"analysis/3-cyclic-dependencies.py","file_name":"3-cyclic-dependencies.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"455371535","text":"#encoding:utf-8--\r\nimport requests\r\nfrom lxml import etree\r\nimport re\r\nfrom collections import OrderedDict\r\nimport pandas as pd\r\nimport time\r\nfrom datetime import datetime\r\nimport random\r\nfrom time import sleep\r\nclass WEIBO():\r\n cookie = {\r\n 'Cookie': '_T_WM=52165934645; ALF=1573204852; SCF=ArdMMKY9SBOgWxi4HE1DCrEm8vYkDcTnT_8NIoAFJhr3yiG1ryIrOOKbX6ecfBCNdCFo6T_cvboV37xveAwUh34.; SUB=_2A25wmdaYDeRhGeFP61sV-CvOzTqIHXVQZfrQrDV6PUJbktANLUiikW1NQSI-eIFPm_5zxcxo3ah_9S8cH-4Nf-Iy; 
SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9W54z7GLQ_uRCDy3AoKHpPxB5JpX5K-hUgL.FoMpeh.X1h-ESoq2dJLoIp7LxKML1KBLBKnLxKqL1hnLBoMNeK54Shnfeoqc; SUHB=0OMk-etS2Ek-ET; SSOLoginState=1570612936'}\r\n    def __init__(self):#initialization\r\n        self.weibo_num=0\r\n        self.weibo=[]\r\n    def deal_html(self,url):#fetch and parse the html\r\n        html=requests.get(url,cookies=self.cookie).content\r\n        selector=etree.HTML(html)\r\n        return selector\r\n    def get_id(self,info):#get the user id\r\n        id=info.xpath('@id')[0][2:]\r\n        return id\r\n    def get_date(self,info):#get the post date\r\n        times = info.xpath('div/span[@class=\"ct\"]/text()')#mind the xpath syntax: one small mistake and no data can be read\r\n        times = ''.join(times)\r\n        date = str(times[:times.find(' ')])\r\n        #if u'今' in date:\r\n        #    date=time.strftime(\"%m月%d日\",time.localtime(time.time()))\r\n        if u'今天' in times or u'分钟' in times or u'刚刚' in times:\r\n            month=datetime.now().strftime('%m')\r\n            day=datetime.now().strftime('%d')\r\n            date = month+'月'+day+'日'\r\n\r\n        return date\r\n    def get_name(self,info):#get the poster's name\r\n        name=info.xpath('div/a[@class=\"nk\"]/text()')[0]\r\n        return name\r\n\r\n    def get_content(self,info):#get the post content\r\n        content=''.join(info.xpath('div//text()'))\r\n        contents = content[content.find(':') + 1:content.find(u'赞')]\r\n        return contents\r\n    def get_fonter(self,info):#get likes, reposts and comments\r\n        pattern = r'\\d+'\r\n        halfcontent = info.xpath('div/a/text()')\r\n        halfcontent = ''.join(halfcontent)\r\n        foot = halfcontent[halfcontent.find(u'赞'):halfcontent.find(u'收藏')]\r\n        foots = re.findall(pattern, foot)\r\n        return foots\r\n    def printAweibo(self,info,k):#print the scraped info\r\n        print(self.word_list[k])\r\n        print(self.get_id(info))\r\n        print(self.get_date(info))\r\n        print(self.get_name(info))\r\n        print(self.get_content(info))\r\n        print(\"Likes: \"+self.get_fonter(info)[0])\r\n        print(\"Reposts: \" + self.get_fonter(info)[1])\r\n        print(\"Comments: \" + self.get_fonter(info)[2])\r\n    def get_weibo_tuple(self,info,k):#collect one weibo's fields as an OrderedDict\r\n        weibo=OrderedDict()\r\n        weibo['user id']=self.get_id(info)\r\n        weibo['weibo keyword']=self.word_list[k]\r\n        weibo['send date']=self.get_date(info)\r\n        weibo['user name']=self.get_name(info)\r\n        weibo['weibo content']=self.get_content(info)\r\n        weibo['weibo support']=self.get_fonter(info)[0]\r\n        weibo['weibo transpound']=self.get_fonter(info)[1]\r\n        weibo['weibo comment']=self.get_fonter(info)[2]\r\n        return weibo\r\n    def get_pagenum(self,k):#get the number of result pages\r\n        try:\r\n            url = 'https://weibo.cn/search/mblog?hideSearchFrame=&keyword=%s&advancedfilter=1&starttime=20190920&endtime=20191008&sort=hot' % (self.word_list[k])\r\n            html = self.deal_html(url)\r\n            pageNum = html.xpath('//div[@class=\"pa\"]/form/div/input[@name=\"mp\"]')[0].attrib['value']\r\n            pageNum = int(pageNum)\r\n            return pageNum\r\n        except:\r\n            pass\r\n    def get_keywordlist(self):\r\n        with open(self.filename, 'r', encoding='utf8') as f:\r\n            self.word_list = f.read()\r\n        self.word_list=eval(self.word_list)#convert the string to a list\r\n        self.word_num=len(self.word_list)\r\n    def deal_url(self,words,pageNum): # URL, to be revised later\r\n        #restrict posts to the period 20190920 to 20191008\r\n        urls='https://weibo.cn/search/mblog?hideSearchFrame=&keyword=%s&advancedfilter=1&starttime=20190920&endtime=20191008&sort=hot&page=%d'%(words,pageNum)\r\n        return urls\r\n    def write_weibo(self,info,k):#append the collected fields to the list\r\n        weibo=self.get_weibo_tuple(info,k)\r\n        self.weibo.append(weibo)\r\n    def get_pageweibo(self, url,k):#scrape one page of weibos\r\n        #error tolerance; otherwise 'NoneType' object has no attribute 'xpath' can occur\r\n        self.selector = self.deal_html(url)\r\n        info = self.selector.xpath(\"//div[@class='c']\")\r\n        for i in range(2, len(info) - 2):\r\n            try:\r\n                self.weibo_num += 1\r\n                print(self.weibo_num)\r\n                
self.write_weibo(info[i],k)\r\n                self.printAweibo(info[i],k)\r\n                print(\"-----\" * 100)\r\n            except:\r\n                continue\r\n    def write_csv(self,keepfile):#write to a csv file\r\n        filename=keepfile\r\n        DataFrame=pd.DataFrame(self.weibo,columns=['user id','weibo keyword','send date','user name','weibo support','weibo transpound','weibo comment','weibo content'])\r\n        DataFrame.to_csv(filename,index=False,sep=',')\r\n\r\n    def start(self, filename, keepfilename): # run the crawler\r\n        self.filename = filename\r\n        self.get_keywordlist()\r\n        for k in range(0, self.word_num - 1):\r\n            try:\r\n\r\n                num = self.get_pagenum(k)\r\n                pagenum = 0\r\n                randompage = random.randint(1, 3)\r\n                #randompage=1\r\n                for j in range(1, num):#\r\n                    # sleep between requests to avoid being rate-limited by the site\r\n                    try:\r\n                        if j < num and j == pagenum + randompage:\r\n                            sleep(random.randint(25, 30))\r\n                        url = self.deal_url(self.word_list[k], j)\r\n                        self.get_pageweibo(url, k)\r\n                        pagenum += 1\r\n                    except:\r\n                        continue\r\n\r\n            except:\r\n                continue\r\n        print(self.weibo)\r\n        self.write_csv(keepfilename)\r\n        print(u'Scraped ' + str(self.weibo_num) + u' weibos in total')\r\n\r\n\r\nd=WEIBO()\r\nd.start('2019-10-04newAge.txt','data4.csv')\r\n#the first argument is the input file path\r\n#the second is the output file path\r\n","sub_path":"catch3.py","file_name":"catch3.py","file_ext":"py","file_size_in_byte":6397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"81920039","text":"# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n    def forwards(self, orm):\n        # Adding index on 'CronJobLog', fields ['end_time']\n        db.create_index(u'django_cronium_cronjoblog', ['end_time'])\n\n        # Adding index on 'CronJobLog', fields ['ran_at_time', 'is_success', 'code']\n        db.create_index(u'django_cronium_cronjoblog', ['ran_at_time', 'is_success', 'code'])\n\n        # Adding index on 'CronJobLog', fields ['ran_at_time', 'start_time', 'code']\n        db.create_index(u'django_cronium_cronjoblog', ['ran_at_time', 'start_time', 'code'])\n\n        # Adding index on 'CronJobLog', fields ['start_time', 'code']\n        db.create_index(u'django_cronium_cronjoblog', ['start_time', 'code'])\n\n\n    def backwards(self, orm):\n        # Removing index on 'CronJobLog', fields ['start_time', 'code']\n        db.delete_index(u'django_cronium_cronjoblog', ['start_time', 'code'])\n\n        # Removing index on 'CronJobLog', fields ['ran_at_time', 'start_time', 'code']\n        db.delete_index(u'django_cronium_cronjoblog', ['ran_at_time', 'start_time', 'code'])\n\n        # Removing index on 'CronJobLog', fields ['ran_at_time', 'is_success', 'code']\n        db.delete_index(u'django_cronium_cronjoblog', ['ran_at_time', 'is_success', 'code'])\n\n        # Removing index on 'CronJobLog', fields ['end_time']\n        db.delete_index(u'django_cronium_cronjoblog', ['end_time'])\n\n\n    models = {\n        u'django_cronium.cronjoblog': {\n            'Meta': {'object_name': 'CronJobLog', 'index_together': \"[('code', 'is_success', 'ran_at_time'), ('code', 'start_time', 'ran_at_time'), ('code', 'start_time')]\"},\n            'code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),\n            'end_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),\n            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n            'is_success': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n            'message': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'blank': 'True'}),\n            'ran_at_time': ('django.db.models.fields.TimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 
'True'}),\n 'start_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})\n }\n }\n\n complete_apps = ['django_cronium']","sub_path":"django_cronium/migrations/0003_auto__add_index_cronjoblog_end_time__add_index_cronjoblog_ran_at_time_.py","file_name":"0003_auto__add_index_cronjoblog_end_time__add_index_cronjoblog_ran_at_time_.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"514474513","text":"from flask import request\nfrom flask_restful import Resource, fields, marshal_with\n\nfrom domain.user import UserService\nfrom api.restful.common.api_models import marshal_wrapper, ResponseEntity\nfrom storage.user_storage import UserStorageMgo\n\nbase_fields = {\n 'user_id': fields.String(attribute='_id'),\n 'created': fields.Integer,\n 'updated': fields.Integer,\n 'active': fields.Integer,\n\n 'name': fields.String,\n 'pwd': fields.String,\n 'email': fields.String,\n 'phone': fields.String,\n 'nick_name': fields.String,\n}\n\ndetail_marshal = marshal_wrapper(base_fields)\n\n\nclass LoginApi(Resource):\n def __init__(self):\n self.user_service = UserService(UserStorageMgo())\n\n @marshal_with(detail_marshal)\n def post(self):\n data = request.get_json()\n user = self.user_service.login(data['email'], data['pwd'])\n return ResponseEntity(data=user)\n","sub_path":"api/restful/resources/sys_api.py","file_name":"sys_api.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"5895325","text":"import abc\n\nfrom google.appengine.ext.ndb.key import Key\n\nimport wtforms\nimport wtforms_components\nimport datetime\n\nfrom control import base_auth_response\nfrom flask import flash, redirect\nfrom flask_wtf import Form\n\nfrom flask_restful import Resource\n\nfrom auth import auth\nfrom main import api, app\nfrom model import *\n\n\nclass BaseForm(Form):\n\t@classmethod\n\tdef append_field(cls, name, field):\n\t\tsetattr(cls, name, field)\n\t\treturn cls\n\n\nclass KeyField(wtforms.HiddenField):\n\tdef populate_obj(self, obj, name):\n\t\tsetattr(obj, name, Key(urlsafe=self.data))\n\n\ndef put_value(metric, value):\n\treturn metric(user_key=auth.current_user_key(), value=value).put()\n\n\nclass DurationField(wtforms.StringField):\n\tdef populate_obj(self, obj, name):\n\t\tsplit = self.data.split(':')\n\t\tvalue = int(split[0]) * 3600 + int(split[1]) * 60 + int(split[2])\n\t\tsetattr(obj, name, put_value(DurationMetric, value))\n\n\nclass CountField(wtforms.IntegerField):\n\tdef populate_obj(self, obj, name):\n\t\tsetattr(obj, name, put_value(CountMetric, self.data))\n\n\nclass DecimalField(wtforms.FloatField):\n\tdef populate_obj(self, obj, name):\n\t\tsetattr(obj, name, put_value(DecimalMetric, self.data))\n\n\nclass RecordForm(BaseForm):\n\tactivity_key = KeyField('Activity', [wtforms.validators.required()])\n\tcategory_key = KeyField('Category', [wtforms.validators.required()])\n\tmetric_key = KeyField('Metric', [wtforms.validators.required()])\n\tdate = wtforms.DateField('Date', [wtforms.validators.optional()])\n\tnotes = wtforms.TextAreaField('Notes', [wtforms.validators.optional()])\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(RecordForm, self).__init__(*args, **kwargs)\n\t\tcats, _ = BaseCategory.get_dbs()\n\n\tdef new(self, activity):\n\t\tself.activity_key.data = activity.key.urlsafe()\n\t\tself.category_key.data = activity.category_key.urlsafe()\n\t\tself.metric_key.data = 
activity.metric_key.urlsafe()\n\t\tself.value.label.text = activity.metric_key.get().name\n\t\treturn self\n\n\tdef edit(self, record):\n\t\tself.activity_key.data = record.activity.urlsafe()\n\t\tself.category_key.data = record.category.urlsafe()\n\t\tself.value.data = record.value.get().value\n\t\tself.date.data = record.date\n\t\tself.notes.data = record.notes\n\t\treturn self\n\n\nclass TimeRecordForm(RecordForm):\n\tvalue = DurationField('', [wtforms.validators.required(),\n\t wtforms.validators.regexp(r'^((?:[01]\\d|2[0-3]):[0-5]\\d:[0-5]\\d$)')])\n\n\nclass DecimalRecordForm(RecordForm):\n\tvalue = DecimalField('', [wtforms.validators.required(), wtforms.validators.number_range(min=0)])\n\n\nclass CountRecordForm(RecordForm):\n\tvalue = CountField('', [wtforms.validators.required(), wtforms.validators.number_range(min=0)])\n\n\nclass Activities(Resource):\n\t@auth.login_required\n\tdef get(self, category_key):\n\t\tuser_key = auth.current_user_key()\n\t\tcat = Key(urlsafe=category_key).get()\n\t\tactivities = BaseActivity.get_dbs(user_key=user_key, tracked=True, category_key=cat.key)[0]\n\t\trecords = [BaseRecord.get_dbs(order='-created', user_key=user_key, activity_key=a.key)[0] for a in activities]\n\t\treturn base_auth_response('records/records.html', category_key=category_key,\n\t\t activities=zip(activities, records))\n\n\ndef select_form(activity):\n\tif activity.metric_key.get().type == 'TimeMetric':\n\t\tform = TimeRecordForm\n\telif activity.metric_key.get().type == 'CountMetric':\n\t\tform = CountRecordForm\n\telif activity.metric_key.get().type == 'DecimalMetric':\n\t\tform = DecimalRecordForm\n\telse:\n\t\traise NotImplementedError()\n\n\tif activity.category_key.get().name == 'Crossfit':\n\t\tform.append_field('rxd', wtforms.BooleanField('RXd'))\n\n\treturn form\n\n\nclass NewRecord(Resource):\n\t@auth.login_required\n\tdef get(self, activity_key):\n\t\tactivity = Key(urlsafe=activity_key).get()\n\t\tform = select_form(activity)().new(activity)\n\t\treturn base_auth_response('records/new_record.html', form=form, activity=activity)\n\n\t@auth.login_required\n\tdef post(self, activity_key):\n\t\tactivity = Key(urlsafe=activity_key).get()\n\t\tform = select_form(activity)().new(activity)\n\t\tif form.validate_on_submit():\n\t\t\tif activity.category_key.get().name == 'Crossfit':\n\t\t\t\tentity = CrossfitRecord()\n\t\t\telse:\n\t\t\t\tentity = BaseRecord()\n\t\t\tform.populate_obj(entity)\n\t\t\tentity.user_key = auth.current_user_key()\n\t\t\tif entity.is_valid_entry(form):\n\t\t\t\tentity.put()\n\t\t\t\tflash('Toevoegen succesvol.', category='success')\n\t\t\t\treturn redirect(api.url_for(Activities, category_key=activity.category_key.urlsafe()))\n\t\tflash('Toevoegen niet gelukt.', category='warning')\n\t\treturn base_auth_response('records/new_record.html', activity=activity, form=form)\n\n\napi.add_resource(Activities, '/activities/')\napi.add_resource(NewRecord, '/activity/record/new/')\n","sub_path":"main/control/records.py","file_name":"records.py","file_ext":"py","file_size_in_byte":4691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"116057031","text":"import misc.globals as globals\nfrom classes.interpretable_symbols.InterpretableSymbol import InterpretableSymbol\n\n\nclass ExpressionInterpreter:\n\n # --------------------- #\n # -- PRIVATE METHODS -- #\n # --------------------- #\n\n @staticmethod\n def __get_symbol(char, prev_number_or_symbol) -> InterpretableSymbol:\n if char in 
InterpretableSymbol.interpretable_symbols_dict():\n            for symbol in InterpretableSymbol.interpretable_symbols_dict()[char]:\n                if symbol.check_symbol(char, prev_number_or_symbol):\n                    return symbol\n        raise Exception\n\n    @staticmethod\n    def __read_digits(char):\n        while globals.pos < len(globals.expression) and globals.expression[globals.pos].isdigit():\n            char += globals.expression[globals.pos]\n            globals.pos += 1\n        return int(char)\n\n    @staticmethod\n    def __get_next_number_or_symbol(prev_number_or_symbol):\n        read_digits = ExpressionInterpreter.__read_digits\n        get_symbol = ExpressionInterpreter.__get_symbol\n        if globals.pos >= len(globals.expression):\n            return None, None\n        char = globals.expression[globals.pos]\n        globals.pos += 1\n        return char, read_digits(char) if char.isdigit() else get_symbol(char, prev_number_or_symbol)\n\n    @staticmethod\n    def __compute_if_needed():\n        last_functions = globals.functions[-1]\n        while len(last_functions) > 0:\n            function = last_functions[-1]\n            if function.is_ready_to_compute():\n                globals.push_number(function.compute(function.get_arguments()))\n                last_functions.pop()\n            else:\n                break\n\n    # --------------------------- #\n    # -- PUBLIC STATIC METHODS -- #\n    # --------------------------- #\n\n    @staticmethod\n    def compute(expression_to_compute: str):\n        globals.expression = expression_to_compute\n        globals.numbers = []\n        globals.numbers.append([])\n        globals.pos = 0\n        globals.functions = []\n        globals.functions.append([])\n        try:\n            char, number_or_symbol = ExpressionInterpreter.__get_next_number_or_symbol(\"\")\n            while char:\n                if type(number_or_symbol) is int:\n                    globals.push_number(number_or_symbol)\n                    ExpressionInterpreter.__compute_if_needed()\n                else:\n                    number_or_symbol.interpret()\n                    if char == ')':\n                        ExpressionInterpreter.__compute_if_needed()\n                char, number_or_symbol = ExpressionInterpreter.__get_next_number_or_symbol(number_or_symbol)\n        except Exception:\n            return 0\n        try:\n            return globals.numbers[0][0]\n        except Exception:\n            return 0\n","sub_path":"main/ExpressionInterpreter.py","file_name":"ExpressionInterpreter.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"484816","text":"def pprint(word2,diction):\n    for i in word2:\n        print(i,end=\" \")\n    print()\n    print(len(word2))\n    for i in range(len(diction)):\n        if(diction[i][1]!=0):\n            print(diction[i][0],diction[i][1],end=\" \")\n    print()\n\ndef main():\n    diction=[]\n    while(True): #infinite loop until -1\n        word = input() #read a word\n        delist=[] \n        if(word=='-1'): #end of the word list\n            word2 = [i for i in input().split()] #read the English sentence\n            for i in range(len(word2)):\n                for j in range(len(diction)):\n                    if(word2[i]==diction[j][0]):\n                        diction[j][1] += 1\n                        delist.append(i)\n            diction.sort(key=lambda x:(x[0].lower(),x[1]))\n            for i in reversed(delist):\n                word2.pop(i)\n            pprint(word2,diction)\n            break\n        diction.append([word,0])\n        #diction.append([0,len(word),word])\n        \n    \nmain()\n\n#diction.sort(key=lambda x:(x[0].lower(),x[1]))\n","sub_path":"week3/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"485195821","text":"#!/usr/bin/env python\nimport h5py\nimport pandas as pd\nimport numpy as np\nfrom os import getcwd\nfrom os.path import join\nfrom glob import glob\nimport matplotlib.pyplot as plt\nfrom scipy.cluster.vq import kmeans, kmeans2, whiten\n\n\ndef main():\n\n\tcols = 
[\"Timestep\",\"Time\",\"X_max\",\"Y_max\",\"Vortexnumber\",\"Alpha\",\"Rx\",\"Ry\",\"R_Ratio\",\"E_Major\",\"E_Minor\",\"E_Major_Angle\",\"E_Minor_Angle\",\"E_Ratio\",\"N\",\"V\",\"N/V\",\"E_kin\",\"N_0\"]\n\n\tfig = plt.figure()\n\tfig.set_size_inches(10.0,12.0)\n\n\n\n\tfilenames = glob(join(getcwd(), '*', 'runObservables'))\n\n\tfilenames = [s + '/EXP_Observables.dat' for s in filenames]\n\t\n\trot = []\n\tbefore = \"145_\"\n\tend = \"/runObservables\"\n\tfor s in filenames:\n\t\tname = s[s.find(before) + len(before):s.find(end)]\n\t\trot.append(name)\n\n\t# ratio = []\n\t# for r in rot:\n\t# \tratio.append(float(r)/26.0)\n\tax1 = fig.add_subplot(111)\n\tlength = len(filenames)\n\t# from_data = [None] * length\n\t\n\tfor i in range(0,length):\n\t# \tdataset = []\n\t\tfrom_data = pd.read_csv(filenames[i],header=0,sep=',',names=cols)\n\t\tdataset1 = from_data['Timestep']\n\t\tdataset2 = from_data['E_Major_Angle']\n\t\tprint(dataset2)\n\t\t# dataset3 = from_data['N']\n\t\t# plot1 = []\n\t\t# plot2 = []\n\t\t# average_length = 20\n\t\t# avl2 = int(average_length / 2)\n\t\t# for j in range(avl2,len(dataset1)-avl2):\n\t\t# \tav = 0\n\t\t# \tfor k in range(-avl2,avl2):\n\t\t# \t\tav += ( dataset1[j + k] )\n\t\t# \tav /= average_length\n\t\t# \tplot1.append(av)\n\t\t# \tav = 0\n\t\t# \tfor k in range(-avl2,avl2):\n\t\t# \t\tav += ( dataset2[j + k] )\n\t\t# \tav /= average_length\n\t\t# \tplot2.append(av)\n\n\t\tax1.plot(dataset1,dataset2,label=rot[i] )\n\t\t# str(dataset3[0]) + \" \" + + \" \" + str(ratio[i])\n\n\tplt.ylabel('Angles')\n\tplt.xlabel('Time in ms')\n\tplt.legend(loc='upper right')\n\n\tplt.tight_layout()\t\n\t# fig.text(.001,.001,txt)\n\tplt.savefig('Angles.pdf',dpi=600)\n\tplt.show()\n\ndef average(x, y, z):\n\tav = (x + y + z ) / 3\n\treturn av\n\ndef envelope_plot(x, y, winsize, ax=None, fill='gray', color='blue'):\n if ax is None:\n ax = plt.gca()\n # Coarsely chunk the data, discarding the last window if it's not evenly\n # divisible. 
(Fast and memory-efficient)\n numwin = x.size // winsize\n ywin = y[:winsize * numwin].reshape(-1, winsize)\n xwin = x[:winsize * numwin].reshape(-1, winsize)\n # Find the min, max, and mean within each window \n ymin = ywin.min(axis=1)\n ymax = ywin.max(axis=1)\n ymean = ywin.mean(axis=1)\n xmean = xwin.mean(axis=1)\n\n fill_artist = ax.fill_between(xmean, ymin, ymax, color=fill, \n edgecolor='none', alpha=0.5)\n line, = ax.plot(xmean, ymean, color=color, linestyle='-')\n return fill_artist, line\n\nif __name__ == '__main__':\n \tmain()\n\n","sub_path":"pythonScripts/angles.py","file_name":"angles.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"538221184","text":"from django.urls import path\nfrom .views import *\n\napp_name = 'links'\n\nurlpatterns = [\n path('status/', LinkView.as_view()),\n path('urls/', LinkCreate.as_view()),\n # path('links/', LinkListView.as_view(), name='link_list'),\n # path('links//', views.LinkDetailView.as_view(), name='link_detail'),\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"95841366","text":"\"\"\"\"Test basic database functionality - essentially, test the test db\"\"\"\n\nfrom sqlalchemy import text, inspect\n\n\ndef test_db_session_works(session):\n result = session.execute(text(\"\"\"SELECT 1 AS one\"\"\")).first()\n assert result\n assert result.one == 1\n\n\ndef test_db_has_some_expected_tables(engine):\n expected_table_names = '''\n meta_contact\n meta_vars\n meta_history\n meta_sensor\n meta_network\n meta_station\n vars_per_history_mv\n obs_raw\n obs_derived_values\n meta_native_flag\n obs_raw_native_flags\n '''.split()\n inspector = inspect(engine)\n table_names = inspector.get_table_names(schema='crmp')\n for name in expected_table_names:\n assert name in table_names","sub_path":"tests/test_database.py","file_name":"test_database.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"20952988","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclass Map(Sprite):\n def __init__(self, settings, screen):\n super(Map, self).__init__() # Needed to make Groups()\n self.image = pygame.image.load(\"Images/block1.png\") # Loads the image\n self.rect = self.image.get_rect() # pygame function to return the width and height of the picture\n self.rect.x, self.rect.y = 50, 50 # Sets the default position of block at (50, 50)\n self.settings = settings\n self.screen = screen\n\n def blit(self):\n self.screen.blit(self.image, self.rect)","sub_path":"SuperMarioKnockOff/venv/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"329717658","text":"from sys import stdout\nfrom sys import stdin\n\nfr = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\nto = \"nopqrstuvwxyzabcdefghijklmNOPQRSTUVWXYZABCDEFGHIJKLM5678901234\"\n\nlines = stdin.readlines()\nfor line in lines:\n for char in line:\n try:\n out = to[fr.index(char)]\n except:\n out = char\n stdout.write(out)\n stdout.write('\\n')\n","sub_path":"plspoj/paschar5.py","file_name":"paschar5.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
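The ROT13/ROT5 mapper just above (seq_id 329717658) rebuilds the mapping with a linear `fr.index(char)` scan per character, uses a bare `except` for pass-through, and writes each line's own trailing newline plus an extra `'\n'`, which doubles the line breaks. A shorter standard-library equivalent is sketched below; the `rot13_rot5` name and the sample strings are illustrative, not from the original script:

```python
import codecs
import string

# Single translation table for the digit half of the mapping
# (ROT5: "0123456789" -> "5678901234"); letters go through the
# built-in rot13 text transform, which rotates both cases.
ROT5_DIGITS = str.maketrans(string.digits, string.digits[5:] + string.digits[:5])

def rot13_rot5(text):
    # Characters outside the tables pass through unchanged, which the
    # original script achieved with its try/except around fr.index().
    return codecs.encode(text, "rot13").translate(ROT5_DIGITS)

# Same mapping as the fr/to tables above: 'a'->'n', 'A'->'N', '0'->'5'.
assert rot13_rot5("Hello, 2019!") == "Uryyb, 7564!"
```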
+{"seq_id":"545262727","text":"# HNGi7 Task2. SlackID - orekoko\n\ndef hng_task2():\n\t\"\"\"Returns the required string as per HNGi7 task 2\"\"\"\n\tfirstname = 'Oreoluwa'\n\tlastname = 'Adetimehin'\n\thng_ID = 'HNG-03448'\n\tlanguage = 'Python'\n\temail = 'adetimehinoreoluwa@gmail.com'\n\tmessage = 'Hello World, this is [' + firstname + '] [' + lastname + '] with HNGi7 ID [' + hng_ID + '] using [' + language + '] for stage 2 task. ' + email\n\t\n\tprint(message)\n\n# Output the required\nhng_task2()","sub_path":"testScripts/orekoko.py","file_name":"orekoko.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"43968845","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torchvision\r\nfrom torch.autograd import Variable\r\nfrom tensorboardX import SummaryWriter\r\nfrom backbone_class_2_ssh import resnet_1D_34\r\nfrom ssh.network2.VLADssh2 import model\r\n\r\n# Creates writer1 object.\r\n# The log will be saved in 'runs/exp'\r\n# writer1 = SummaryWriter('runs/exp')\r\n\r\n# Creates writer2 object with auto generated file name\r\n# The log directory will be something like 'runs/Aug20-17-20-33'\r\n# writer2 = SummaryWriter()\r\n#\r\n# # Creates writer3 object with auto generated file name, the comment will be appended to the filename.\r\n# # The log directory will be something like 'runs/Aug20-17-20-33-resnet'\r\n# writer3 = SummaryWriter(comment='resnet')\r\n\r\nwriter = SummaryWriter('runs/scalar_example')\r\nfor i in range(10):\r\n writer.add_scalar('quadratic', i**2, global_step=i)\r\n writer.add_scalar('exponential', 2**i, global_step=i)\r\n\r\n\r\nclass Net1(nn.Module):\r\n def __init__(self):\r\n super(Net1, self).__init__()\r\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\r\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\r\n self.conv2_drop = nn.Dropout2d()\r\n self.fc1 = nn.Linear(320, 50)\r\n self.fc2 = nn.Linear(50, 10)\r\n self.bn = nn.BatchNorm2d(20)\r\n\r\n def forward(self, x):\r\n x = F.max_pool2d(self.conv1(x), 2)\r\n x = F.relu(x) + F.relu(-x)\r\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\r\n x = self.bn(x)\r\n x = x.view(-1, 320)\r\n x = F.relu(self.fc1(x))\r\n x = F.dropout(x, training=self.training)\r\n x = self.fc2(x)\r\n x = F.softmax(x, dim=1)\r\n return x\r\n\r\ndummy_input = Variable(torch.rand(13, 1, 28, 28))\r\n\r\nmodel22 = Net1()\r\nwith SummaryWriter(comment='Net1') as w:\r\n w.add_graph(model22, (dummy_input, ))\r\n\r\n\r\n\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\nnet = resnet_1D_34(10)\r\n\r\nif torch.cuda.device_count() > 1:\r\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\r\n # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] 
on 3 GPUs\r\n # net = nn.DataParallel(net)\r\nelse:\r\n print(\"Let's use\", torch.cuda.device_count(), \"GPU!\")\r\nnet.to(device)\r\nmy_tensor = torch.randn(1, 1, 30000)\r\nmy_tensor = my_tensor.to(device)\r\ny = net(my_tensor)\r\ntorch.cuda.empty_cache()\r\n# input_shape: calculate the output shape from the cnn\r\ng_centres = 2\r\nprint(y.shape\r\n )\r\nmodel = model(mode='gvlad', k_centers=8, g_centers=g_centres, dim=256, input_shape=y.shape[-1]\r\n , kernel_size=10, num_classes=200)\r\nmodel.to(device)\r\nwith SummaryWriter(comment='4yp') as w:\r\n w.add_graph(model, (my_tensor, ))\r\n\r\n","sub_path":"tensorboardx_network2.py","file_name":"tensorboardx_network2.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"562616529","text":"\nfrom random import choice, randint\n\nfrom simulated_agency.simulation import Simulation\nfrom simulated_agency.agents import Mobile\nfrom simulated_agency.states import *\n\n\nclass WolfChasingTarget(State):\n '''\n A wolf in this state will move towards its\n target and kill that target if it ends up\n adjacent to the target after the move.\n '''\n\n name = \"WOLF_CHASING_TARGET\"\n colour = (0, 0, 255)\n required_params = ['target']\n\n def handle(self):\n super().handle()\n target = self.context['target']\n agent = self.agent\n agent.move_towards_target(target, adjacent_ok=True)\n # Can it kill the sheep it is chasing?\n neighbours = agent.location.neighbours()\n if target in neighbours:\n target.replace_state(Dead)\n agent.replace_state(WolfSelectingTarget)\n # Are there any targets of opportunity available?\n else:\n careless_sheep = [s for s in neighbours if isinstance(s, Sheep)]\n if careless_sheep:\n target = choice(careless_sheep)\n target.replace_state(Dead)\n agent.replace_state(WolfSelectingTarget)\n\n\n\nclass WolfSelectingTarget(State):\n '''\n Agents in this state will choose a target\n to follow and then begin following it.\n '''\n\n name = 'WOLF_SELECTING_TARGET'\n colour = (0, 0, 255)\n\n def handle(self):\n super().handle()\n target_list = Sheep.objects\n # Wolves will only chase Sheep that are alive\n live_target_list = [x for x in target_list if not x.is_in_state(Dead)]\n # Choose target\n if live_target_list:\n # Choose the nearest live target and pursue it\n target = self.agent.nearest(live_target_list)\n self.agent.add_state(WolfChasingTarget, target=target)\n else:\n # No sheep left to chase\n self.agent.add_state(MoveRandomly)\n\n\nclass SheepGrazing(State):\n '''\n Sheep that are grazing occasionally move around.\n But mainly they are recovering their energy.\n '''\n \n name = 'SHEEP_GRAZING'\n colour = (0, 255, 0)\n\n def handle(self):\n super().handle()\n agent = self.agent\n # Restore energy up to max\n if agent.energy < 5:\n agent.energy += 1\n # Is there a wolf nearby?\n nearest_wolf = agent.nearest(Wolf, radius=3)\n if nearest_wolf:\n agent.replace_state(SheepFleeing, enemy=nearest_wolf)\n return\n # Occasionally move\n if randint(1, 10) <= 3:\n self.agent.move_randomly()\n\n\nclass SheepFleeing(State):\n '''\n Sheep that are fleeing will do so until they tire.\n '''\n\n name = 'SHEEP_FLEEING'\n colour = (0, 255, 0)\n required_params = ['enemy']\n\n def handle(self):\n super().handle()\n agent = self.agent\n # Burn energy\n agent.energy -= 1\n if agent.energy == 0:\n agent.replace_state(SheepGrazing)\n return\n # Flee\n enemy = self.context['enemy']\n if agent.distance_to(enemy) < 3:\n agent.move_away_from_target(enemy)\n 
else:\n agent.replace_state(SheepGrazing)\n\n\n# Initialise simulation\nsimulation = Simulation(name='SheepAndWolves')\n\n# Use same base model for two types of object\nclass Sheep(Mobile): energy = 5\nclass Wolf(Mobile): pass\n\n# Bind models to simulation\nsimulation.bind(Wolf, Sheep)\n\n# Add some sheep to the simulation\nsimulation.seed(Sheep, 0.15, SheepGrazing)\n\n# Add some wolves to the simulation\nsimulation.seed(Wolf, 20, WolfSelectingTarget)\n\n# Run the simulation\nsimulation.execute(draw_locations=False)\n","sub_path":"examples/sheep_and_wolves.py","file_name":"sheep_and_wolves.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"27048750","text":"import os\nfrom setuptools import setup, find_packages\n\n\n# Utility method to read the README.rst file.\ndef read(file_name):\n return open(os.path.join(os.path.dirname(__file__), file_name)).read()\n\n\nCLASSIFIERS = [\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering'\n]\n\ninstall_requirements_list = [\n 'arviz == 0.5.1',\n 'graphviz == 0.13',\n 'matplotlib == 3.1.1',\n 'numpy >= 1.17.2',\n 'pandas >= 0.25.1',\n 'pymc3 == 3.7',\n 'scipy >= 1.3.1',\n 'seaborn == 0.9.0',\n 'Theano == 1.0.4',\n]\n\nsetup(\n name='hybayes',\n version='0.0.3',\n description='Bayesian Assessment of Hypotheses',\n long_description=read('README.md'),\n long_description_content_type='text/markdown',\n include_package_data=True,\n url='https://github.com/allenai/HyBayes',\n download_url='https://github.com/allenai/HyBayes/archive/0.0.2.tar.gz',\n author='Erfan Sadeqi Azer, Daniel Khashabi',\n author_email='esamath@gmail.com',\n license='Apache 2.0',\n keywords=\"Bayesian Statistics, two groups test, Hypothesis Testing, Bayes Factor,\"\n \"NLP, natural language processing, \",\n #packages=['HyBayes'],\n packages=find_packages(exclude=['tests.*', 'tests']),\n classifiers=CLASSIFIERS,\n install_requires=install_requirements_list,\n zip_safe=False\n)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"537345192","text":"import re\n\n\ndef get_n(line, path_regex):\n return len(re.findall(path_regex, line))\n\n\ndef write_result(res):\n with open(\"output.txt\", \"w\") as file:\n file.writelines(\"\\n\".join(res))\n\n\nletter_path = r\"[a-zA-Z]\"\npath_punctuation = r\"[-,\\.\\!\\?']\"\nresult = []\nwith open(\"text.txt\", \"r\") as file:\n lines = file.readlines()\n counter = 1\n for line in lines:\n n_letter = get_n(line, letter_path)\n n_punctuations = get_n(line, path_punctuation)\n result.append(f\"Line {counter}: {line[:-1]} ({n_letter})({n_punctuations})\")\n counter += 1\n\nwrite_result(result)","sub_path":"python_advanced_jan 2021/python_advanced/06 file handling/06.02. 
exercise/02_line_numbers.py","file_name":"02_line_numbers.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"639407710","text":"#!/usr/bin/env python\n#\n#tests/src/ecoop/bmarks2/macro/smtp/bin/load.py \n#tests/src/ecoop/bmarks2/macro/smtp/bin/load.py f localhost localhost 8888 7777 ST 2\t\n\nimport socket\nimport sys\n\nimport common\n\n\n# Command line arguments.\n\nif len(sys.argv) != 8:\n\tcommon.printAndFlush('Usage: load.py ')\n\tsys.exit(1)\n\t\ndebug = common.parseBoolean(sys.argv[1])\nenv = sys.argv[2] # e.g. 'localhost' or 'camelot'\nserverName = sys.argv[3]\nsport = sys.argv[4]\nwport = sys.argv[5]\nversion = sys.argv[6]\nrepeats = int(sys.argv[7])\n\n\n# Benchmark configuration parameters.\n\nif version == 'ALL':\n\tversions = common.versions\t\t\t\t\nelse:\n\tversions = [version]\n\nif env == 'localhost':\n\trenv = 'bin/sessionj'\n\n\thostname = 'localhost'\t \n\tclient = common.getLocalhostClient() \n\tworkers = common.getLocalhostWorkers() \n\t\n\t(numClients, messageSizes) = common.getLocalhostParameters()\nelif env == 'camelot':\n\trenv = 'bin/csessionj'\n\n\thostname = socket.gethostname()\n\tclient = common.getCamelotClient() \n\t\t\n\tif debug:\n\t\tworkers = common.getCamelotDebugWorkers() \n\t\t(numClients, messageSizes) = common.getDebugParameters()\n\telse:\n\t\tworkers = common.getCamelotWorkers() \n\t\t(numClients, messageSizes) = common.getParameters()\t\nelse:\n\tcommon.printAndFlush('Unknown environment: ' + env)\n\tsys.exit(1)\n\ndelay = '50' # Milliseconds in between LoadClient requests.\n\n\n# Main.\n\ncommon.printAndFlush('Configuration: server=' + serverName + ', worker=' + hostname)\ncommon.printAndFlush('Global: versions=' + str(versions) + ', numClients=' + str(numClients) + ', messageSizes=' + str(messageSizes))\n\nserverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nserverSocket.bind((hostname, int(wport)))\nserverSocket.listen(5) # 5 seems to be a kind of default.\n\ncommon.debugPrint(debug, 'Listening on port: ' + wport)\n\n(s, address) = serverSocket.accept()\n\ncommon.debugPrint(debug, 'Server script connected, starting main loop...')\n\nfor v in versions:\n\tfor clients in numClients:\n\t\tclients = str(int(clients) / len(workers))\n\t\t\t \n\t\tfor size in messageSizes:\n\t\t\tfor i in range(0, repeats): \n\t\t\t\tcommon.printAndFlush('Parameters: version=' + v + ', clients=' + clients + ', size=' + size + ', trial=' + str(i))\n\t\t\t\t\t\t\t \n\t\t\t\ts.recv(1024);\n\t\t\t\t\n\t\t\t\tif v == 'SE':\n\t\t\t\t\tsport1 = str(int(sport) + 200)\n\t\t\t\telse:\n\t\t\t\t\tsport1 = sport\t\n\t\t\t\t\n\t\t\t\tcommand = renv + ' -cp tests/classes ecoop.bmarks2.macro.smtp.ClientRunner ' + str(debug) + ' ' + serverName + ' ' + sport1 + ' ' + wport + ' ' + delay + ' ' + clients + ' ' + size\t+ ' ' + sport\t\t\t\n\t\t\t\tcommon.debugPrint(debug, 'Command: ' + command)\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\tct = common.CommandThread(command)\n\t\t\t\tct.start();\n\n\t\t\t\t(s1, address) = serverSocket.accept() # Get signal from ClientRunner that all threads have been started.\t\t\t\t\t\n\t\t\t\t#s1.recv(1024);\n\t\t\t\t\n\t\t\t\ts.send('2');\t\t\t\t\t\n\t\t\t\t\n\t\t\t\tct.join()\n\t\t\t\t","sub_path":"trunk/tests/src/ecoop/bmarks2/macro/smtp/bin/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
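The load.py harness above (seq_id 639407710) coordinates its distributed benchmark runs with bare TCP signals: each side blocks in `accept()`/`recv(1024)` until its peer reports in, then releases it with a one-byte `send('2')`. A minimal self-contained sketch of that rendezvous pattern follows; the port number, payloads and function names are placeholders rather than the benchmark's actual protocol:

```python
import socket
import threading

PORT = 17777  # placeholder port, not taken from the benchmark's config

def coordinator(listening):
    # load.py's side of the handshake: listen, block until the runner
    # reports in, then release it with a one-byte go-signal.
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind(("localhost", PORT))
    srv.listen(5)
    listening.set()              # the runner may connect from here on
    conn, _ = srv.accept()
    conn.recv(1024)              # wait for "all client threads started"
    conn.send(b"2")              # go-signal, mirroring load.py's s.send('2')
    conn.close()
    srv.close()

def runner(listening):
    # ClientRunner's side: report readiness, then block on the go-signal.
    listening.wait()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("localhost", PORT))
    s.send(b"ready")
    print("released by go-signal:", s.recv(1024))
    s.close()

listening = threading.Event()
t = threading.Thread(target=coordinator, args=(listening,))
t.start()
runner(listening)
t.join()
```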
+{"seq_id":"42496611","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport sys\nsw = set() \nfelse = open(\"/home/ec2-user/git/statresult/else_words.txt\",\"r\")\nf1 = open(\"/home/ec2-user/git/statresult/1.txt\",\"r\")\nfsw = open(\"/home/ec2-user/git/statresult/stopword2.txt\",\"r\")\nfen = open(\"/home/ec2-user/git/statresult/gen2.txt\",\"r\")\nfout = open(\"/home/ec2-user/git/statresult/stopword4.txt\",\"w\")\nfol = open(\"/home/ec2-user/git/statresult/onlyonce.txt\",\"r\")\n\nfor line in felse:\n term = line.strip('\\n')\n if term not in sw:\n sw.add(term)\nfelse.close()\nfor line in f1:\n term = line.strip('\\n')\n if term not in sw:\n sw.add(term)\nf1.close()\nfor line in fol:\n term = line.strip('\\n')\n if term not in sw:\n sw.add(term)\nfol.close()\n\nfor line in fsw:\n term = line.split()[0]\n if term not in sw:\n sw.add(term)\nfsw.close()\nfor line in fen:\n term = line.strip('\\n')\n if term not in sw:\n sw.add(term)\nfen.close()\n\nfor term in sw:\n fout.write(term + \"\\n\")\nfout.close()\n","sub_path":"stopword4.py","file_name":"stopword4.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"46117513","text":"##############################################################################\n#\n# Copyright (c) 2001, 2002 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Publication Traverser\n\n$Id$\n\"\"\"\n__docformat__ = 'restructuredtext'\nfrom types import StringTypes\n\nfrom zope.component import queryMultiAdapter\nfrom zope.publisher.interfaces import NotFound\nfrom zope.security.checker import ProxyFactory\nfrom zope.traversing.namespace import namespaceLookup\nfrom zope.traversing.namespace import nsParse\nfrom zope.traversing.interfaces import TraversalError\nfrom zope.publisher.interfaces import IPublishTraverse\n\nclass DuplicateNamespaces(Exception):\n \"\"\"More than one namespace was specified in a request\"\"\"\n\nclass UnknownNamespace(Exception):\n \"\"\"A parameter specified an unknown namespace\"\"\"\n\nclass PublicationTraverse(object):\n\n def traverseName(self, request, ob, name):\n nm = name # the name to look up the object with\n\n if name and name[:1] in '@+':\n # Process URI segment parameters.\n ns, nm = nsParse(name)\n if ns:\n try:\n ob2 = namespaceLookup(ns, nm, ob, request)\n except TraversalError:\n raise NotFound(ob, name)\n\n return ProxyFactory(ob2)\n\n if nm == '.':\n return ob\n\n if IPublishTraverse.providedBy(ob):\n ob2 = ob.publishTraverse(request, nm)\n else:\n # self is marker\n adapter = queryMultiAdapter((ob, request), IPublishTraverse,\n default=self)\n if adapter is not self:\n ob2 = adapter.publishTraverse(request, nm)\n else:\n raise NotFound(ob, name, request)\n\n return ProxyFactory(ob2)\n\nclass PublicationTraverser(PublicationTraverse):\n\n def traversePath(self, request, ob, path):\n\n if isinstance(path, StringTypes):\n path = path.split('/')\n if len(path) > 1 and not path[-1]:\n # Remove trailing slash\n path.pop()\n else:\n path 
= list(path)\n\n # Remove single dots\n path = [x for x in path if x != '.']\n\n path.reverse()\n\n # Remove double dots\n while '..' in path:\n l = path.index('..')\n if l < 0 or l+2 > len(path):\n break\n del path[l:l+2]\n\n pop = path.pop\n\n while path:\n name = pop()\n ob = self.traverseName(request, ob, name)\n\n return ob\n","sub_path":"zope.app.publication/tags/3.4.0/src/zope/app/publication/publicationtraverse.py","file_name":"publicationtraverse.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"604154202","text":"import json\nfrom openpyxl import Workbook\n\nfrom lib.common import RestApi\n\n\ndef export_case_to_excel(filepath: str, auth: str=None) -> None:\n \"\"\"\n 导出case详细信息到excel文档(.xlsx)\n \"\"\"\n wb = Workbook()\n ws = wb.active\n api = RestApi(\"/api/v1/case\", auth=auth)\n resp = api.get(verify=False)\n resp_dict = json.loads(resp.text)\n case_list = resp_dict.get(\"data\")\n # 表头\n ws.cell(1, 1, \"测试套\")\n ws.cell(1, 2, \"用例名\")\n ws.cell(1, 3, \"测试级别\")\n ws.cell(1, 4, \"测试类型\")\n ws.cell(1, 5, \"用例描述\")\n ws.cell(1, 6, \"节点数\")\n ws.cell(1, 7, \"预置条件\")\n ws.cell(1, 8, \"操作步骤\")\n ws.cell(1, 9, \"预期输出\")\n ws.cell(1, 10, \"是否自动化\")\n ws.cell(1, 11, \"备注\")\n # 内容\n row = 2\n for _case in case_list:\n ws.cell(row, 1, _case.get(\"suite\"))\n ws.cell(row, 2, _case.get(\"name\"))\n ws.cell(row, 3, _case.get(\"test_level\"))\n ws.cell(row, 4, _case.get(\"test_type\"))\n ws.cell(row, 5, _case.get(\"description\"))\n\n ws.cell(row, 7, _case.get(\"preset\"))\n ws.cell(row, 8, _case.get(\"steps\"))\n ws.cell(row, 9, _case.get(\"expection\"))\n ws.cell(row, 10, _case.get(\"automatic\"))\n ws.cell(row, 11, _case.get(\"remark\"))\n\n # 计算节点数\n case_id = _case.get(\"id\")\n node_num = 0\n api = RestApi(\"/api/v1/case-node\")\n resp = api.get(verify=False)\n resp_dict = json.loads(resp.text)\n case_node_list = resp_dict.get(\"data\")\n for _case_node in case_node_list:\n if _case_node.get(\"case_id\") == case_id:\n node_num = node_num + 1\n ws.cell(row, 6, node_num)\n\n row = row + 1\n\n wb.save(filepath)\n","sub_path":"radiaTest-server/server/utils/excel_util.py","file_name":"excel_util.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"575763609","text":"from django.db import models\r\nfrom django.contrib import admin\r\n\r\nfrom scripts.utils import link_to_foreign_key\r\nfrom catalog.models.productLot import *\r\n#from .subOrder import SubOrder\r\n#from .orderShipment import OrderShipment\r\n#from .payments import SellerPayment\r\nfrom django.utils import timezone\r\n\r\nfrom catalog.models.product import Product\r\n\r\nclass OrderItem(models.Model):\r\n\r\n\tsuborder = models.ForeignKey('orders.SubOrder')\r\n\tproduct = models.ForeignKey('catalog.Product')\r\n\torder_shipment = models.ForeignKey('orders.OrderShipment',null=True,blank=True)\r\n\tseller_payment = models.ForeignKey('orders.SellerPayment',null=True,blank=True)\r\n\tcartitem = models.ForeignKey('orders.CartItem', null=True, blank=True)\r\n\r\n\tpieces = models.PositiveIntegerField(default=0)\r\n\tlots = models.PositiveIntegerField(default=0)\r\n\tretail_price_per_piece = models.DecimalField(max_digits=10, decimal_places=2,default=0)\r\n\tcalculated_price_per_piece = models.DecimalField(max_digits=10, decimal_places=2,default=0)\r\n\tedited_price_per_piece = models.DecimalField(max_digits=10, 
decimal_places=2,default=0)\r\n\t\r\n\tfinal_price = models.DecimalField(max_digits=10, decimal_places=2)\r\n\tlot_size = models.PositiveIntegerField(default=1)\r\n\r\n\tcreated_at = models.DateTimeField(auto_now_add=True)\r\n\tupdated_at = models.DateTimeField(auto_now=True)\r\n\r\n\tcurrent_status = models.IntegerField(default=0)\r\n\r\n\tbuyer_payment_status = models.BooleanField(default=False)\r\n\r\n\tremarks = models.TextField(blank=True)\r\n\r\n\tcancellation_remarks = models.TextField(blank=True)\r\n\tcancellation_time = models.DateTimeField(null=True, blank=True)\r\n\r\n\tclass Meta:\r\n\t\tordering = [\"-id\"]\r\n\t\tdefault_related_name = \"orderitem\"\r\n\t\tverbose_name=\"Order Item\"\r\n\t\tverbose_name_plural = \"Order Items\"\r\n\r\n\tdef __unicode__(self):\r\n\t\treturn \"{} - {} - Price: {} - {} - {}\".format(self.id,self.suborder.display_number,self.final_price,self.product.display_name,self.product.seller.name)\r\n\r\n\tdef populateDataFromCartItem(self, cartItemPtr):\r\n\t\tself.cartitem = cartItemPtr\r\n\t\tself.product_id = cartItemPtr.product_id\r\n\t\tself.pieces = cartItemPtr.pieces\r\n\t\tself.lots = cartItemPtr.lots\r\n\t\tself.lot_size = cartItemPtr.lot_size\r\n\t\tself.retail_price_per_piece = cartItemPtr.retail_price_per_piece\r\n\t\tself.calculated_price_per_piece = cartItemPtr.calculated_price_per_piece\r\n\t\tself.edited_price_per_piece = cartItemPtr.calculated_price_per_piece\r\n\t\tself.final_price = cartItemPtr.final_price\r\n\t\tself.current_status = 0\r\n\t\tself.remarks = cartItemPtr.remarks\r\n\r\nclass OrderItemAdmin(admin.ModelAdmin):\r\n\tsearch_fields = [\"suborder__display_number\", \"product__name\"]\r\n\tlist_display = [\"id\", \"link_to_suborder\", \"link_to_product\", \"final_price\", \"pieces\"]\r\n\r\n\tlist_display_links = [\"id\",\"link_to_suborder\",\"link_to_product\"]\r\n\r\n\tlist_filter = [\"current_status\"]\r\n\r\n\tdef link_to_suborder(self, obj):\r\n\t\treturn link_to_foreign_key(obj, \"suborder\")\r\n\tlink_to_suborder.short_description = \"Suborder\"\r\n\tlink_to_suborder.allow_tags=True\r\n\r\n\tdef link_to_product(self, obj):\r\n\t\treturn link_to_foreign_key(obj, \"product\")\r\n\tlink_to_product.short_description = \"Product\"\r\n\tlink_to_product.allow_tags=True\r\n\r\ndef populateOrderItemData(OrderItemPtr, orderItem):\r\n\tOrderItemPtr.pieces = int(orderItem[\"pieces\"])\r\n\tOrderItemPtr.lots = int(orderItem[\"lots\"])\r\n\tOrderItemPtr.retail_price_per_piece = Decimal(orderItem[\"retail_price_per_piece\"])\r\n\tOrderItemPtr.calculated_price_per_piece = Decimal(orderItem[\"calculated_price_per_piece\"])\r\n\tOrderItemPtr.edited_price_per_piece = Decimal(orderItem[\"edited_price_per_piece\"])\r\n\tOrderItemPtr.final_price = Decimal(orderItem[\"final_price\"])\r\n\tOrderItemPtr.lot_size = int(orderItem[\"lot_size\"])\r\n\tOrderItemPtr.remarks = orderItem[\"remarks\"]\r\n\tOrderItemPtr.current_status = 1\r\n\r\ndef validateOrderItemStatus(status, current_status):\r\n\tif current_status == 0 and not (status == 1 or status == 10):\r\n\t\treturn False\r\n\telif current_status == 1 and not (status == 2 or status == 10):\r\n\t\treturn False\r\n\telif current_status == 2 and not(status == 3 or status == 10):\r\n\t\treturn False\r\n\telif current_status == 3 and not(status == 4 or status == 10):\r\n\t\treturn False\r\n\telif current_status == 4 and not(status == 5 or status == 6 or status == 7 or status == 9):\r\n\t\treturn False\r\n\telif current_status == 5 and not(status == 6 or status == 7 or status == 9):\r\n\t\treturn 
False\r\n\telif current_status == 6 and not(status == 11 or status == 7):\r\n\t\treturn False\r\n\telif current_status == 7 and not(status == 8 or status == 9):\r\n\t\treturn False\r\n\telif current_status == 8 and not(status == 12):\r\n\t\treturn False\r\n\telif current_status == 9 and not(status == 12):\r\n\t\treturn False\r\n\telif current_status == 10 and not(status == 12):\r\n\t\treturn False\r\n\telif current_status == 11 and not(status == 12):\r\n\t\treturn False\r\n\telif current_status == 12:\r\n\t\treturn False\r\n\treturn True\r\n\r\ndef update_order_item_status(orderShipmentID, status):\r\n\r\n\torderItemQuerySet = OrderItem.objects.filter(order_shipment_id =orderShipmentID).update(current_status = status, updated_at = timezone.now())\r\n\r\ndef filterOrderItem(orderItemParameters):\r\n\torderItems = OrderItem.objects.all().select_related('product')\r\n\t\t\r\n\tif \"orderItemArr\" in orderItemParameters:\r\n\t\torderItems = orderItems.filter(id__in=orderItemParameters[\"orderItemArr\"])\r\n\r\n\tif \"orderItemStatusArr\" in orderItemParameters:\r\n\t\torderItems = orderItems.filter(current_status__in=orderItemParameters[\"orderItemStatusArr\"])\r\n\r\n\tif \"sellersArr\" in orderItemParameters:\r\n\t\torderItems = orderItems.filter(suborder__seller_id__in=orderItemParameters[\"sellersArr\"])\r\n\r\n\tif \"subOrderArr\" in orderItemParameters:\r\n\t\torderItems = orderItems.filter(suborder_id__in=orderItemParameters[\"subOrderArr\"])\r\n\r\n\tif \"orderArr\" in orderItemParameters:\r\n\t\torderItems = orderItems.filter(suborder__order_id__in=orderItemParameters[\"orderArr\"])\r\n\r\n\tif \"orderShipmentArr\" in orderItemParameters:\r\n\t\torderItems = orderItems.filter(order_shipment_id__in=orderItemParameters[\"orderShipmentArr\"])\r\n\r\n\treturn orderItems\r\n\r\n\r\nOrderItemStatus = {\r\n\t0:{\"display_value\":\"Placed\"},\r\n\t1:{\"display_value\":\"Confirmed\"},\r\n\t2:{\"display_value\":\"Merchant notified\"},\r\n\t3:{\"display_value\":\"Shipped\"},\r\n\t4:{\"display_value\":\"Cancelled\"},\r\n\t5:{\"display_value\":\"Sent for Pickup\"},\r\n\t6:{\"display_value\":\"Shipment created\"},\r\n\t7:{\"display_value\":\"3PL notified\"},\r\n\t8:{\"display_value\":\"3PL manifested\"},\r\n\t9:{\"display_value\":\"3PL in transit\"},\r\n\t10:{\"display_value\":\"3PL stuck in transit\"},\r\n\t11:{\"display_value\":\"Delivered\"},\r\n\t12:{\"display_value\":\"RTO in transit\"},\r\n\t13:{\"display_value\":\"RTO delivered\"},\r\n\t14:{\"display_value\":\"Lost\"}\r\n}\r\n\r\nOrderItemCompletionStatus = [4, 11, 13, 14]\r\nOrderItemNonCompletionStatus = [0,1,2,3,5,6,7,8,9,10,12]\r\n\r\ndef populateMailOrderItem(OrderItemPtr):\r\n\r\n\tproductPtr = Product.objects.filter(id=OrderItemPtr.product_id)\r\n\tproductPtr = productPtr[0]\r\n\r\n\timageLink = productPtr.get_image_url(200)\r\n\tproductLink = productPtr.get_absolute_url()\r\n\titemMargin = float((OrderItemPtr.retail_price_per_piece - OrderItemPtr.edited_price_per_piece)/OrderItemPtr.retail_price_per_piece*100)\r\n\r\n\tmailOrderItem = {\r\n\t\t\"name\":productPtr.display_name,\r\n\t\t\"catalog_number\":productPtr.productdetails.seller_catalog_number,\r\n\t\t\"pieces\":OrderItemPtr.pieces,\r\n\t\t\"price_per_piece\":OrderItemPtr.edited_price_per_piece,\r\n\t\t\"final_price\":OrderItemPtr.final_price,\r\n\t\t\"image_link\":imageLink,\r\n\t\t\"product_link\":productLink,\r\n\t\t\"margin\":'{0:.1f}'.format(itemMargin)\r\n\t}\r\n\t\t\r\n\tif OrderItemPtr.remarks != \"\":\r\n\t\tmailOrderItem[\"remarks\"] = 
OrderItemPtr.remarks\r\n\r\n\treturn mailOrderItem","sub_path":"orders/models/orderItem.py","file_name":"orderItem.py","file_ext":"py","file_size_in_byte":7516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"154731604","text":"#-*- coding=utf-8 -*-\nimport urllib.request as urllib2\nimport ssl\nfrom lxml import etree\n\nurl='https://movie.douban.com/top250'\ncontext=ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)\n\ndef fetch_page(url):\n\tresponse=urllib2.urlopen(url,context=context)\n\treturn response\ndef parse(url):\n\tresponse=fetch_page(url)\n\tpage=response.read()\n\thtml=etree.HTML(page)\n\txpath_movie = '//*[@id=\"content\"]/div/div[1]/ol/li'\n\txpath_title = './/span[@class=\"title\"]'\n\txpath_pages = '//*[@id=\"content\"]/div/div[1]/div[2]/a'\n\n\tpages = html.xpath(xpath_pages)\n\tfetch_list = []\n\tresult = []\n\n\tfor element_movie in html.xpath(xpath_movie):\n\t\tresult.append(element_movie)\n\n\tfor p in pages:\n\t\tfetch_list.append(url + p.get('href'))\n\n\tfor url in fetch_list:\n\t\tresponse = fetch_page(url)\n\t\tpage = response.read()\n\t\thtml = etree.HTML(page)\n\t\tfor element_movie in html.xpath(xpath_movie):\n\t\t\tresult.append(element_movie)\n\n\tfor i, movie in enumerate(result, 1):\n\t\ttitle = movie.find(xpath_title).text \n\t\tprint(i, title)\ndef main():\n\tfrom time import time\n\tstart=time()\n\tfor i in range(5):\n\t\tparse(url)\n\tend=time()\n\tprint(\"cost time:{}\".format((end-start)/5))\n\n\t\nif __name__ == '__main__':\n\tmain()","sub_path":"DoubanMovieSpider/Spiderdouban.py","file_name":"Spiderdouban.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"36053127","text":"# -*- mode: python; coding: utf-8 -*-\n# Copyright 2020-2021 the .NET Foundation\n# Licensed under the MIT License.\n\n\"\"\"Entrypoint for the \"wwtdatatool\" command-line interface.\n\n\"\"\"\nimport argparse\nimport os.path\nimport sys\n\n\n__all__ = [\n    \"GLOB_PATHS_INTERNALLY\",\n    \"EnsureGlobsExpandedAction\",\n    \"entrypoint\",\n    \"serve_getparser\",\n]\n\n\n# General CLI utilities\n\n\ndef die(msg):\n    print(\"error:\", msg, file=sys.stderr)\n    sys.exit(1)\n\n\ndef warn(msg):\n    print(\"warning:\", msg, file=sys.stderr)\n\n\nGLOB_PATHS_INTERNALLY = os.name == \"nt\"  # non-Windows has reasonable shells\n\n\nclass EnsureGlobsExpandedAction(argparse.Action):\n    \"\"\"\n    An action to handle globbing for path-list arguments on Windows.\n\n    If the CLI program is being run from the Windows command prompt, there is no\n    expansion of globs by the shell. It turns out that to get globbing behavior,\n    we have to manually implement it ourselves. This class helps make this\n    convenient. It can be used as a ``action=`` keyword argument to\n    ``ArgumentParser.add_argument()`` and, on Windows, will process argument\n    text to apply globs. 
This argument should generally be used on arguments\n with a ``nargs='+'`` cardinality.\n \"\"\"\n\n def __call__(self, parser, namespace, values, option_string=None):\n setattr(namespace, self.dest, self.expand_globs(values))\n\n def expand_globs(self, path_args):\n if not GLOB_PATHS_INTERNALLY:\n return path_args\n\n import glob\n\n result = []\n\n for p in path_args:\n if not glob.has_magic(p):\n result.append(p)\n else:\n matches = glob.glob(p, recursive=True)\n\n if matches:\n result += matches\n else:\n # Unix-like behavior: unmatched glob expression is passed on through\n result.append(p)\n\n return result\n\n\n# \"cabinet\" subcommand\n\n\ndef cabinet_getparser(parser):\n subparsers = parser.add_subparsers(dest=\"cabinet_command\")\n\n p = subparsers.add_parser(\"list\")\n p.add_argument(\n \"path\",\n metavar=\"PATH\",\n help=\"The path to a cabinet file.\",\n )\n\n p = subparsers.add_parser(\"pack\")\n p.add_argument(\n \"cab_path\",\n metavar=\"PATH\",\n help=\"The path of the cabinet file to create.\",\n )\n p.add_argument(\n \"input_paths\",\n nargs=\"+\",\n action=EnsureGlobsExpandedAction,\n metavar=\"PATHS\",\n help=\"Paths to files to put into the cabinet.\",\n )\n\n p = subparsers.add_parser(\"unpack\")\n p.add_argument(\n \"path\",\n metavar=\"PATH\",\n help=\"The path to a cabinet file.\",\n )\n\n\ndef cabinet_list(settings):\n from .filecabinet import FileCabinetReader\n\n with open(settings.path, \"rb\") as f:\n reader = FileCabinetReader(f)\n\n for fn in reader.filenames():\n print(fn)\n\n\ndef cabinet_pack(settings):\n from .filecabinet import FileCabinetWriter\n import os.path\n\n writer = FileCabinetWriter()\n\n for fn in settings.input_paths:\n with open(fn, \"rb\") as f:\n data = f.read()\n\n # TODO: smarter splitting\n pieces = fn.split(os.path.sep)\n\n for p in pieces:\n if p in (\".\", \"..\", \"\"):\n die(\n f'illegal input path \"{fn}\": must be relative with no \".\", \"..\" components'\n )\n\n writer.add_file_with_data(\"\\\\\".join(pieces), data)\n\n with open(settings.cab_path, \"wb\") as f_out:\n writer.emit(f_out)\n\n\ndef cabinet_unpack(settings):\n from .filecabinet import FileCabinetReader\n from os import makedirs\n from os.path import join\n\n with open(settings.path, \"rb\") as f_in:\n reader = FileCabinetReader(f_in)\n\n for fn in reader.filenames():\n data = reader.read_file(fn)\n pieces = fn.split(\"\\\\\") # paths are Windows-style\n\n # At least the MakeDataCabinetFile tool creates a file whose\n # paths all begin with \\. 
We are not gonna treat those as\n # absolute paths or anything like that.\n if not len(pieces[0]):\n pieces = pieces[1:]\n\n if len(pieces) > 1:\n makedirs(join(*pieces[:-1]), exist_ok=True)\n\n with open(join(*pieces), \"wb\") as f_out:\n f_out.write(data)\n\n\ndef cabinet_impl(settings):\n if settings.cabinet_command is None:\n print('Run the \"cabinet\" command with `--help` for help on its subcommands')\n return\n\n if settings.cabinet_command == \"list\":\n return cabinet_list(settings)\n elif settings.cabinet_command == \"pack\":\n return cabinet_pack(settings)\n elif settings.cabinet_command == \"unpack\":\n return cabinet_unpack(settings)\n else:\n die('unrecognized \"cabinet\" subcommand ' + settings.cabinet_command)\n\n\n# \"preview\" subcommand\n\n\ndef preview_getparser(parser):\n parser.add_argument(\n \"--browser\",\n \"-b\",\n metavar=\"BROWSER-TYPE\",\n help=\"The type of browser to use for the preview (as per Python webbrowser)\",\n )\n parser.add_argument(\n \"--research\", \"-r\", action=\"store_true\", help=\"Preview in the WWT Research App\"\n )\n parser.add_argument(\n \"--appurl\",\n metavar=\"URL\",\n help=\"The URL of the app to use; useful for development\",\n )\n parser.add_argument(\n \"wtml_path\",\n metavar=\"PATH\",\n help=\"The path to the WTML file to preview\",\n )\n\n\ndef preview_impl(settings):\n from .server import preview_wtml\n\n app = \"webclient\"\n\n if settings.research:\n app = \"research\"\n\n preview_wtml(\n settings.wtml_path,\n browser=settings.browser,\n app_type=app,\n app_url=settings.appurl,\n )\n\n\n# \"serve\" subcommand\n\n\ndef serve_getparser(parser):\n parser.add_argument(\n \"--port\",\n \"-p\",\n metavar=\"PORT\",\n type=int,\n default=8080,\n help=\"The port on which to listen for connections.\",\n )\n parser.add_argument(\n \"--heartbeat\",\n action=\"store_true\",\n help=\"Print periodic heartbeat messages to stdout and terminate on failure.\",\n )\n parser.add_argument(\n \"root_dir\",\n metavar=\"PATH\",\n default=\".\",\n help=\"The path to the base directory of the server.\",\n )\n\n\ndef serve_impl(settings):\n from .server import run_server\n\n run_server(settings)\n\n\n# \"show\" subcommand\n\n\ndef show_getparser(parser):\n subparsers = parser.add_subparsers(dest=\"show_command\")\n _parser = subparsers.add_parser(\"concept-doi\")\n _parser = subparsers.add_parser(\"version\")\n _parser = subparsers.add_parser(\"version-doi\")\n\n\ndef show_impl(settings):\n if settings.show_command is None:\n print('Run the \"show\" command with `--help` for help on its subcommands')\n return\n\n if settings.show_command == \"concept-doi\":\n # This string constant will be rewritten by Cranko during releases:\n doi = \"xx.xxxx/dev-build.wwt_data_formats.concept\"\n if not doi.startswith(\"10.\"):\n warn(\"this DOI is a fake value used for development builds\")\n print(doi)\n elif settings.show_command == \"version\":\n # This string constant will be rewritten by Cranko during releases:\n version = \"0.dev0\" # cranko project-version\n print(version)\n elif settings.show_command == \"version-doi\":\n # This string constant will be rewritten by Cranko during releases:\n doi = \"xx.xxxx/dev-build.wwt_data_formats.version\"\n if not doi.startswith(\"10.\"):\n warn(\"this DOI is a fake value used for development builds\")\n print(doi)\n else:\n die('unrecognized \"show\" subcommand ' + settings.show_command)\n\n\n# \"tree\" subcommand\n\n\ndef tree_getparser(parser):\n subparsers = parser.add_subparsers(dest=\"tree_command\")\n\n p = 
subparsers.add_parser(\"fetch\")\n p.add_argument(\n \"root_url\",\n metavar=\"URL\",\n help=\"The URL of the initial WTML file to download.\",\n )\n\n p = subparsers.add_parser(\"print-dem-urls\")\n p = subparsers.add_parser(\"print-image-urls\")\n p = subparsers.add_parser(\"summarize\")\n\n\ndef tree_impl(settings):\n if settings.tree_command is None:\n print('Run the \"tree\" command with `--help` for help on its subcommands')\n return\n\n if settings.tree_command == \"fetch\":\n return tree_fetch(settings)\n elif settings.tree_command == \"print-dem-urls\":\n return tree_print_dem_urls(settings)\n elif settings.tree_command == \"print-image-urls\":\n return tree_print_image_urls(settings)\n elif settings.tree_command == \"summarize\":\n return tree_summarize(settings)\n else:\n die('unrecognized \"tree\" subcommand ' + settings.tree_command)\n\n\ndef tree_fetch(settings):\n from .folder import fetch_folder_tree\n\n def on_fetch(url):\n print(\"Fetching\", url, \"...\")\n\n fetch_folder_tree(settings.root_url, \".\", on_fetch)\n\n\ndef tree_print_dem_urls(settings):\n from .folder import Folder, walk_cached_folder_tree\n from .imageset import ImageSet\n from .place import Place\n\n done_urls = set()\n\n for treepath, item in walk_cached_folder_tree(\".\"):\n imgset = None\n\n if isinstance(item, ImageSet):\n imgset = item\n elif isinstance(item, Place):\n imgset = item.as_imageset()\n\n if imgset is None:\n continue\n\n if not imgset.dem_url or imgset.dem_url in done_urls:\n continue\n\n done_urls.add(imgset.dem_url)\n print(imgset.dem_url, imgset.name)\n\n\ndef tree_print_image_urls(settings):\n from .folder import Folder, walk_cached_folder_tree\n from .imageset import ImageSet\n from .place import Place\n\n done_urls = set()\n\n for treepath, item in walk_cached_folder_tree(\".\"):\n imgset = None\n\n if isinstance(item, ImageSet):\n imgset = item\n elif isinstance(item, Place):\n imgset = item.as_imageset()\n\n if imgset is None:\n continue\n\n for url, tag in zip((imgset.url, imgset.alt_url), (\"\", \" (alt)\")):\n if not url or url in done_urls:\n continue\n\n done_urls.add(url)\n print(url, imgset.name + tag)\n\n\ndef tree_summarize(settings):\n from .folder import Folder, walk_cached_folder_tree\n from .imageset import ImageSet\n from .place import Place\n\n for treepath, item in walk_cached_folder_tree(\".\"):\n pfx = \" \" * len(treepath)\n\n if isinstance(item, Folder):\n print(pfx + \"Folder\", item.name)\n elif isinstance(item, ImageSet):\n index = treepath[-1]\n print(f\"{pfx}{index:03d}\", \"ImageSet:\", item.name, \"@\", item.url)\n elif isinstance(item, Place):\n maybe_imgset = item.as_imageset()\n if maybe_imgset is not None:\n index = treepath[-1]\n print(\n f\"{pfx}{index:03d}\",\n \"Place+ImgSet:\",\n item.name,\n \"@\",\n maybe_imgset.url,\n )\n\n\n# \"wtml\" subcommand\n\n\ndef wtml_getparser(parser):\n subparsers = parser.add_subparsers(dest=\"wtml_command\")\n\n p = subparsers.add_parser(\"merge\")\n p.add_argument(\n \"--merged-name\",\n default=\"Folder\",\n help=\"The name to give to the merged folder.\",\n )\n p.add_argument(\n \"--merged-thumb-url\",\n default=\"\",\n help=\"The thumbnail URL to give to the merged folder.\",\n )\n p.add_argument(\n \"in_paths\",\n nargs=\"+\",\n action=EnsureGlobsExpandedAction,\n metavar=\"IN-WTML-PATH\",\n help=\"The path to the input WTML files.\",\n )\n p.add_argument(\n \"out_path\",\n metavar=\"OUT-WTML-PATH\",\n help=\"The path to the output WTML file.\",\n )\n\n p = subparsers.add_parser(\"report\")\n 
p.add_argument(\n \"path\",\n metavar=\"WTML\",\n help=\"The path to a WTML file.\",\n )\n\n p = subparsers.add_parser(\"rewrite-disk\")\n p.add_argument(\n \"in_path\",\n metavar=\"INPUT-WTML\",\n help=\"The path to the input WTML file.\",\n )\n p.add_argument(\n \"out_path\",\n metavar=\"OUTPUT-WTML\",\n help=\"The path of the rewritten, output WTML file.\",\n )\n\n p = subparsers.add_parser(\"rewrite-urls\")\n p.add_argument(\n \"in_path\",\n metavar=\"INPUT-WTML\",\n help=\"The path to the input WTML file.\",\n )\n p.add_argument(\n \"baseurl\",\n metavar=\"BASE-URL\",\n help=\"The new base URL to use in the file's contents\",\n )\n p.add_argument(\n \"out_path\",\n metavar=\"OUTPUT-WTML\",\n help=\"The path of the rewritten, output WTML file.\",\n )\n\n p = subparsers.add_parser(\"transfer-astrometry\")\n p.add_argument(\n \"in_path\",\n metavar=\"INPUT-WTML\",\n help=\"The path to the input WTML file with refined astrometric solutions.\",\n )\n p.add_argument(\n \"update_paths\",\n nargs=\"+\",\n action=EnsureGlobsExpandedAction,\n metavar=\"UPDATE-WTML\",\n help=\"Paths of WTML files to update with data from the input file.\",\n )\n\n\ndef wtml_impl(settings):\n if settings.wtml_command is None:\n print('Run the \"wtml\" command with `--help` for help on its subcommands')\n return\n\n if settings.wtml_command == \"merge\":\n return wtml_merge(settings)\n elif settings.wtml_command == \"report\":\n return wtml_report(settings)\n elif settings.wtml_command == \"rewrite-disk\":\n return wtml_rewrite_disk(settings)\n elif settings.wtml_command == \"rewrite-urls\":\n return wtml_rewrite_urls(settings)\n elif settings.wtml_command == \"transfer-astrometry\":\n return wtml_transfer_astrometry(settings)\n else:\n die('unrecognized \"wtml\" subcommand ' + settings.wtml_command)\n\n\ndef wtml_merge(settings):\n from urllib.parse import urljoin, urlsplit\n from .folder import Folder\n\n out_folder = Folder()\n out_folder.name = settings.merged_name\n out_folder.thumbnail = settings.merged_thumb_url\n\n rel_base = os.path.dirname(settings.out_path)\n\n for path in settings.in_paths:\n in_folder = Folder.from_file(path)\n cur_base_url = path.replace(os.path.sep, \"/\")\n\n def mutator(url):\n if not url:\n return url\n if urlsplit(url).netloc:\n return url # this URL is absolute\n\n # Resolve this relative URL, using the path of the source WTML\n # as the basis.\n url = urljoin(cur_base_url, url)\n\n # Now go back to filesystem-path land, so that we can use relpath to\n # compute the new path relative to the merged folder file.\n rel = os.path.relpath(url.replace(\"/\", os.path.sep), rel_base)\n\n # Finally, re-express that as a URL\n return rel.replace(os.path.sep, \"/\")\n\n in_folder.mutate_urls(mutator)\n out_folder.children += in_folder.children\n\n with open(settings.out_path, \"wt\", encoding=\"utf8\") as f_out:\n out_folder.write_xml(f_out)\n\n\ndef wtml_report(settings):\n \"\"\"\n Analyze a WTML file, expect to contain a single place/imageset, and report\n its metadata contents.\n \"\"\"\n from bs4 import BeautifulSoup\n from datetime import datetime\n import json\n import textwrap\n from .folder import Folder\n from .imageset import ImageSet\n from .place import Place\n\n f = Folder.from_file(settings.path)\n\n warnings_hack = [0]\n\n def mywarn(*args, **kwargs):\n warn(*args, **kwargs)\n warnings_hack[0] += 1\n\n if len(f.children) != 1:\n mywarn(\n f\"expected WTML file to contain exactly one item; found {len(f.children)}\"\n )\n\n if len(f.children) == 0:\n die(\"cannot proceed if 
WTML has zero items\")\n\n pl = f.children[0]\n\n if isinstance(pl, ImageSet):\n die(\n \"sorry, this program is too dumb to handle top-level imagesets right now. File a bug!\"\n )\n if not isinstance(pl, Place):\n die(f\"the WTML item must be a Place; found: {c}\")\n\n if pl.foreground_image_set is not None:\n imgset = pl.foreground_image_set\n else:\n die(\"the WTML Place must contain a item\")\n\n # Name:\n\n f_name = f.name\n p_name = pl.name\n i_name = imgset.name\n\n if f_name != p_name:\n mywarn(f\"name of folder ({f_name}) and name of Place ({p_name}) disagree\")\n if f_name != i_name:\n mywarn(f\"name of folder ({f_name}) and name of ImageSet ({i_name}) disagree\")\n\n # Our extended metadata -- needs documentation!\n\n channel_name = None\n item_id = None\n published8601 = None\n\n if pl.annotation:\n try:\n anno_data = json.loads(pl.annotation)\n except Exception as e:\n mywarn(\n f\"Place annotation data is not valid JSON; the text is: {pl.annotation!r}\"\n )\n else:\n channel_name = anno_data.get(\"channel\")\n item_id = anno_data.get(\"itemid\")\n published8601 = anno_data.get(\"publishedUTCISO8601\")\n else:\n mywarn(\"Place contains no Annotation metadata\")\n\n if channel_name is None:\n mywarn(\"Place Annotation metadata does not contain a channel name\")\n channel_report = \"(none specified)\"\n else:\n channel_report = channel_name\n\n if item_id is None:\n mywarn(\"Place Annotation metadata does not contain an itemid\")\n item_id_report = \"(none specified)\"\n else:\n item_id_report = item_id\n\n pubdate = None\n\n if published8601 is not None:\n try:\n pubdate = datetime.fromisoformat(published8601)\n except Error as e:\n mywarn(\n \"publication date in Place Annotation data does not seem to be in ISO8601 format\"\n )\n else:\n if pubdate.tzinfo is None:\n mywarn(\"publication date does not contain timezone information\")\n\n if pubdate is None:\n mywarn(\"Place Annotation metadata does not contain a valid publication date\")\n pubdate_report = \"(unspecified)\"\n else:\n pubdate_report = pubdate\n\n # Text entries\n\n def process_html(text):\n parsed = BeautifulSoup(text, \"html.parser\")\n plain_report = textwrap.wrap(\n parsed.text,\n break_long_words=False,\n break_on_hyphens=False,\n )\n\n tag_report = []\n\n for line in parsed.prettify().splitlines():\n # Determine indent for mo' pretty\n i = 0\n while i < len(line) and line[i] == \" \":\n i += 1\n indent = line[:i]\n\n tag_report += textwrap.wrap(\n line,\n initial_indent=indent,\n subsequent_indent=indent,\n break_long_words=False,\n break_on_hyphens=False,\n )\n\n return plain_report, tag_report\n\n if not pl.description:\n mywarn(\"Place has no Description\")\n desc_plain_report = desc_tag_report = [\"(none)\"]\n else:\n desc_plain_report, desc_tag_report = process_html(pl.description)\n\n if not imgset.credits:\n mywarn(\"ImageSet has no credits\")\n credits_plain_report = credits_tag_report = [\"(none)\"]\n else:\n # NB this can cause MarkupResemblesLocatorWarning if the text is short\n # and contains no HTML tags\n credits_plain_report, credits_tag_report = process_html(imgset.credits)\n\n if not imgset.credits_url:\n mywarn(\"ImageSet has no CreditsUrl\")\n credits_url_report = \"(none)\"\n else:\n credits_url_report = imgset.credits_url\n\n # Finally, report out\n\n print(f\"Filename: {settings.path}\")\n print(f\"Title (no HTML allowed): {f_name}\")\n print(f\"Source channel: {channel_report}\")\n print(f\"Source/credit URL: {credits_url_report}\")\n print(f\"Item ID (should be unique within channel): 
{item_id_report}\")\n print(f\"Publication date: {pubdate_report}\")\n print()\n\n print(\"Description reduced to plain text:\")\n print()\n for line in desc_plain_report:\n print(\" \", line)\n print()\n print(\"Full HTML description (check links and tags!):\")\n print()\n for line in desc_tag_report:\n print(\" \", line)\n print()\n print(\"Credits reduced to plain text:\")\n print()\n for line in credits_plain_report:\n print(\" \", line)\n print()\n print(\"Full HTML credits (check links and tags!):\")\n print()\n for line in credits_tag_report:\n print(\" \", line)\n\n print()\n n_warnings = warnings_hack[0]\n\n if n_warnings:\n print(f\"Summary: {n_warnings} were flagged\")\n else:\n print(\"Summary: file structure looks OK! Check description and credits HTML.\")\n\n\ndef wtml_rewrite_disk(settings):\n from .folder import Folder, make_filesystem_url_mutator\n\n # Note that data URLs should be relative to the *source* WTML, which is why\n # we're basing against in_path, not out_path.\n rootdir = os.path.abspath(os.path.dirname(settings.in_path))\n mutator = make_filesystem_url_mutator(rootdir)\n\n f = Folder.from_file(settings.in_path)\n f.mutate_urls(mutator)\n\n with open(settings.out_path, \"wt\", encoding=\"utf8\") as f_out:\n f.write_xml(f_out)\n\n\ndef wtml_rewrite_urls(settings):\n from .folder import Folder, make_absolutizing_url_mutator\n\n f = Folder.from_file(settings.in_path)\n f.mutate_urls(make_absolutizing_url_mutator(settings.baseurl))\n\n with open(settings.out_path, \"wt\", encoding=\"utf8\") as f_out:\n f.write_xml(f_out)\n\n\ndef wtml_transfer_astrometry(settings):\n from .folder import Folder\n from .imageset import ImageSet\n from .place import Place\n\n # Tables of preferred entries ...\n\n places = {}\n imagesets = {}\n\n def add_imageset(imgset):\n if imgset is None:\n return # convenience for Place handling\n\n if imgset.name in imagesets:\n print(\n 'note: imageset name \"%s\" appears repeatedly in input file \"%s\"'\n % (imgset.name, settings.in_path)\n )\n else:\n imagesets[imgset.name] = imgset\n\n def add_place(place):\n if place.name in places:\n print(\n 'note: place name \"%s\" appears repeatedly in input file \"%s\"'\n % (place.name, settings.in_path)\n )\n else:\n places[place.name] = place\n\n add_imageset(place.image_set)\n add_imageset(place.background_image_set)\n add_imageset(place.foreground_image_set)\n\n IMAGESET_ASTROMETRIC_ATTRS = [\n \"data_set_type\",\n \"width_factor\",\n \"reference_frame\",\n \"base_degrees_per_tile\",\n \"projection\",\n \"center_x\",\n \"center_y\",\n \"offset_x\",\n \"offset_y\",\n \"rotation_deg\",\n ]\n\n PLACE_ASTROMETRIC_ATTRS = [\n \"data_set_type\",\n \"ra_hr\",\n \"dec_deg\",\n \"latitude\",\n \"longitude\",\n \"distance\",\n \"angular_size\",\n \"zoom_level\",\n \"rotation_deg\",\n \"angle\",\n \"dome_alt\",\n \"dome_az\",\n ]\n\n def update_imageset(imgset):\n if imgset is None:\n return 0\n\n ref = imagesets.get(imgset.name)\n if ref is None:\n return 0\n\n for att in IMAGESET_ASTROMETRIC_ATTRS:\n setattr(imgset, att, getattr(ref, att))\n\n return 1\n\n def update_place(place):\n n_updates = 0\n\n ref = places.get(place.name)\n if ref is not None:\n for att in PLACE_ASTROMETRIC_ATTRS:\n setattr(place, att, getattr(ref, att))\n\n n_updates += 1\n\n n_updates += update_imageset(place.image_set)\n n_updates += update_imageset(place.background_image_set)\n n_updates += update_imageset(place.foreground_image_set)\n return n_updates\n\n # Load up the preferred data\n\n in_folder = 
Folder.from_file(settings.in_path)\n\n for depth, path, item in in_folder.walk(download=False):\n if isinstance(item, Place):\n add_place(item)\n elif isinstance(item, ImageSet):\n add_imageset(item)\n\n # Now update everything\n\n n_updated_files = 0\n\n for update_path in settings.update_paths:\n folder = Folder.from_file(update_path)\n n_updates = 0\n\n for depth, path, item in folder.walk(download=False):\n if isinstance(item, Place):\n n_updates += update_place(item)\n elif isinstance(item, ImageSet):\n n_updates += update_imageset(item)\n\n print(\"%s: updated %d items\" % (update_path, n_updates))\n\n if n_updates > 0:\n n_updated_files += 1\n\n with open(update_path, \"wt\", encoding=\"utf8\") as f_out:\n folder.write_xml(f_out)\n\n print()\n print(\"Updated %d WTML files.\" % n_updated_files)\n\n\n# The CLI driver:\n\n\ndef entrypoint(args=None):\n \"\"\"The entrypoint for the \\\"wwtdatatool\\\" command-line interface.\n\n Parameters\n ----------\n args : iterable of str, or None (the default)\n The arguments on the command line. The first argument should be\n a subcommand name or global option; there is no ``argv[0]``\n parameter.\n\n \"\"\"\n # Set up the subcommands from globals()\n\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest=\"subcommand\")\n commands = set()\n\n for py_name, value in globals().items():\n if py_name.endswith(\"_getparser\"):\n cmd_name = py_name[:-10].replace(\"_\", \"-\")\n subparser = subparsers.add_parser(cmd_name)\n value(subparser)\n commands.add(cmd_name)\n\n # What did we get?\n\n settings = parser.parse_args(args)\n\n if settings.subcommand is None:\n print(\"Run me with --help for help. Allowed subcommands are:\")\n print()\n for cmd in sorted(commands):\n print(\" \", cmd)\n return\n\n py_name = settings.subcommand.replace(\"-\", \"_\")\n\n impl = globals().get(py_name + \"_impl\")\n if impl is None:\n die('no such subcommand \"{}\"'.format(settings.subcommand))\n\n # OK to go!\n\n impl(settings)\n","sub_path":"wwt_data_formats/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":26235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"68813247","text":"from pathlib import Path\nfrom graia.saya import Saya, Channel\nfrom graia.saya.builtins.broadcast.schema import ListenerSchema\n\nfrom avilla.core.execution.message import MessageSend\nfrom avilla.core.message.chain import MessageChain\nfrom avilla.core.relationship import Relationship\nfrom avilla.core.builtins.profile import MemberProfile, GroupProfile\nfrom avilla.core.builtins.elements import Text, Notice\nfrom avilla.core.builtins.elements import Image as IMG\nfrom avilla.core.event.message import MessageEvent\nfrom lib.bank import Bank\nfrom lib import limiter\n\nsaya = Saya.current()\nchannel = Channel.current()\nbank = Bank(\"./data/bank.json\")\n\n\n@channel.use(ListenerSchema(listening_events=[MessageEvent]))\nasync def sendmsg(event: MessageEvent, rs: Relationship):\n if event.message.as_display().startswith(\"#储蓄罐 \"):\n await limiter.limit(\"ATM\", rs, 5)\n msg = event.message.get_first(Text).text[5:]\n if msg == \"注册\":\n try:\n await bank.create_account(rs.ctx.id, 100)\n await rs.exec(\n MessageSend(MessageChain.create([Text(\"注册霖念储蓄罐成功 w~\")]))\n )\n except Exception as e:\n await rs.exec(\n MessageSend(\n MessageChain.create([Text(f\"注册霖念储蓄罐失败 原因是{e} qaq\")])\n )\n )\n elif msg.startswith(\"充值 \") and event.message.has(Notice):\n if not rs.ctx.id == \"2544704967\":\n await rs.exec(\n 
MessageSend(\n MessageChain.create([Text(\"您不是霖念储蓄罐的管理员,无权进行此操作qwq\")])\n )\n )\n return\n money = int(msg[3:])\n\n try:\n if money < 0 or money > 10000:\n await rs.exec(\n MessageSend(\n MessageChain.create([Text(\"充值金额超出范围(0-10000),请重新输入\")])\n )\n )\n return\n await bank.deposit(event.message.get_first(Notice).target, money)\n await rs.exec(MessageSend(MessageChain.create([Text(\"充值成功!Owo\")])))\n except Exception as e:\n await rs.exec(\n MessageSend(MessageChain.create([Text(f\"充值失败 原因是{e} qaq\")]))\n )\n elif msg == \"查询\":\n try:\n await rs.exec(\n MessageSend(\n MessageChain.create(\n [\n Text(\n f\"你的储蓄罐余额为{await bank.get_balance(rs.ctx.id)}霖念币 Owo\"\n )\n ]\n )\n )\n )\n except Exception as e:\n await rs.exec(\n MessageSend(MessageChain.create([Text(f\"查询失败 原因是{e} qwq\")]))\n )\n elif msg.startswith(\"转账 \") and event.message.has(Notice):\n toid = event.message.get_first(Notice).target\n try:\n money = int(msg[3:])\n if money < 0 or money > 10000:\n await rs.exec(\n MessageSend(\n MessageChain.create([Text(\"转账金额超出范围(0-10000),请重新输入\")])\n )\n )\n return\n await bank.transfer(rs.ctx.id, toid, int(money))\n await rs.exec(MessageSend(MessageChain.create([Text(\"转账成功!OwO\")])))\n except Exception as e:\n await rs.exec(\n MessageSend(MessageChain.create([Text(f\"转账失败 原因是{e} qwq\")]))\n )\n","sub_path":"module/ATM.py","file_name":"ATM.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"471321847","text":"#!/usr/bin/python\n\n\"\"\"\n\nThis program shows how to retrieve info for a single source from the Frost service.\n\nThe HTTP request essentially consists of the following components:\n - the endpoint, frost.met.no/sources\n - the source ID to get information for\n - the client ID used for authentication\n\nThe source ID is read from a command-line argument, while the client ID is read from\nthe environment variable CLIENTID.\n\nSave the program to a file example.py, make it executable (chmod 755 example.py),\nand run it e.g. 
like this:\n\n $ CLIENTID=8e6378f7-b3-ae4fe-683f-0db1eb31b24ec ./example.py SN18700\n\nor like this to get info for sources matching a pattern:\n\n $ CLIENTID=8e6378f7-b3-ae4fe-683f-0db1eb31b24ec ./example.py SN187*\n\n(Note: the client ID used in the example should be replaced with a real one)\n\nThe program has been tested on the following platforms:\n - Python 2.7.3 on Ubuntu 12.04 Precise\n - Python 2.7.12 and 3.5.2 on Ubuntu 16.04 Xenial\n\n\"\"\"\nCLIENTID=\"c15d4317-5910-423f-8fe6-d7446fa5fc05\"\n\nimport sys, os\nimport requests # See http://docs.python-requests.org/\n\nif __name__ == \"__main__\":\n\n source_id = \"SN9600\"\n client_id = CLIENTID\n\n # issue an HTTP GET request\n r = requests.get(\n 'https://frost.met.no/location/v0.jsonld',\n {'data': ''},\n auth=(client_id, '')\n )\n\n def codec_utf8(s):\n #return s.encode('utf-8').decode('utf-8') # should be used for Python 3\n return s.encode('utf-8') # should be used for Python 2\n\n # extract some data from the response\n if r.status_code == 200:\n for item in r.json()['data']:\n sys.stdout.write('ID: {}\\n'.format(item['id']))\n sys.stdout.write('Name: {}\\n'.format(codec_utf8(item['name'])))\n if 'geometry' in item:\n sys.stdout.write('longitude: {}\\n'.format(item['geometry']['coordinates'][0]))\n sys.stdout.write('latitude: {}\\n'.format(item['geometry']['coordinates'][1]))\n if 'municipality' in item:\n sys.stdout.write('Municipality: {}\\n'.format(codec_utf8(item['municipality'])))\n if 'county' in item:\n sys.stdout.write('County: {}\\n'.format(codec_utf8(item['county'])))\n sys.stdout.write('Country: {}\\n'.format(codec_utf8(item['country'])))\n if 'externalIds' in item:\n for ext_id in item['externalIds']:\n sys.stdout.write('external ID: {}\\n'.format(ext_id))\n else:\n sys.stdout.write('no external IDs found\\n')\n else:\n sys.stdout.write('error:\\n')\n sys.stdout.write('\\tstatus code: {}\\n'.format(r.status_code))\n if 'error' in r.json():\n assert(r.json()['error']['code'] == r.status_code)\n sys.stdout.write('\\tmessage: {}\\n'.format(r.json()['error']['message']))\n sys.stdout.write('\\treason: {}\\n'.format(r.json()['error']['reason']))\n else:\n sys.stdout.write('\\tother error\\n')\n","sub_path":"Christoffer_kode/frost.py","file_name":"frost.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"428443104","text":"# !/usr/bin/python\n# -*- coding:utf-8 -*-\nimport numpy as np\n\nnum_anchors = None\nmap_size = None\nstride_size = None\nbox = None\nH = []\n\n\ndef get_coord(N):\n t = np.arange(N)\n x, y = np.meshgrid(t, t)\n x = x[..., None]\n y = y[..., None]\n\n coord = np.concatenate((y, x, y, x), axis=-1)\n coord = coord[:, :, None, :]\n return coord\n\n\ndef generate_anchor_base(base_size, index, num=4):\n ratios = [1, 1.6, 2, 3, 1 / 1.6, 1 / 2, 1 / 3]\n if num==6:\n ratios = [1, 2, 3, 1 / 2, 1 / 3]\n if num==4 or num==3:\n ratios = [1, 2, 1 / 2]\n py = base_size / 2\n px = base_size / 2\n\n anchors_base = []\n for ratio in ratios:\n h = box[index] * np.sqrt(1 / ratio)\n H.append(h)\n w = box[index] * np.sqrt(ratio)\n anchors_base.append([py - h / 2, px - w / 2, py + h / 2, px + w / 2])\n if num==6:\n t = np.sqrt(box[index] * box[index + 1]) / 2\n H.append(t)\n anchors_base.append([py - t, px - t, py + t, px + t])\n anchors_base = np.array(anchors_base, dtype=np.float32)\n return anchors_base\n\n pass\n\n\ndef create_anchors():\n Anchors = np.zeros((0, 4))\n for i in range(len(num_anchors)):\n anchors_base = 
generate_anchor_base(stride_size[i], i, num=num_anchors[i])\n coord = get_coord(map_size[i])\n tanchors = coord * stride_size[i] + anchors_base\n tanchors = tanchors.reshape(-1, 4)\n Anchors = np.concatenate((Anchors, tanchors))\n Anchors = Anchors.astype(np.float32)\n return Anchors\n\n\ndef get_box(s_min, s_max, image_size=300, m=6):\n stride = int((int(s_max * 100) - int(s_min * 100)) / (m - 2))\n bbox = np.zeros(m + 1)\n bbox[0] = int(s_min * 100 / 2) / 100\n bbox[1:] = (s_min + np.arange(m) * stride / 100)\n print(bbox)\n\n return bbox * image_size\n\n pass\n\n\ndef get_Anchors(img_size=321, s_min_=0.15, s_max_=0.9, num_anchors_=[8, 8, 8, 8, 8, 8], map_size_=[40, 20, 10, 5, 3, 1],\n stride_size_=[8, 16, 32, 64, 107, 321]):\n global num_anchors, map_size, stride_size, box\n num_anchors = num_anchors_\n map_size = map_size_\n stride_size = stride_size_\n box = get_box(s_min_, s_max_, img_size)\n print(box)\n Anchors = create_anchors()\n return Anchors\n\n\ndef bbox2c_bbox(bboxes):\n y = (bboxes[:, 2:3] + bboxes[:, 0:1]) / 2\n x = (bboxes[:, 3:4] + bboxes[:, 1:2]) / 2\n h = bboxes[:, 2:3] - bboxes[:, 0:1]\n w = bboxes[:, 3:4] - bboxes[:, 1:2]\n return y, x, h, w\ndef get_cAnchors(img_size=300, s_min_=0.2, s_max_=0.9, num_anchors_=[8, 8, 8, 8, 8, 8], map_size_=[40, 20, 10, 5, 3, 1],\n stride_size_=[8, 16, 32, 64, 107, 321]):\n global num_anchors, map_size, stride_size, box\n num_anchors = num_anchors_\n map_size = map_size_\n stride_size = stride_size_\n box = get_box(s_min_, s_max_, img_size)\n print(box)\n Anchors = create_anchors()\n y, x, h, w = bbox2c_bbox(Anchors)\n for i in H:\n print(i)\n return np.concatenate((y, x, h, w), axis=-1)\n\nif __name__ == \"__main__\":\n # Anchors = get_Anchors()\n # print(Anchors.shape)\n cAnchors=get_cAnchors()\n print(cAnchors.shape)\n\n\n pass\n","sub_path":"tool/get_anchors.py","file_name":"get_anchors.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"150854572","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#\r\n# Copyright (c) 2013-present SMHI, Swedish Meteorological and Hydrological Institute \r\n# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).\r\n\r\nimport sharkdata_core\r\n\r\n@sharkdata_core.singleton\r\nclass TransectData(object):\r\n \"\"\" \"\"\"\r\n def __init__(self):\r\n \"\"\" \"\"\"\r\n self.clear()\r\n \r\n def clear(self):\r\n \"\"\" \"\"\"\r\n self._transect_dict = {}\r\n self._transect_sequence_no = 0\r\n \r\n def get_transect_data(self, datarow_dict):\r\n \"\"\" \"\"\"\r\n # Create key.\r\n key = self.create_key(datarow_dict)\r\n #\r\n if key in self._transect_dict:\r\n return self._transect_dict[key]\r\n else:\r\n return {}\r\n \r\n def get_transect_sequence_no(self, datarow_dict):\r\n \"\"\" \"\"\"\r\n # Create key.\r\n key = self.create_key(datarow_dict)\r\n #\r\n if key in self._transect_dict:\r\n return str(self._transect_dict[key].get('transect_sequence_no', ''))\r\n else:\r\n return ''\r\n \r\n def create_key(self, datarow_dict):\r\n \"\"\" \"\"\"\r\n key = datarow_dict.get('sample_date', '') + '+'\r\n key += datarow_dict.get('station_name', '') + '+'\r\n key += datarow_dict.get('transect_id', '')\r\n \r\n# key += datarow_dict.get('CruiseIdentifier', '') + '+'\r\n# key += datarow_dict.get('StationNumber', '') + '+'\r\n# key += datarow_dict.get('Transect', '') + '+'\r\n key += datarow_dict.get('transect_direction', '') + '+'\r\n key += 
datarow_dict.get('transect_start_latitude_dd', '') + '+'\r\n key += datarow_dict.get('transect_start_longitude_dd', '') + '+'\r\n key += datarow_dict.get('transect_length_m', '') + '+'\r\n key += datarow_dict.get('transect_end_latitude_dd', '') + '+'\r\n key += datarow_dict.get('transect_end_longitude_dd', '') + '+'\r\n\r\n# key += datarow_dict.get('section_distance_start_m', '') + '+'\r\n# key += datarow_dict.get('section_distance_end_m', '') + '+'\r\n# key += datarow_dict.get('section_fauna_flora_found', '') + '+'\r\n# key += datarow_dict.get('section_start_depth_m', '') + '+'\r\n# key += datarow_dict.get('section_end_depth_m', '') + '+'\r\n# \r\n# \r\n# key += datarow_dict.get('degree_biofouling', '') + '+'\r\n# key += datarow_dict.get('bitemark', '') + '+'\r\n# key += datarow_dict.get('reproductive_organs', '') + '+'\r\n# key += datarow_dict.get('detached', '') + '+'\r\n# key += datarow_dict.get('epibiont', '') + '+'\r\n# key += datarow_dict.get('stratum_code', '')\r\n\r\n #\r\n return key\r\n \r\n# String TransectDirection = sample.getField(\"sample.transect_direction\"); // ICES: TRDGR.\r\n# String PositioningSystem = sample.getField(\"\"); // ICES: POSYS.\r\n# String TransectStartLatitude = sample.getField(\"sample.transect_start_latitude_dd\"); // ICES: LATRS.\r\n# String TransectStartLongitude = sample.getField(\"sample.transect_start_longitude_dd\"); // ICES: LNTRS.\r\n# String TransectLength = sample.getField(\"sample.transect_length\"); // ICES: TRSLN.\r\n# String TransectEndDetermination = sample.getField(\"\"); // ICES: TREDT.\r\n# String TransectEndLatitude = sample.getField(\"sample.transect_end_latitude_dd\"); // ICES: LATRE.\r\n# String TransectEndLongitude = sample.getField(\"sample.transect_end_longitude_dd\"); // ICES: LNTRE.\r\n# String DepthAdjustment = sample.getField(\"\"); // ICES: DEPAD.\r\n# String TransectEndDepth = sample.getField(\"\"); // ICES: TREDP.\r\n# String MaxVegetationDepth = sample.getField(\"\"); // ICES: MXVEG.\r\n# String SpeciesAtMaxVegetationDepth = sample.getField(\"\"); // ICES: SPVEG.\r\n# String RefCodeList = sample.getField(\"\"); // ICES: RLIST.\r\n# String DataCentreFlag = sample.getField(\"\"); // ICES: DCFLG.\r\n \r\n # Java.\r\n # for (Ices40Transect record : recordList) { \r\n # if (\r\n # record.getCruiseIdentifier().equals(CruiseIdentifier) &&\r\n # record.getStationNumber().equals(StationNumber) &&\r\n # record.getTransect().equals(Transect) &&\r\n # record.getTransectDirection().equals(TransectDirection) && // ICES: TRDGR.\r\n # record.getPositioningSystem().equals(PositioningSystem) && // ICES: POSYS.\r\n # record.getTransectStartLatitude().equals(TransectStartLatitude) && // ICES: LATRS.\r\n # record.getTransectStartLongitude().equals(TransectStartLongitude) && // ICES: LNTRS.\r\n # record.getTransectLength().equals(TransectLength) && // ICES: TRSLN.\r\n # record.getTransectEndDetermination().equals(TransectEndDetermination) && // ICES: TREDT.\r\n # record.getTransectEndLatitude().equals(TransectEndLatitude) && // ICES: LATRE.\r\n # record.getTransectEndLongitude().equals(TransectEndLongitude) && // ICES: LNTRE.\r\n # record.getDepthAdjustment().equals(DepthAdjustment) && // ICES: DEPAD.\r\n # record.getTransectEndDepth().equals(TransectEndDepth) && // ICES: TREDP.\r\n # record.getMaxVegetationDepth().equals(MaxVegetationDepth) && // ICES: MXVEG.\r\n # record.getSpeciesAtMaxVegetationDepth().equals(SpeciesAtMaxVegetationDepth) && // ICES: SPVEG.\r\n # record.getRefCodeList().equals(RefCodeList) && // ICES: RLIST.\r\n # 
record.getDataCentreFlag().equals(DataCentreFlag) ) { // ICES: DCFLG.\r\n    #         return record;\r\n    #     }\r\n    # }\r\n    # Ices40Transect newRecord = new Ices40Transect();\r\n\r\n    \r\n    \r\n    \r\n    \r\n    \r\n    def load_all_transect_data(self, dataset):\r\n        \"\"\" \"\"\"\r\n        dataheader = dataset.data_header\r\n        for datarow in dataset.data_rows:\r\n            datarow_dict = dict(zip(dataheader, map(str, datarow)))\r\n            # Create key.\r\n            key = self.create_key(datarow_dict) \r\n            if key not in self._transect_dict:\r\n                self._transect_sequence_no += 1\r\n                self._transect_dict[key] = {'transect_sequence_no': str(self._transect_sequence_no)} \r\n            \r\n            #\r\n\r\n            transect_length_m = datarow_dict.get('transect_length_m', '')\r\n            if transect_length_m == '':\r\n                #\r\n                max_section_distance_end_m = self._transect_dict[key].get('max_section_distance_end_m', '0')\r\n                new_section_distance_end_m = datarow_dict.get('section_distance_end_m', '0')\r\n                #\r\n                try:\r\n                    max_float = float(max_section_distance_end_m.replace(',', '.'))\r\n                    if new_section_distance_end_m == '':\r\n                        new_float = 0\r\n                    else:\r\n                        new_float = float(new_section_distance_end_m.replace(',', '.'))\r\n                    #\r\n                    if new_float > max_float:\r\n                        self._transect_dict[key]['max_section_distance_end_m'] = str(new_section_distance_end_m).replace(',', '.')\r\n                except Exception as e:\r\n                    print('DEBUG: Transect data exception: ' + str(e))\r\n            #\r\n            max_sample_max_depth_m = self._transect_dict[key].get('max_sample_max_depth_m', '0')\r\n            new_sample_max_depth_m = datarow_dict.get('sample_max_depth_m', '0')\r\n            #\r\n            if not max_sample_max_depth_m:\r\n                self._transect_dict[key]['max_sample_max_depth_m'] = str(new_sample_max_depth_m).replace(',', '.')\r\n            #\r\n            if max_sample_max_depth_m and new_sample_max_depth_m:\r\n                try:\r\n                    max_float = float(max_sample_max_depth_m.replace(',', '.'))\r\n                    new_float = float(new_sample_max_depth_m.replace(',', '.'))\r\n                    #\r\n                    if new_float > max_float:\r\n                        self._transect_dict[key]['max_sample_max_depth_m'] = str(new_sample_max_depth_m).replace(',', '.')\r\n                except Exception as e:\r\n                    print('DEBUG: Transect data exception: ' + str(e))\r\n    \r\n    def reformat_transect_id(self, transect_id):\r\n        \"\"\" \"\"\"\r\n        if len(transect_id) > 12:\r\n            return transect_id[:5] + '..' + transect_id[-5:]\r\n        else:\r\n            return transect_id\r\n    \r\n    def stratum_id(self, data_row):\r\n        \"\"\" \"\"\"\r\n        if data_row.get('epibiont', '') == 'Y':\r\n            return str(5)\r\n        if data_row.get('detached', '') == 'Y':\r\n            return str(6)\r\n        #\r\n        return ''\r\n\r\n\r\n    ","sub_path":"sharkdata_core/ices_xml_generator/export_ices_transects.py","file_name":"export_ices_transects.py","file_ext":"py","file_size_in_byte":8632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"637466994","text":"# --- File Input-Output ---\r\n# Question 1 - Review\r\n# a.\t\r\n# –\tWhat are the main steps we follow in writing text to a file? \r\n# –\tWhat are the functions/methods we use for these steps? \r\n# –\tIllustrate the writing process with a simple but complete example.\r\n# Step 1: open file with mode \"w\"\r\n\r\nfile=open(\"fileName.txt\", \"w\")\r\n# Step 2: write to file\r\nfile.write(string)\r\n# Step 3: Close the file\r\nfile.close()\r\n\r\n\r\n# b.\tSuppose we open a file for writing. What happens if the file does not exist? What happens if the file already exists? \r\n# c.\t\r\n# –\tWhat are the main steps we follow in reading text from a file? 
\r\n# –\tWhat are the functions/methods we use for these steps?\r\n# –\tIllustrate the reading process with a simple example in which we read the entire contents of the file into a string.\r\n\r\n# Step 1: open file with mode \"r\"\r\nfile=open(\"fileName.txt\", \"r\")\r\n# Step 2: read data from file\r\nstring=file.read ()\r\n# Step 3: Close the file\r\nfile.close()\r\n\r\n\r\n# d.\t\r\n# –\tHow do we open a file to append text to? \r\n# –\tWhat happens if the file does not exist?\r\n# –\tWhat happens if the file already exists?\r\n\r\n\r\n# Step 1: open file with mode \"a\"\r\nfile=open(\"fileName.txt\", \"a\")\r\n# Step 2: write to file\r\nfile.write (string)\r\n# Step 3: Close the file\r\nfile.close()\r\n\r\n\r\n\r\n\r\n\r\n# Question 2 - Copy Files\r\n# a.\tWrite a program to copy a file to another file. Let the input file be twinkle.txt (provided), and the output file is twinkle_copy1.txt.\r\ninputFile = open(\"twinkle.txt\")\r\noutputFile = open(\"twinkle_copy1.txt\", \"w\")\r\n\r\ncontents = inputFile.read()\r\noutputFile.write(contents)\r\n\r\ninputFile.close()\r\noutputFile.close()\r\n\r\nprint(\"done\")\r\n\r\n\r\n\r\n# b.\tWrite a program to copy a file to another file, with each line of text being preceded by the line number, followed by a colon. Start the line numbers at 1. \r\n#Let the input file be twinkle.txt and the output file be twinkle_copy2.txt\r\n\r\ninfile = open(\"twinkle.txt\",\"r\")\r\noutfile = open(\"twinkle_copy2.txt\", \"w\")\r\nlines = infile.readlines()\r\nfor n in range(len(lines)):\r\n outfile.write(str(n+1) +\": \" + lines[n])\r\ninfile.close()\r\noutfile.close()\r\nprint(\"done\")\r\n\r\n\r\n\r\n# Question 3 - Merge Files\r\n# Write a program to merge the two files. The files names set1.txt and names set2.txt are provided. \r\n# Each file has 30 names, each of which is on a line of its own. The names in each file are sorted. The two files have names in common. \r\n# The program is to merge the two files and save the merged list in the output file names merged.txt. \r\n\r\nfname = \"names_set1.txt\"\r\ninfile1 = open(fname)\r\n\r\nfname = \"names_set2.txt\"\r\ninfile2 = open(fname)\r\n\r\nfname = \"names_merged.txt\"\r\noutfile = open(fname, \"w\")\r\n\r\nlist1 = infile1.readlines() \r\nlist2 = infile2.readlines()\r\n\r\nmergeList = (list1 + list2)\r\nfor word in mergeList:\r\n outfile.write(word)\r\n\r\ninfile1.close()\r\ninfile2.close()\r\noutfile.close()\r\n\r\nprint(\"done\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Question 4 - Finding Common Words\r\n# Write a program that reads two files and print out all the words they have in common. \r\n# For this question, take a word as a sequence of characters separated by whitespace characters. \r\n# For a quick test, you can take names set1.txt and names set2.txt as input files. Print out all the words they have in common.\r\n\r\ninfile1 = open(\"names_set1.txt\")\r\ninfile2 = open(\"names_set2.txt\")\r\n\r\ntext1 = infile1.read()\r\nwordList1 = text1.split()\r\n\r\ntext2 = infile2.read()\r\nwordList2 = text2.split()\r\n\r\nfor i in wordList1:\r\n for j in wordList2:\r\n if i==j:\r\n print(\"commonWords\", i)\r\n\r\ninfile1.close()\r\ninfile2.close()\r\n\r\n\r\n\r\n\r\n# Question 5 -Word Frequencies\r\n# Write a program to read a text file and print the frequencies (how many times this word occurs) of each word. To keep things simple, take all the words to be in lower case. 
\r\n# We will take a word as a sequence of letters separated by whitespace characters and the following punctuation marks: comma, colon, semi-colon, period, question mark, exclamation mark, and quotation marks. \r\n
# For a quick test, use the provided file why.txt for input.\r\n\r\n\r\n\r\n\r\n
# open and read the contents of the whole file into a string\r\n
infile = open(\"why.txt\")\r\n
text = infile.read()\r\n\r\n
# one way to make the punctuation marks separate the tokens\r\n
# is to replace each punctuation mark\r\n
# with the blank character\r\n
for ch in \".,?!;:\\\"'\":\r\n
    text = text.replace(ch, \" \")\r\n\r\n
# get the list of tokens\r\n
tokenList = text.split()\r\n\r\n
# some tokens may contain non-letter characters.\r\n
# We want to remove those tokens.\r\n
# We also convert the words to lower case.\r\n
# Note: wordList must be a list, not a string; appending to a string\r\n
# would make the counting loop below count characters instead of words.\r\n
wordList = []\r\n
for e in tokenList:\r\n
    if e.isalpha():\r\n
        wordList.append(e.lower())\r\n\r\n\r\n
# print each distinct word once, together with its frequency\r\n
for word in sorted(set(wordList)):\r\n
    print(word, wordList.count(word))\r\n
","sub_path":"Files.py","file_name":"Files.py","file_ext":"py","file_size_in_byte":4723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"68137607","text":"\n\nfrom xai.brain.wordbase.nouns._celebrant import _CELEBRANT\n\n
# class header\n
class _CELEBRANTS(_CELEBRANT, ):\n
\tdef __init__(self,): \n
\t\t_CELEBRANT.__init__(self)\n
\t\tself.name = \"CELEBRANTS\"\n
\t\tself.specie = 'nouns'\n
\t\tself.basic = \"celebrant\"\n
\t\tself.jsondata = {}\n
","sub_path":"xai/brain/wordbase/nouns/_celebrants.py","file_name":"_celebrants.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"465074730","text":"class Player:\n
    def __init__(self,name,playedCountry,age,countryFrom):\n
        self.name = name\n
        self.playedCountry = playedCountry\n
        self.age = age\n
        self.countryFrom = countryFrom\n
        \n
def countPlayers(playersList, country):\n
    count = 0\n
    for player in playersList:\n
        if player.countryFrom.lower() == country.lower():\n
            count+=1\n
    return count\n\n
def getPlayerPlayedforMaxCountry(playersList):\n
    maxCount = 0  # renamed from 'max' to avoid shadowing the built-in\n
    for player in playersList:\n
        if (len(player.playedCountry) > maxCount):\n
            maxCount = len(player.playedCountry)\n
    name = ''  # safe default in case the list is empty\n
    for player in playersList:\n
        if(len(player.playedCountry) == maxCount):\n
            name = player.name\n
    return name\n\n
if __name__ == '__main__':\n
    n=int(input())\n
    playerList = []\n
    for i in range (n):\n
        name = input()\n
        countCountry = int(input())\n
        playedCountry = []\n
        for j in range (countCountry):\n
            country = input()\n
            playedCountry.append(country)\n
        age = int(input())\n
        countryFrom = input()\n
        playerObj = Player(name, playedCountry, age, countryFrom)\n
        playerList.append(playerObj)\n
    country = input()\n
    count = countPlayers(playerList, country)\n
    name = getPlayerPlayedforMaxCountry(playerList)\n
    print(count)\n
    print(name)","sub_path":"pythonCPA/12Mar_players.py","file_name":"12Mar_players.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"43512081","text":"# -*- coding: utf-8 -*-\n
\"\"\"\n\n
=========================================================================\n
This file is part of LyX Notebook, which works with LyX but is an\n
independent project. 
License details (MIT) can be found in the file\nCOPYING.\n\nCopyright (c) 2012 Allen Barker\n=========================================================================\n\nThis is the main module of the Lyx Notebook program; the `lyxNotebook.py` script\njust does basic startup stuff like making sure a Lyx Notebook process is not\nalready running.\n\nThis module contains the implementation of the high-level controller class\n`ControllerLyxWithInterpreter`. This class mediates between the Lyx program\nand one or more interpreters for interpreting code. It gets basic commands\nfrom Lyx, executes them, and pushes the appropriate actions back to Lyx. Some\nof these actions involve running code in an interpreter. In these cases, the\ncontroller sends the code to the appropriate interpreter, gets the results\nback, and then pushes the results to Lyx.\n\n\"\"\"\n\nfrom __future__ import print_function, division\n#import easygui as eg\nimport easygui_096 as eg # Use a local, modified version.\nimport re\nimport sys\nimport os\nimport time\nimport signal\n\n# Local file imports.\nimport lyxNotebook_user_settings\nfrom interact_with_lyx_cells import InteractWithLyxCells, Cell\nfrom external_interpreter import ExternalInterpreter\nimport interpreter_specs # Specs for all the interpreters which are allowed.\nimport keymap # The current mapping of keys to Lyx Notebook functions.\n\n\nclass IndentCalc(object):\n \"\"\"A class that is used for Python cells, to calculate the indentation\n levels. This is used so that users can write code like in a file, without\n the extra blank lines which are often required in the interpreter. A blank\n line is sent automatically when the code indentation level reaches zero,\n going downward.\n\n The class must calculate explicit and implicit line continuations, since this\n affects the indentation calculation. The indentation is also incremented\n if a colon is found on a line but not in a comment, string, or within any\n parens, curly braces, or brackets. This is to handle one-liners like\n \"if x==4: return\"\n After handling colons the calculated indententation values are no longer\n strictly correct literally, but they still works in the \"down to zero\"\n calculation, which is what is important.\n\n An instance of this object should be passed each physical line, one by one.\n It then makes calculations concerning the logical line structure. 
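    As a small worked illustration (not from the original docs): feeding the\n
    physical lines\n
        y = (1 +\n
        2)\n
    one at a time leaves in_line_continuation() True after the first line\n
    (open paren) and False again after the second; a later line that returns\n
    to column zero from a deeper indent level makes\n
    indent_level_down_to_zero() True for exactly that line.\n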
There\n are no side effects, so results can be ignored for non-Python code.\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.parens = 0\n self.brackets = 0\n self.curlys = 0\n self.in_string1 = False\n self.in_string2 = False\n self.backslash_continuation = False\n self.indentation_level = 0\n self.indentation_level_down_to_zero = False\n\n def in_string_literal(self):\n return self.in_string1 or self.in_string2\n\n def in_paren_bracket_curly(self):\n return self.parens > 0 or self.brackets > 0 or self.curlys > 0\n\n def in_explicit_line_continuation(self):\n return self.backslash_continuation\n\n def in_implicit_line_continuation(self):\n return self.in_paren_bracket_curly() or self.in_string2\n\n def in_line_continuation(self):\n return self.in_explicit_line_continuation() or self.in_implicit_line_continuation()\n\n def indent_level(self):\n return self.indentation_level\n\n def indent_level_down_to_zero(self):\n return self.indentation_level_down_to_zero\n\n def update_for_physical_line(self, code_line):\n \"\"\"The IndentCalc class should be sequentially passed physical lines, via\n this function.\"\"\"\n\n # \"indentation down to zero\" is only considered true right after the first\n # non-continued physical line which has indentation level zero when the\n # previous line had a higher level, so always reset for each physical line\n self.indentation_level_down_to_zero = False\n\n # detect a blank line (possibly with a comment) and do nothing else\n stripped_line = code_line.rstrip() # strip off trailing whitespace\n if len(stripped_line) == 0:\n self.backslash_continuation = False # assume blanks unset explicit continuation\n return\n first_nonwhitespace = re.search(\"\\S\", stripped_line)\n if first_nonwhitespace == \"#\":\n self.backslash_continuation = False\n return\n\n # update the indentation level (unless line is continued)\n if not self.in_line_continuation():\n new_level = first_nonwhitespace.start()\n if self.indentation_level > 0 and new_level == 0:\n self.indentation_level_down_to_zero = True\n self.indentation_level = new_level\n\n # backslash continuation only holds for one line (unless reset later at end)\n # this was already used in calculating self.in_line_continuation() above\n self.backslash_continuation = False\n\n # go through each char in the line, updating paren counts, etc.\n # note that i is the index into the line stripped_line\n backslash_escape = False\n # fake a C-style loop, so we can jump ahead easily by resetting i\n i = -1\n while True:\n\n i += 1\n if i >= len(stripped_line): break\n char = stripped_line[i]\n\n # first handle backslash escape mode... 
we always ignore the next char,\n # and the only cases we care about are one-character backslash escapes\n # (let Python worry about any syntax errors with backslash outside strings)\n if backslash_escape:\n backslash_escape = False\n continue\n\n # handle the backslash char, either line continuation or escape\n if char == \"\\\\\":\n if i == len(stripped_line) - 1: # line continuation\n self.backslash_continuation = True\n continue # could also break, since at end of line\n else: # start a backslash escape\n # this is only valid in strings, but let Python catch any errors there\n backslash_escape = True\n continue\n\n # look for string delimiters and toggle string modes\n if char == \"\\\"\":\n # if in a string, then we got the closing quote\n if self.in_string1: self.in_string1 = False\n # check if this is part of a triple-quote string\n elif (i <= len(stripped_line) - 3 and\n stripped_line[i+1] == \"\\\"\" and stripped_line[i+2] == \"\\\"\"):\n if self.in_string2: self.in_string2 = False\n else: self.in_string2 = True\n i += 2 # increment past the second two quotes of the triple-quote\n # otherwise we start a new single-quote string\n else: self.in_string1 = True\n continue\n\n # ignore all else inside strings\n if self.in_string_literal(): continue\n\n # if at a comment begin then nothing more to do\n if char == \"#\": break\n\n # update counts for general delimiters\n if char == \"(\": self.parens += 1\n elif char == \")\": self.parens -= 1\n elif char == \"[\": self.brackets += 1\n elif char == \"]\": self.brackets -= 1\n elif char == \"{\": self.curlys += 1\n elif char == \"}\": self.curlys -= 1\n\n # Increase indent if a colon is found not inside a comment, string,\n # or any paren/bracket/curly delimiters. We've already ruled out all\n # cases but those delimeters above.\n #\n # This is to allow one-liners like\n # def sqr(x): return x*x\n # and like the \"if\" line below.\n #\n # The indent level will not be exact when separate-line colons are found,\n # but the \"down to zero\" will work. This could be improved, if necessary,\n # by checking whether the colon is at the end of the line (except for\n # whitespace and comments) and not incrementing in those cases.\n if char == \":\" and not self.in_paren_bracket_curly(): self.indentation_level += 3\n return\n\n\nclass InterpreterProcess(object):\n \"\"\"An instance of this class represents a data record for a running\n interpreter process. Contains an ExternalInterpreter instance for that\n process, but also has an IndentCalc instance, and keeps track of the most\n recent prompt received from the interpreter.\"\"\"\n\n def __init__(self, spec):\n self.spec = spec\n self.most_recent_prompt = self.spec[\"main_prompt\"]\n self.indent_calc = IndentCalc()\n self.external_interp = ExternalInterpreter(self.spec)\n\n\nclass InterpreterProcessCollection(object):\n \"\"\"A class to hold multiple InterpreterProcess instances. There will\n probably only be a single instance, but multiple instances should not cause\n problems. Basically a dict mapping (bufferName,inset_specifier) tuples to\n InterpreterProcess class instances. 
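    A hypothetical lookup, using the method defined below (the variable and\n
    argument names here are illustrative only):\n
        proc = collection.get_interpreter_process(\"doc.lyx\", \"Python\")\n
    returns the InterpreterProcess for \"Python\" cells of doc.lyx, creating\n
    and starting the process first if that key is not in the dict yet.\n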
Starts processes when necessary.\"\"\"\n\n def __init__(self, current_buffer):\n if lyxNotebook_user_settings.separate_interpreters_for_each_buffer is False:\n current_buffer = \"___dummy___\" # force all to use same buffer if not set\n self.interpreter_spec_list = [specName.params\n for specName in interpreter_specs.all_specs]\n self.num_specs = len(self.interpreter_spec_list)\n self.inset_specifier_to_interpreter_spec_dict = {}\n self.all_inset_specifiers = []\n for spec in self.interpreter_spec_list:\n self.inset_specifier_to_interpreter_spec_dict[spec[\"inset_specifier\"]] = spec\n self.all_inset_specifiers.append(spec[\"inset_specifier\"])\n self.reset_all_interpreters_for_all_buffers(current_buffer)\n\n def reset_all_interpreters_for_all_buffers(self, current_buffer=\"\"):\n \"\"\"Reset all the interpreters, restarting any not-on-demand ones for the\n buffer current_buffer (unless it equals the empty string). This also\n frees any processes for former buffers, such as for closed buffers and\n renamed buffers.\"\"\"\n self.main_dict = {} # map (bufferName,inset_specifier) tuple to InterpreterProcess\n # Start up not-on-demand interpreters, but only for the current buffer\n # (in principle we could use buffer-next to get all buffers and start for all,\n # but they may not all even # use Lyx Notebook).\n if current_buffer != \"\":\n self.reset_for_buffer(current_buffer)\n\n def reset_for_buffer(self, buffer_name, inset_specifier=\"\"):\n \"\"\"Reset the interpreter for inset_specifier cells for buffer buffer_name.\n Restarts the whole process. If inset_specifier is the empty string then\n reset for all inset specifiers.\"\"\"\n if lyxNotebook_user_settings.separate_interpreters_for_each_buffer is False:\n buffer_name = \"___dummy___\" # force all to use same buffer if not set\n inset_specifier_list = [inset_specifier]\n if inset_specifier == \"\": # do all if empty string\n inset_specifier_list = self.all_inset_specifiers\n for inset_specifier in inset_specifier_list:\n key = (buffer_name, inset_specifier)\n spec = self.inset_specifier_to_interpreter_spec_dict[inset_specifier]\n if key in self.main_dict: del self.main_dict[key]\n if not spec[\"run_only_on_demand\"]:\n self.get_interpreter_process(buffer_name, inset_specifier)\n\n def get_interpreter_process(self, buffer_name, inset_specifier):\n \"\"\"Get interpreter process, creating/starting one if one not there already.\"\"\"\n if lyxNotebook_user_settings.separate_interpreters_for_each_buffer is False:\n buffer_name = \"___dummy___\" # force all to use same buffer if not set\n key = (buffer_name, inset_specifier)\n if not key in self.main_dict:\n msg = \"Starting interpreter for \" + inset_specifier\n if lyxNotebook_user_settings.separate_interpreters_for_each_buffer is True:\n msg += \", for buffer:\\n \" + buffer_name\n print(msg)\n self.main_dict[key] = InterpreterProcess(\n self.inset_specifier_to_interpreter_spec_dict[inset_specifier])\n return self.main_dict[key]\n\n def print_start_message(self):\n start_msg = \"Running for \" + str(self.num_specs) + \\\n \" possible interpreters (cell languages):\\n\"\n interp_str = \"\"\n for i in range(self.num_specs):\n interp_str += \" \" + self.interpreter_spec_list[i][\"inset_specifier\"]\n interp_str += \" (label=\\\"\" + self.interpreter_spec_list[i][\"prog_name\"] + \"\\\"\"\n if not self.interpreter_spec_list[i][\"run_only_on_demand\"]:\n interp_str += \", autostarted in current buffer\"\n interp_str += \")\\n\"\n start_msg += interp_str\n print(start_msg)\n\n\nclass 
ControllerLyxWithInterpreter(object):\n \"\"\"This class is the high-level controller class which deals with user\n interactions and which manages the Lyx process and the interpreter processes.\n The interpreter specifications are read from the module interpreter_specs. The\n list interpreter_specs.all_specs in that module is assumed to contains all the\n specs.\"\"\"\n\n def __init__(self, clientname):\n\n self.no_echo = lyxNotebook_user_settings.no_echo\n self.buffer_replace_on_batch_eval = lyxNotebook_user_settings.buffer_replace_on_batch_eval\n\n # Set up interactions with Lyx.\n self.clientname = clientname\n self.lyx_process = InteractWithLyxCells(clientname)\n\n # Initialize the collection of interpreter processes.\n self.all_interps = InterpreterProcessCollection(\n self.lyx_process.server_get_filename()) # buffer name is file name\n self.all_interps.print_start_message()\n\n # Display a startup notification message in Lyx.\n message = \"LyX Notebook is now running...\"\n self.lyx_process.show_message(message)\n #self.display_popup_message(message=message, text=startMsg, seconds=3)\n\n # Start up the command loop.\n self.server_notify_loop()\n return # never executed; command loop above continues until sys.exit\n\n def reset_interpreters_for_buffer(self, buffer_name=\"\"):\n \"\"\"Reset all the interpreters for the buffer, starting completely new processes\n for them. If buffer_name is empty the current buffer is used.\"\"\"\n if buffer_name == \"\": buffer_name = self.lyx_process.server_get_filename()\n self.all_interps.reset_for_buffer(buffer_name)\n return\n\n def reset_all_interpreters_for_all_buffers(self):\n \"\"\"Reset all the interpreters for all buffers, starting not-on-demand\n interpreters for the current buffer.\"\"\"\n current_buffer = self.lyx_process.server_get_filename()\n self.all_interps.reset_all_interpreters_for_all_buffers(current_buffer)\n return\n\n def server_notify_loop(self):\n \"\"\"This is the main command loop, getting commands from Lyx and executing\n them.\"\"\"\n\n self.keymap = dict(keymap.all_commands_and_keymap) # dict mapping keys to commands\n\n while True:\n # Wait for a bound key in Lyx to be pressed, and get it when it is.\n key_pressed = self.lyx_process.wait_for_server_notify()\n if not key_pressed in self.keymap:\n continue # Key to ignore.\n\n # Eat any buffered events (notify or otherwise): avoid annoying user.\n self.lyx_process.get_server_event(info=False, error=False, notify=False)\n\n # Look up the action for the key.\n key_action = self.keymap[key_pressed]\n\n # =====================================================================\n # First, look for submenu call; open menu and reset key_action if found.\n # =====================================================================\n\n if key_action == \"pop up submenu\": # handle the pop-up menu option first\n choices = []\n for key, command in keymap.all_commands_and_keymap:\n if key is not None:\n key = key.replace(\"Shift+\", \"S-\") # this is to align columns\n key += \" \"*(5-len(key))\n else:\n key = \" \"*5\n choices.append(key + \" \" + command)\n choice_str = eg.choicebox(\n msg=\"Choose an action or click 'cancel'...\",\n title=\"LyX Notebook Submenu\",\n choices=choices,\n sort_choices=False,\n monospace_font=True,\n lines_to_show=len(choices))\n if choice_str is None:\n continue\n choice_str = choice_str[5:].strip() # EasyGUI returns whole line.\n key_action = choice_str\n\n # ====================================================================\n # Handle the general key 
actions, including commands set from submenu.\n # ====================================================================\n\n print(\"LyxNotebook processing user command:\", key_action)\n self.lyx_process.show_message(\"Processing user command: \" + key_action)\n\n #\n # Goto cell commands.\n #\n\n if key_action == \"goto next any cell\":\n self.lyx_process.open_all_cells() # gotoNextCell() needs open cells for now\n self.lyx_process.goto_next_cell()\n # self.lyx_process.goto_next_cell2() # alternate implementation, experimental\n\n elif key_action == \"goto prev any cell\":\n self.lyx_process.open_all_cells() # gotoPrevCell() needs open cells for now\n self.lyx_process.goto_prev_cell()\n\n elif key_action == \"goto next code cell\":\n self.lyx_process.open_all_cells() # gotoNextCell() needs open cells for now\n self.lyx_process.goto_next_cell(output=False)\n\n elif key_action == \"goto prev code cell\":\n self.lyx_process.open_all_cells() # gotoPrevCell() needs open cells for now\n self.lyx_process.goto_prev_cell(output=False)\n\n elif key_action == \"goto next init cell\":\n self.lyx_process.open_all_cells() # gotoNextCell() needs open cells for now\n self.lyx_process.goto_next_cell(standard=False, output=False)\n\n elif key_action == \"goto prev init cell\":\n self.lyx_process.open_all_cells() # gotoPrevCell() needs open cells for now\n self.lyx_process.goto_prev_cell(standard=False, output=False)\n\n elif key_action == \"goto next standard cell\":\n self.lyx_process.open_all_cells() # gotoNextCell() needs open cells for now\n self.lyx_process.goto_next_cell(init=False, output=False)\n\n elif key_action == \"goto prev standard cell\":\n self.lyx_process.open_all_cells() # gotoPrevCell() needs open cells for now\n self.lyx_process.goto_prev_cell(init=False, output=False)\n\n #\n # Ordinary cell-evaluate commands, done explicitly in Lyx.\n #\n\n elif key_action == \"evaluate current cell\":\n self.evaluate_lyx_cell()\n\n elif key_action == \"evaluate current cell after reinit\":\n print(\"Restarting all interpreters, single-interp restart unimplemented.\")\n self.reset_interpreters_for_buffer() # TODO currently restarts them all\n self.evaluate_lyx_cell()\n\n elif key_action == \"evaluate all code cells\":\n self.evaluate_all_code_cells()\n\n elif key_action == \"evaluate all code cells after reinit\":\n self.reset_interpreters_for_buffer()\n self.evaluate_all_code_cells()\n\n elif key_action == \"evaluate all init cells\":\n self.evaluate_all_code_cells(standard=False)\n\n elif key_action == \"evaluate all init cells after reinit\":\n self.reset_interpreters_for_buffer()\n self.evaluate_all_code_cells(standard=False)\n\n elif key_action == \"evaluate all standard cells\":\n self.evaluate_all_code_cells(init=False)\n\n elif key_action == \"evaluate all standard cells after reinit\":\n self.reset_interpreters_for_buffer()\n self.evaluate_all_code_cells(init=False)\n\n #\n # Batch evaluation commands.\n #\n # TODO: could clean up and move bufferReplaceOnBatchEval conditionals to the\n # replaceCurrentBufferFile function (after renaming it slightly)\n\n elif key_action == \"toggle buffer replace on batch eval\":\n self.buffer_replace_on_batch_eval = not self.buffer_replace_on_batch_eval\n self.lyx_process.show_message(\"toggled buffer replace on batch eval to: \"\n + str(self.buffer_replace_on_batch_eval))\n\n elif key_action == \"revert to most recent batch eval backup\":\n self.revert_to_most_recent_batch_eval_backup(messages=True)\n\n elif key_action == \"batch evaluate all code cells\":\n 
to_file_name = self.batch_evaluate_all_code_cells_to_lyx_file(\n init=True, standard=True, messages=True)\n if not self.buffer_replace_on_batch_eval:\n self.lyx_process.process_lfun(\"file-open\", to_file_name)\n else:\n self.replace_current_buffer_file(to_file_name,\n reload_buffer=True, messages=True)\n\n elif key_action == \"batch evaluate all code cells after reinit\":\n self.reset_interpreters_for_buffer()\n to_file_name = self.batch_evaluate_all_code_cells_to_lyx_file(\n init=True, standard=True, messages=True)\n if not self.buffer_replace_on_batch_eval:\n self.lyx_process.process_lfun(\"file-open\", to_file_name)\n else:\n self.replace_current_buffer_file(to_file_name,\n reload_buffer=True, messages=True)\n\n elif key_action == \"batch evaluate all init cells\":\n to_file_name = self.batch_evaluate_all_code_cells_to_lyx_file(\n init=True, standard=False, messages=True)\n if not self.buffer_replace_on_batch_eval:\n self.lyx_process.process_lfun(\"file-open\", to_file_name)\n else:\n self.replace_current_buffer_file(to_file_name,\n reload_buffer=True, messages=True)\n\n elif key_action == \"batch evaluate all init cells after reinit\":\n self.reset_interpreters_for_buffer()\n to_file_name = self.batch_evaluate_all_code_cells_to_lyx_file(\n init=True, standard=False, messages=True)\n if not self.buffer_replace_on_batch_eval:\n self.lyx_process.process_lfun(\"file-open\", to_file_name)\n else:\n self.replace_current_buffer_file(to_file_name,\n reload_buffer=True, messages=True)\n\n elif key_action == \"batch evaluate all standard cells\":\n to_file_name = self.batch_evaluate_all_code_cells_to_lyx_file(\n init=False, standard=True, messages=True)\n if not self.buffer_replace_on_batch_eval:\n self.lyx_process.process_lfun(\"file-open\", to_file_name)\n else:\n self.replace_current_buffer_file(to_file_name,\n reload_buffer=True, messages=True)\n\n elif (key_action ==\n \"batch evaluate all standard cells after reinit\"):\n self.reset_interpreters_for_buffer()\n to_file_name = self.batch_evaluate_all_code_cells_to_lyx_file(\n init=False, standard=True, messages=True)\n if not self.buffer_replace_on_batch_eval:\n self.lyx_process.process_lfun(\"file-open\", to_file_name)\n else:\n self.replace_current_buffer_file(to_file_name,\n reload_buffer=True, messages=True)\n\n #\n # Misc. 
commands.\n #\n\n elif key_action == \"reinitialize current interpreter\":\n print(\"Not implemented, restarting all interpreters.\")\n self.reset_interpreters_for_buffer()\n # TODO, currently restarts all: need to look up current interp\n self.lyx_process.show_message(\"all interpreters reinitialized\")\n\n elif key_action == \"reinitialize all interpreters for buffer\":\n self.reset_interpreters_for_buffer()\n self.lyx_process.show_message(\"all interpreters for buffer reinitialized\")\n\n elif key_action == \"reinitialize all interpreters for all buffers\":\n self.reset_all_interpreters_for_all_buffers()\n self.lyx_process.show_message(\n \"all interpreters for all buffer reinitialized\")\n\n elif key_action == \"write all code cells to files\":\n file_prefix = self.lyx_process.server_get_filename()\n if file_prefix.rstrip()[-4:] != \".lyx\": continue\n file_prefix = file_prefix.rstrip()[0:-4]\n data_tuple_list = []\n for spec in self.all_interps.interpreter_spec_list:\n # currently the interactWithLyx module does not make use of any\n # of the interpreterSpec data or its format, so we need to\n # look some things up to pass in\n inset_specifier = spec[\"inset_specifier\"]\n file_suffix = spec[\"file_suffix\"]\n # data tuple format is (filename, inset_specifier, commentBeginChar)\n data_tuple_list.append((\n file_prefix + \".allcells.\" +\n inset_specifier + file_suffix,\n inset_specifier,\n spec[\"comment_line\"]\n ))\n self.lyx_process.write_all_cell_code_to_file(data_tuple_list)\n self.lyx_process.show_message(\"all code cells were written to files\")\n\n elif key_action == \"insert most recent graphic file\":\n self.lyx_process.insert_most_recent_graphic_as_inset()\n self.lyx_process.show_message(\"inserted the most recent graphic file\")\n\n elif key_action == \"kill lyx notebook process\":\n sys.exit(0)\n\n elif key_action == \"prompt echo on\":\n self.no_echo = False\n\n elif key_action == \"prompt echo off\":\n self.no_echo = True\n\n elif key_action == \"toggle prompt echo\":\n self.no_echo = not self.no_echo\n message = \"toggled prompt echo to \" + str(not self.no_echo)\n self.lyx_process.show_message(message)\n\n elif key_action == \"evaluate newlines as current cell\":\n self.evaluate_lyx_cell(just_send_newlines=True)\n self.lyx_process.show_message(\"evaluated newlines as current cell\")\n\n #\n # Commands to open and close cells.\n #\n\n elif key_action == \"open all cells\":\n self.lyx_process.open_all_cells()\n self.lyx_process.show_message(\"opened all cells\")\n\n elif key_action == \"close all cells\":\n self.lyx_process.close_all_cells_but_current()\n self.lyx_process.show_message(\"closed all cells\")\n\n elif key_action == \"open all output cells\":\n self.lyx_process.open_all_cells(init=False, standard=False)\n self.lyx_process.show_message(\"opened all output cells\")\n\n elif key_action == \"close all output cells\":\n self.lyx_process.close_all_cells_but_current(init=False, standard=False)\n self.lyx_process.show_message(\"closed all output cells\")\n\n else:\n pass # ignore command from server-notify if it is not recognized\n return # never executed; loop forever or sys.exit\n\n def evaluate_all_code_cells(self, init=True, standard=True):\n \"\"\"Evaluate all cells. Quits evaluation between cells if any Lyx Notebook\n command key is pressed (any key bound to server-notify). 
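        For example, a call like\n
            self.evaluate_all_code_cells(init=True, standard=False)\n
        runs only the Init cells, in buffer order.\n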
The flags can\n be used to only evaluate certain types of cells.\"\"\"\n\n # first set up code to check between cell evals whether user wants to halt\n\n # initialize the relevant flag in the lyxProcess class\n self.lyx_process.ignored_server_notify_event = False\n # eat any server events from Lyx (after the NOTIFY command to do the eval)\n self.lyx_process.get_server_event(info=False, error=False, notify=False)\n\n # define a local function to check and query the user if a NOTIFY was ignored\n def check_for_ignored_server_notify():\n \"\"\"Return True if a server-notify was ignored and user wants to quit.\"\"\"\n # eat all events between cell evals, and check if NOTIFY was ignored\n self.lyx_process.get_server_event(info=False, error=False, notify=False)\n if self.lyx_process.ignored_server_notify_event is True:\n msg = \"Halt multi-cell evaluation at the current point?\"\n choices = ([\"Yes\", \"No\"])\n reply = eg.buttonbox(msg, choices=choices)\n if reply == \"Yes\":\n return True\n self.lyx_process.ignored_server_notify_event = False\n return False\n\n # now get cell count data and print a nice message\n num_init_cells, num_standard_cells, num_output_cells = \\\n self.lyx_process.get_global_cell_info()\n print(\"There are\", num_init_cells+num_standard_cells, \"code cells:\",\n num_standard_cells, \"Standard cells and\", num_init_cells, \"Init cells.\")\n if init and standard: print(\"Evaluating all the code cells.\")\n elif init: print(\"Evaluating all the Init cells only.\")\n elif standard: print(\"Evaluating all the Standard cells only.\")\n\n # cycle through the Init cells and then the Standard cells, evaluating\n if init:\n if num_init_cells > 0:\n self.lyx_process.goto_buffer_begin()\n self.lyx_process.open_all_cells(output=False, standard=False)\n for i in range(num_init_cells):\n user_wants_to_halt = check_for_ignored_server_notify()\n if user_wants_to_halt:\n print(\"Halting multi-cell evaluation before Init cell\", i+1,\n \"(a key bound to\\nserver-notify was pressed).\")\n return\n self.lyx_process.goto_next_cell(output=False, standard=False)\n self.evaluate_lyx_cell()\n if standard:\n if num_standard_cells > 0:\n self.lyx_process.goto_buffer_begin()\n self.lyx_process.open_all_cells(output=False, init=False)\n for i in range(num_standard_cells):\n user_wants_to_halt = check_for_ignored_server_notify()\n if user_wants_to_halt:\n print(\"Halting multi-cell evaluation before Standard cell\", i+1,\n \"(a key bound to\\nserver-notify was pressed).\")\n return\n self.lyx_process.goto_next_cell(output=False, init=False)\n self.evaluate_lyx_cell()\n print(\"Finished multi-cell evaluation.\")\n return\n\n def batch_evaluate_all_code_cells_to_lyx_file(self, init=True, standard=True,\n messages=False):\n \"\"\"Evaluate all the cells of the flagged basic types, and then write them\n to an output .lyx file. 
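        (For a buffer saved as doc.lyx the output is written to\n
        doc.newOutput.lyx; see the to_file_name calculation below.)\n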
The filename of the new file is returned.\"\"\"\n # TODO: also could print nice message to terminal like in regular routine\n\n if not init and not standard: return None\n if init and not standard: cell_types = \"Init\"\n if not init and standard: cell_types = \"Standard\"\n if init and standard: cell_types = \"Init and Standard\"\n\n if messages:\n self.lyx_process.show_message(\"Batch evaluating all %s cells.\" % (cell_types,))\n # get all cell text from the Lyx auto-save file (saves it as a side effect)\n all_cells = self.lyx_process.get_all_cell_text(use_latex_export=False)\n\n # evaluate all the cells in the list (results pasted onto the cells)\n self.evaluate_list_of_cell_classes(all_cells, init=init, standard=standard,\n messages=messages)\n\n # get current directory data (also changes current directory to buffer's dir)\n current_dir_data = self.lyx_process.get_updated_lyx_directory_data()\n\n # calc the name of the auto-save file and the new .lyx file's name\n from_file_name = current_dir_data[2] # prefer auto-save file\n if from_file_name == \"\": from_file_name = current_dir_data[1] # buffer's file\n to_file_name = current_dir_data[3][:-4] + \".newOutput.lyx\"\n\n # create the new .lyx file from the evaluated list of cells\n self.lyx_process.replace_all_cell_text_in_lyx_file(\n from_file_name, to_file_name, all_cells, init=init, standard=standard)\n\n if messages:\n self.lyx_process.show_message(\n \"Finished batch evaluation of all %s cells, wait for any buffer updates.\"\n % (cell_types,))\n return to_file_name\n\n def evaluate_list_of_cell_classes(self, cell_list, init=True, standard=True,\n messages=False):\n \"\"\"Evaluates the list of Cell class instances, first the init cells and\n then the standard cells (unless one of the flags is set False). Used for\n batch processing and faster evaluation of a group of cells. The resulting\n output is pasted onto the cells in cell_list as the data field\n evaluation_output. The list cell_list is returned as a convenience.\"\"\"\n msg = \"Evaluating %s cell %s (%s cell).\"\n if init:\n num = 0\n for cell in cell_list:\n basic_type, inset_spec = cell.get_cell_type()\n if basic_type == \"Init\":\n num += 1\n if messages:\n self.lyx_process.show_message(msg % (basic_type, num, inset_spec))\n self.evaluate_code_in_cell_class(cell)\n if messages:\n self.lyx_process.show_message(\"Finished Init cell evaluations.\")\n if standard:\n num = 0\n for cell in cell_list:\n basic_type, inset_spec = cell.get_cell_type()\n if basic_type == \"Standard\":\n num += 1\n if messages:\n self.lyx_process.show_message(msg % (basic_type, num, inset_spec))\n self.evaluate_code_in_cell_class(cell)\n if messages:\n self.lyx_process.show_message(\"Finished Standard cell evaluations.\")\n return cell_list\n\n def evaluate_lyx_cell(self, just_send_newlines=False):\n \"\"\"Evaluate the code cell at the current cursor position in Lyx. Ignore if\n not inside a code cell.\"\"\"\n\n # get the code text from the current cell\n code_cell_text = self.lyx_process.get_current_cell_text()\n\n if code_cell_text is None:\n return # Not in a cell in the first place.\n\n # check that cell is code (could just check output=None later, but do here, too)\n basicType, inset_specifier_language = code_cell_text.get_cell_type()\n if basicType == \"Output\":\n return # Not a code cell.\n\n # TODO: optional line wrapping at the Python level (but currently works OK\n # with listings). Currently does nothing. Can do the same with output\n # text below, but not currently done. 
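        # A minimal sketch of what wrap_long_lines could do, assuming plain\n
        # character-count wrapping is acceptable (textwrap is stdlib; the\n
        # 80-column width here is an assumption, not a project setting):\n
        #     import textwrap\n
        #     def wrap_long_lines(self, line_list, width=80):\n
        #         wrapped = []\n
        #         for line in line_list:\n
        #             if len(line.rstrip(\"\\n\")) <= width:\n
        #                 wrapped.append(line)  # short enough, keep as-is\n
        #             else:\n
        #                 for piece in textwrap.wrap(line, width=width):\n
        #                     wrapped.append(piece + \"\\n\")\n
        #         return wrapped\n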
Could also highlight if that\n # would display in inset and be removable for later evals.\n code_cell_text = self.wrap_long_lines(code_cell_text) # do any line-wrapping\n\n # do the actual code evaluation and get the output\n output = self.evaluate_code_in_cell_class(code_cell_text, just_send_newlines)\n\n #\n # Replace the old output with the new output (and maybe replace input).\n #\n\n \"\"\"\n print(\"debug code list being replaced with:\\n\", code_cell_text)\n print(\"debug end of code list being replaced with\")\n print(\"debug code output being replaced with:\\n\", output)\n print(\"debug end of code output being replaced with\")\n \"\"\"\n\n # Rewrite input, might be useful for wrapping or formatting.\n # Bug on rewrite with empty last line! Empty last lines are\n # ignored by listings when saving to Latex (and sometimes in the Lyx inset).\n # Thus they are not read in correctly after having been saved, and are not\n # printed. So perhaps better not to display them in LyX: they won't print.\n rewrite_code_cells = True\n if rewrite_code_cells and not just_send_newlines:\n self.lyx_process.replace_current_cell_text(code_cell_text, assert_inside_cell=True)\n elif not just_send_newlines:\n # some blue selection-highlighting feedback even when text not replaced...\n self.lyx_process.process_lfun(\"inset-select-all\")\n self.lyx_process.process_lfun(\"escape\")\n\n # Note there is a bug in listings 1.3 at least: showlines=true doesn't work\n # and will not show empty lines at the end of a listings box... Adding spaces\n # or tabs on the line does not help, workaround of redefining formfeed in\n # listings is apparently blocked by passthru Flex option. So warn users, minor\n # bug remains.\n # if len(output) > 0 and output[-1] == \"\\n\": output[-1] = \"\\f\\n\"\n\n basicType, inset_specifier = code_cell_text.get_cell_type()\n self.lyx_process.replace_current_output_cell_text(output,\n assert_inside_cell=True, inset_specifier=inset_specifier)\n return\n\n def evaluate_code_in_cell_class(self, code_cell_text, just_send_newlines=False):\n \"\"\"Evaluate the lines of code in the Cell class instance code_cell_text.\n The output is returned as a list of lines, and is also pasted onto the\n code_cell_text instance as the data field evaluation_output. Returns\n None for a non-code cell.\"\"\"\n\n basicType, inset_specifier_lang = code_cell_text.get_cell_type()\n if basicType == \"Output\": # if not a code cell\n code_cell_text.evaluation_output = None\n return None\n\n # Find the appropriate interpreter to evaluate the cell.\n # Note that the inset_specifier names are required to be unique.\n interpreter_process = self.all_interps.get_interpreter_process(\n self.lyx_process.server_get_filename(), inset_specifier_lang)\n interpreter_spec = interpreter_process.spec\n\n # If the interpreter_spec defines a noop_at_cell_end then append it to the cell\n # code. Python can add \"pass\\n\", for example, to always return to outer\n # indent level. 
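        # (Hypothetical spec entry, for illustration only: a Python spec with\n
        #     \"noop_at_cell_end\": \"pass\\n\"\n
        # would make every cell end by closing any open indent block.)\n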
Most languages will define it as None.\n noop_at_cell_end = interpreter_spec[\"noop_at_cell_end\"]\n extra_code_lines = []\n if noop_at_cell_end: # doesn't run for None or \"\", since they eval to False\n extra_code_lines = noop_at_cell_end.splitlines(True) # keepends=True\n\n # use another variable, to evaluate with modifications without changing original\n modified_code_cell_text = code_cell_text + extra_code_lines\n if just_send_newlines:\n modified_code_cell_text = [\"\\n\", \"\\n\"] + extra_code_lines\n\n # loop through each line of code, evaluating it and saving the results\n output = []\n ignore_empty_lines = interpreter_spec[\"ignore_empty_lines\"]\n if just_send_newlines: ignore_empty_lines = False\n for code_line in modified_code_cell_text:\n #print(\"debug processing line:\", [code_line])\n interp_result = self.process_physical_code_line(\n interpreter_process, code_line, ignore_empty_lines=ignore_empty_lines)\n #print(\"debug result of line:\", [interp_result])\n output = output + interp_result # get the result, per line\n\n if len(output) > lyxNotebook_user_settings.max_lines_in_output_cell:\n output = output[:lyxNotebook_user_settings.max_lines_in_output_cell]\n output.append(\"<<< WARNING: Lines truncated by LyX Notebook. >>>\"\"\")\n\n if self.no_echo is False and interpreter_spec[\"prompt_at_cell_end\"]:\n output.append(interpreter_process.most_recent_prompt)\n\n code_cell_text.evaluation_output = output\n return output\n\n def update_prompts(self, interp_result, interpreter_process):\n \"\"\"A utility function to update prompts across interpreter evaluation\n lines. The argument interp_result is a list of lines resulting from\n an interpreter evaluation. This routine prepends the most recently saved\n prompt to the first command on the list, and saves the last line of the\n list as the new most recently saved prompt (to prepend next time). Any\n autoindenting after prompts is stripped off.\"\"\"\n if len(interp_result) == 0: return\n interp_result[0] = interpreter_process.most_recent_prompt + interp_result[0]\n most_recent_prompt = interp_result[-1]\n # remove any autoindent from most_recent_prompt; note main and continuation\n # prompts might have different lengths (though they usually do not)\n if most_recent_prompt.find(interpreter_process.spec[\"main_prompt\"]) == 0:\n interpreter_process.most_recent_prompt = interpreter_process.spec[\"main_prompt\"]\n #print(\"debug replaced a main prompt\")\n elif most_recent_prompt.find(interpreter_process.spec[\"cont_prompt\"]) == 0:\n interpreter_process.most_recent_prompt = interpreter_process.spec[\"cont_prompt\"]\n #print(\"debug replaced a cont prompt\")\n else:\n print(\"Warning: prompt not recognized as main or continuation prompt.\")\n return interp_result[0:-1]\n\n def process_physical_code_line(self, interpreter_process, code_line,\n ignore_empty_lines=True):\n \"\"\"Process the physical line of code code_line in the interpreter with\n index interpIndex. 
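        (That is, in the interpreter wrapped by the interpreter_process\n
        argument.)  As a hypothetical exchange: sending \"x = 1\\n\" to a\n
        Python interpreter yields only the next \">>> \" prompt line, which\n
        update_prompts() strips off and saves for the following call.\n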
Return a (possibly empty) list of all the result lines.\n The option ignore_empty_lines ignores completely empty (all whitespace) lines,\n but not lines with comments.\"\"\"\n\n # TODO, maybe convert any tabs to spaces in input lines\n\n interp_spec = interpreter_process.spec\n indent_calc = interpreter_process.indent_calc\n\n #print(\"\\ndebug code_line being processed is\", code_line.rstrip())\n\n # Ignore fully empty lines if ignore_empty_lines, but not lines with comments, etc.\n # Python interpreter actually only ends indent blocks on zero-length lines, not\n # lines with only whitespace, but those might as well be ignored, too.\n if ignore_empty_lines and len(code_line.rstrip()) == 0:\n if not indent_calc.in_string_literal():\n return []\n\n # update the indentation calculations for current physical line\n indent_calc.update_for_physical_line(code_line)\n\n # send a completely empty line if the indentation level decreased to zero\n # (uses a recursive function call which does not ignore_empty_lines)\n first_results = []\n if interp_spec[\"indent_down_to_zero_newline\"] and indent_calc.indent_level_down_to_zero():\n first_results = self.process_physical_code_line(interpreter_process, \"\\n\",\n ignore_empty_lines=False)\n\n # send the line of code to the interpreter\n interpreter_process.external_interp.write(code_line)\n\n # get the result of interpreting the line\n interp_result = interpreter_process.external_interp.read()\n interp_result = interp_result.splitlines(True) # keepends=True\n\n # if the final prompt was a main prompt, not continuation, reset indent counts\n if (len(interp_result) > 0\n and interp_result[-1].rstrip() == interp_spec[\"main_prompt\"].rstrip()\n and interp_result[-1].find(interp_spec[\"main_prompt\"]) == 0):\n indent_calc.reset()\n\n # update the prompts (to remove final prompt and put prev prompt at beginning)\n interp_result = self.update_prompts(interp_result, interpreter_process)\n\n # if spec removeNewlineBeforePrompt is True and last line is empty, remove it\n if len(interp_result) > 0 and interp_spec[\"del_newline_pre_prompt\"]:\n if interp_result[-1].strip() == \"\":\n interp_result = interp_result[:-1]\n\n # return the output, suppressing the first line if echo off\n # (note we're processing a physical line here, so the first line always\n # contains a prompt; even continued lines with no output have a prompt\n # line at the beginning)\n if self.no_echo and len(interp_result) > 0:\n return first_results + interp_result[1:]\n else:\n return first_results + interp_result\n\n def wrap_long_lines(self, line_list):\n \"\"\"A stub, which later can be used to do line-wrapping on long lines,\n or modified and renamed to do any sort of processing or formatting.\"\"\"\n return line_list\n\n def display_popup_message(self, message, text=None, seconds=3):\n \"\"\"Briefly display a message in a text window. Will use a textbox if\n text is not None, otherwise a msgbox. This is a kludge using a fork\n to work around the limitations of EasyGUI. BEWARE if an exit handler is\n later added... 
killing child might kill the running interpreters.\n Works, but is not currently used; messages are sent to the Lyx status\n bar instead.\"\"\"\n newpid = os.fork()\n if newpid == 0:\n # child displays text message until killed by parent\n if text is None:\n eg.msgbox(msg=message, title=\"LyX Notebook\", ok_button=\"\")\n else:\n eg.textbox(msg=message, title=\"LyX Notebook\", text=text)\n sys.exit(0) # in case user closes window before kill\n else:\n time.sleep(seconds)\n os.kill(newpid, signal.SIGHUP)\n return\n\n def replace_current_buffer_file(self, newfile, reload_buffer=True, messages=True):\n \"\"\"Replace the current buffer file with the file newfile, saving\n a backup of the old file. If reload_buffer=True then the Lyx buffer is\n reloaded.\"\"\"\n # write out buffer if it is unsaved, before copying to backup file\n self.lyx_process.process_lfun(\"buffer-write\", warn_error=False)\n\n # get the basic data\n dir_data = self.lyx_process.get_updated_lyx_directory_data(auto_save_update=False)\n num_backup_buffer_copies = lyxNotebook_user_settings.num_backup_buffer_copies\n\n # move the older save files down the list to make room\n for saveNum in range(num_backup_buffer_copies-1, 0, -1):\n older = \".LyxNotebookSave\" + str(saveNum) + \"_\" + dir_data[1]\n newer = \".LyxNotebookSave\" + str(saveNum-1) + \"_\" + dir_data[1]\n if os.path.exists(newer):\n if os.path.exists(older): os.remove(older)\n os.rename(newer, older)\n\n # wait for the buffer-write command started above to finish before final move\n prev_mtime = 0\n while True:\n mtime = os.stat(os.path.join(dir_data[0], dir_data[1])).st_mtime\n if mtime > prev_mtime:\n # mtime only has 1-sec resolution, so must wait over a sec...\n # could use os.path.getmtime if OS supports greater (float) resolution\n # could also check if write returned error or account for the move\n # times above to reduce the delay\n time.sleep(1.1)\n prev_mtime = mtime\n else: break\n\n # variable newer should have ended up at save file 0, so move buffer to that\n if os.path.exists(newer): os.remove(newer)\n os.rename(dir_data[1], newer)\n os.rename(newfile, dir_data[1])\n\n if reload_buffer: self.reload_buffer_file()\n if messages: self.lyx_process.show_message(\n \"Replaced current buffer with newly evaluated output cells.\")\n return\n\n def reload_buffer_file(self, dont_ask_first=True):\n \"\"\"Reload the current buffer file. If dont_ask_first is True a method is used\n which simply does the reload without asking the user.\"\"\"\n if dont_ask_first:\n # This command does not ask and always reloads:\n self.lyx_process.process_lfun(\"vc-command\", 'R $$p \"/bin/echo reloading...\"')\n # TODO: Bug if we do not modify file and write it back out as below! 
Why?\n # Cells are not read back in right, otherwise, until a save is done.\n self.lyx_process.process_lfun(\"command-sequence\",\n \"self-insert x;char-delete-backward;buffer-write\")\n else:\n # This LFUN will ask the user before reloading:\n self.lyx_process.process_lfun(\"buffer-reload\")\n return\n\n def revert_to_most_recent_batch_eval_backup(self, messages=False):\n \"\"\"Revert the most recently saved batch backup file to be current buffer.\"\"\"\n # get basic data, autosaving as last resort in case this makes things worse\n dir_data = self.lyx_process.get_updated_lyx_directory_data(auto_save_update=True)\n num_backup_buffer_copies = lyxNotebook_user_settings.num_backup_buffer_copies\n\n most_recent_backup = \".LyxNotebookSave0_\" + dir_data[1]\n most_recent_backup_full = os.path.join(dir_data[0], most_recent_backup)\n current_buffer_full = dir_data[3]\n\n if not os.path.exists(most_recent_backup_full):\n if messages:\n msg = \"Error: No backup file to recover.\"\n choices = ([\"OK\"])\n eg.buttonbox(msg, choices=choices)\n self.lyx_process.show_message(msg)\n print(msg)\n return\n\n back_time = time.ctime(os.stat(most_recent_backup_full).st_mtime)\n buffer_time = time.ctime(os.stat(current_buffer_full).st_mtime)\n\n msg = \"Are you sure you want to replace the current buffer with\"\n msg += \" the most recent backup?\"\n msg += \"\\nBuffer's time is:\\n \" + buffer_time\n msg += \"\\nBackup's time is:\\n \" + back_time\n choices = ([\"Yes\", \"No\"])\n reply = eg.buttonbox(msg, choices=choices)\n if reply != \"Yes\": return\n\n os.remove(current_buffer_full)\n os.rename(most_recent_backup_full, current_buffer_full)\n\n # shift down all the older backups\n for saveNum in range(1, num_backup_buffer_copies):\n older = \".LyxNotebookSave\" + str(saveNum) + \"_\" + dir_data[1]\n newer = \".LyxNotebookSave\" + str(saveNum-1) + \"_\" + dir_data[1]\n if os.path.exists(older):\n if os.path.exists(newer): os.remove(newer)\n os.rename(older, newer)\n\n self.reload_buffer_file()\n if messages:\n msg = \"Finished replacing current buffer with most recent batch backup\"\n msg += \" save file.\"\n self.lyx_process.show_message(msg)\n print(msg)\n return\n\n#\n# Testing code below, not usually run from this file as __main__. 
The lyxNotebook\n# script is now used to start up (after making sure another process isn't running).\n#\n\nif __name__ == \"__main__\":\n\n print(\"===================================================\")\n print()\n print(\"Starting the Lyx Notebook program...\")\n\n # start the controller\n\n controller = ControllerLyxWithInterpreter(\"lyxNotebookClient\")\n\n","sub_path":"src/lyxNotebook/controller_lyx_with_interpreter.py","file_name":"controller_lyx_with_interpreter.py","file_ext":"py","file_size_in_byte":53288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"494150421","text":"from sentiment_analysis import ArticleSentiment \nfrom news_query import return_articles\n\narticle_obj = ArticleSentiment('http://www.cnn.com/2018/01/07/politics/jake-tapper-stephen-miller/index.html')\nlist_articles = return_articles('WEINSTEIN', '2018-01-08')\n\nfor each in list_articles:\n try:\n obj = ArticleSentiment(each['url'])\n print('TITLE',obj.title_sentiment)\n print('TOTAL' , obj.overall_sentiment)\n\n except:\n print('failed for' , each['url'])\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"301648473","text":"import os\nimport subprocess\n\nfrom loguru import logger\n\n\nclass AndroidDebugBridge:\n\n \"\"\"adb脚本\n 1.获取设备id\n 2.文件导入\n 3.文件导出\n \"\"\"\n\n def get_devices(self):\n devices = []\n result = subprocess.Popen(\"adb devices\", shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).stdout.readlines()\n for item in result:\n t = item.decode().split(\"\\tdevice\")\n if len(t) >= 2:\n devices.append(t[0])\n if len(devices) ==0 :\n raise (\"无有效设备可连接\")\n return devices\n\n def restart_device(self, device=None):\n subprocess.Popen(\"adb -s %s reboot\" % (device), shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n def file_push(self, file_path, store_path):\n \"\"\"\n 导入文件\n :param file_path: 电脑文件路径\n :param store_path: 设备存放路径\n :return:\n \"\"\"\n if not os.path.exists(file_path):\n logger.error(f\"文件路径不存在:{file_path}\\n\")\n raise Exception(f\"文件路径不存在:{file_path}\")\n try:\n os.system(f\"adb push {file_path} {store_path}\")\n except Exception as error:\n logger.error(\n f\"导入文件失败:{error}\\n \"\n f\"导入文件:{file_path}\\n\")\n\n def file_pull(self, file_path, store_path):\n \"\"\"\n 导出文件\n :param file_path: 设备文件路径\n :param store_path: 电脑存放路径\n :return:\n \"\"\"\n if not os.path.exists(store_path):\n logger.error(f\"文件夹路径不存在:{store_path}\\n\")\n raise Exception(f\"文件夹路径不存在:{store_path}\")\n try:\n os.system(f\"adb pull {file_path} {store_path}\")\n except Exception as error:\n logger.error(f\"导出文件失败:{error}\\n \"\n f\"导出文件:{file_path}\\n\")\n\n\nif __name__ == '__main__':\n adb = AndroidDebugBridge()\n # print(os.system(\"source ~/.bash_profile\"))\n # print(os.system(\"adb devices\"))\n # print(os.system(\"adb --version\"))\n print(adb.get_devices())","sub_path":"base/BaseAdb.py","file_name":"BaseAdb.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"349727370","text":"from pyparrot.Minidrone import Mambo\nfrom pyparrot.DroneVision import DroneVision\nimport threading\nimport cv2\nimport time\n\nclass UserVision:\n def __init__(self, vision):\n self.index = 0\n self.vision = vision\n\n def save_pictures(self, args):\n print(\"in save pictures on image %d \" % self.index)\n\n img = 
self.vision.get_latest_valid_picture()\n\n if (img is not None):\n filename = \"test_image_%06d.png\" % self.index\n cv2.imshow('frame', img)\n cv2.waitKey(1)\n self.index +=1\n #print(self.index)\n\nmamboAddr = \"e0:14:d0:63:3d:d0\"\n\nmambo = Mambo(mamboAddr, use_wifi=True)\nprint(\"trying to connect to mambo now\")\nsuccess = mambo.connect(num_retries=3)\nprint(\"connected: %s\" % success)\n\nif (success):\n # get the state information\n print(\"sleeping\")\n mambo.smart_sleep(1)\n mambo.ask_for_state_update()\n mambo.smart_sleep(1)\n\n print(\"Preparing to open vision\")\n mamboVision = DroneVision(mambo, is_bebop=False, buffer_size=30)\n userVision = UserVision(mamboVision)\n mamboVision.set_user_callback_function(userVision.save_pictures, user_callback_args=None)\n success = mamboVision.open_video()\n print(\"Success in opening vision is %s\" % success)\n\n if (success):\n a = input(\"presione una tecla para terminar\")\n print(\"Vision successfully started!\")\n #removed the user call to this function (it now happens in open_video())\n #mamboVision.start_video_buffering()\n print(\"Sleeeping for 15 seconds - move the mambo around\")\n #mambo.smart_sleep(15)\n # done doing vision demo\n print(\"Ending the sleep and vision\")\n cv2.destroyAllWindows()\n mamboVision.close_video()\n mambo.smart_sleep(5)\n\n print(\"disconnecting\")\n mambo.disconnect()\n","sub_path":"Vision.py","file_name":"Vision.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"487705246","text":"import threading\nimport queue\nimport time\nimport numpy as np\nimport serial\nimport matplotlib.pyplot as graf\nimport pickle\nimport metoder\n\nclass P:\n\n def __init__(self,x,skyv,tidsomloepnr,x_data,tid_data,runde,avstand):\n self.__x = x\n self.__skyv = skyv\n self.__tidsomloepnr = tidsomloepnr\n self.__x_data = x_data\n self.__tid_data = tid_data\n self.__runde = runde\n self.__avstand = avstand\n\n def getX(self):\n return self.__x\n\n def setX(self, x):\n self.__x = x\n\n def getSkyv(self):\n return self.__skyv\n\n def setSkyv(self,skyv):\n self.__skyv = skyv\n\n def gettidsomloepnr(self):\n return self.__tidsomloepnr\n\n def settidsomloepnr(self, tidsomloepnr):\n self.__tidsomloepnr = tidsomloepnr\n\n def getx_data(self):\n return self.__x_data\n\n def setx_data(self, x_data):\n\n self.__x_data = x_data\n\n def gettid_data(self):\n return self.__tid_data\n\n def settid_data(self, tid_data):\n self.__tid_data = tid_data\n\n def getrunde(self):\n return self.__runde\n\n def setrunde(self, runde):\n self.__runde = runde\n\n def get_avstand(self):\n return self.__avstand\n\n def set_avstand(self, avstand):\n self.__avstand = avstand\n\ndef hexascii2int(hex_teikn):\n if '0' <= hex_teikn <= '9':\n return (int(ord(hex_teikn) - 48)) # ASCII-koden for '0' er 0x30 = 48\n elif 'A' <= hex_teikn <= 'F':\n return (int(ord(hex_teikn) - 55)) # ASCII-koden for 'A' er 0x41 = 65\n\n# Lese metode ------------------------------------------------------------------------------------\n\ndef start_lesing():\n print('1')\n def lesing(port, hvilken_komando):\n tid = []\n x_data = []\n p1 = P(0, 0, 0, 0, 0, 0, 0)\n l = 1\n\n metoder.lagring_av_kontinuerlig_data(1,0,0)\n\n kl = 99\n while (l <= 10):\n a = 0\n data = []\n while a < 1:\n\n brukarkommandoar.put(kommando) # Gi melding til serietraaden om aa starta sjekking av port\n serieport.write('k'.encode('utf-8')) # Gi melding til uC-en om aa koeyra i gong # KT la til encoding\n teikn = 
str(serieport.read(1), encoding='utf-8') # Les eitt teikn. #KT La til convert til str\n data.append(teikn)\n\n if teikn == 'Z':\n a = 1\n #if teikn == 'S':\n # kl = 1\n\n #if kl <= 3:\n # print(teikn)\n # kl += 1\n\n #print(teikn)\n #print('her ',p1.getrunde())\n\n\n # dekoding\n p1 = (sortering(data, p1))\n\n tid.append(p1.gettid_data())\n x_data.append(p1.getx_data())\n\n metoder.lagring_av_kontinuerlig_data(1,tid,x_data)\n\n nyavstand = p1.get_avstand()\n metoder.lagring_av_kontinuerlig_data(3,nyavstand,p1.getrunde())\n\n (stop,stop2) = metoder.henting_av_kontinuerlig_data(2)\n\n\n if stop == 1:\n\n time.sleep(0.5)\n serieport.write('s'.encode('utf-8')) # Gi melding til uC-en om aa stoppa sending av nye data #KT La til encoding\n return\n\n # dekoding av data metode ----------------------------------------------------------------------------------------\n\n def sortering(inn_data, p1):\n i = 0\n k = 0\n ut_data_x = []\n ut_data_tid = []\n ut_data_avstand = []\n x_data = []\n tid = []\n avstand = []\n\n # Sortering av tid og x data\n while i < len(inn_data):\n if inn_data[i] == 'Y':\n ut_data_x.append(\n 4096 * hexascii2int(inn_data[i + 1]) + 256 * hexascii2int(inn_data[i + 2]) + 16 * hexascii2int(\n inn_data[i + 3]) + hexascii2int(inn_data[i + 4]))\n if inn_data[i] == 'T':\n ut_data_tid.append(16 * hexascii2int(inn_data[i + 1]) + hexascii2int(inn_data[i + 2]))\n if inn_data[i] == 'S':\n ut_data_avstand.append(\n 4096 * hexascii2int(inn_data[i + 1]) + 256 * hexascii2int(inn_data[i + 2]) + 16 * hexascii2int(\n inn_data[i + 3]) + hexascii2int(inn_data[i + 4]))\n\n i += 1\n\n # Behandling av x data\n if len(ut_data_x) == 0:\n print('ingen data')\n else:\n k = 0\n for k in range(0, len(ut_data_x)):\n if ut_data_x[k] >= 32768:\n x_data.append((float(ut_data_x[k]) - 65536.0) / 1000.0) # 1mg pr. LSb iflg. databladet.\n else:\n x_data.append(float(ut_data_x[k] / 1000.0))\n k += 1\n\n\n # Behandling av tid data\n if len(ut_data_tid) == 0:\n print('ingen data')\n else:\n k = 0\n Ts = 0.1\n for k in range(0, len(ut_data_tid)):\n tid.append(ut_data_tid[k] + p1.gettidsomloepnr() * 256)\n if ut_data_tid[k] == 255:\n p1.settidsomloepnr(p1.gettidsomloepnr() + 1)\n k += 1\n\n if p1.getX() == 0:\n p1.setSkyv(tid[0]) # Vil at tidslista skal starta paa null.\n p1.setX(1)\n\n for k in range(0, len(tid)):\n tid[k] = Ts * (tid[k] - p1.getSkyv())\n\n # Behandling av x data\n if len(ut_data_avstand) == 0:\n print('ingen data')\n else:\n k = 0\n for k in range(0, len(ut_data_avstand)):\n if ut_data_avstand[k] >= 32768:\n avstand.append((float(ut_data_avstand[k]) - 65536.0)) # 1mg pr. LSb iflg. 
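The `>= 32768` branches in sortering implement two's-complement decoding of the 16-bit words assembled from hex-ASCII nibbles. A self-contained equivalent of that branch:

```python
# Two's-complement decode of a 16-bit word: values >= 32768 wrap negative.
def to_signed16(raw):
    return raw - 65536 if raw >= 32768 else raw

assert to_signed16(0xFFFF) == -1
assert to_signed16(0x7FFF) == 32767
x_g = to_signed16(0x8000) / 1000.0   # accelerometer scaling, 1 mg per LSB -> -32.768
```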
databladet.\n else:\n avstand.append(float(ut_data_avstand[k]))\n k += 1\n print(avstand)\n\n ## avstands omregning\n ny_ok = sum(avstand)/85.5\n\n sensor_cm = 57.653*(pow(ny_ok,-0.9891))\n\n # lagrer tid og x data i p1\n p1.set_avstand(sensor_cm)\n p1.settid_data(tid)\n p1.setx_data(x_data)\n p1.setrunde(p1.getrunde() + 1)\n\n #print('x: ', x_data, 'tid: ', tid, 'runde: ', p1.getrunde())\n\n\n return p1\n\n#----------------------------------------------------------------------------------\n # Installering av variabler\n kommando = '0'\n brukarkommandoar = queue.Queue()\n connected = True\n port = 'COM3'\n baud = 115200 # 9600\n\n serieport = serial.Serial(port, baud, timeout=1)\n\n if serieport.isOpen():\n print(serieport.name, 'er open')\n else:\n serieport.open()\n\n\n\n starting = threading.Thread(target=lesing, args=(serieport, kommando))\n starting.start()\n\n return\n\n\n\n\n\n\n\n","sub_path":"Ny_Python/Logging.py","file_name":"Logging.py","file_ext":"py","file_size_in_byte":6760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"630890075","text":"from datetime import datetime\n\nimport pytz\n\nfrom yadm import fields\nfrom yadm.documents import Document\n\nfrom .test_database import BaseDatabaseTest\n\n\nclass DatetimeFieldTest(BaseDatabaseTest):\n def setUp(self):\n super().setUp()\n\n class TestDoc(Document):\n __collection__ = 'testdocs'\n dt = fields.DatetimeField()\n now = fields.DatetimeField(auto_now=True)\n\n self.TestDoc = TestDoc\n\n def test_cast(self):\n doc = self.TestDoc()\n doc.dt = datetime(1970, 1, 1, tzinfo=pytz.utc).isoformat()\n self.assertIsInstance(doc.dt, datetime)\n\n def test_default_none(self):\n doc = self.TestDoc()\n self.assertFalse(hasattr(doc, 'dt'))\n\n def test_save(self):\n doc = self.TestDoc()\n doc.dt = datetime(1970, 1, 1, tzinfo=pytz.utc)\n self.db.insert(doc)\n\n doc = self.db.get_queryset(self.TestDoc).with_id(doc.id)\n data = self.db.db.testdocs.find_one()\n\n self.assertIsInstance(data['dt'], datetime)\n self.assertEqual(data['dt'], doc.dt)\n\n def test_load(self):\n epoch = datetime(1970, 1, 1, tzinfo=pytz.utc)\n _id = self.db.db.testdocs.insert({'dt': epoch})\n\n doc = self.db.get_queryset(self.TestDoc).with_id(_id)\n\n self.assertIsInstance(doc.dt, datetime)\n self.assertEqual(doc.dt, epoch)\n\n def test_default_auto_now(self):\n doc = self.TestDoc()\n self.assertTrue(hasattr(doc, 'now'))\n self.assertIsInstance(doc.now, datetime)\n\n def test_default_now_save(self):\n doc = self.TestDoc()\n self.db.insert(doc)\n\n doc = self.db.get_queryset(self.TestDoc).with_id(doc.id)\n data = self.db.db.testdocs.find_one()\n\n self.assertIsInstance(data['now'], datetime)\n self.assertEqual(data['now'], doc.now)\n\n def test_now_save(self):\n epoch = datetime(1970, 1, 1, tzinfo=pytz.utc)\n\n doc = self.TestDoc()\n doc.now = epoch\n self.db.insert(doc)\n\n doc = self.db.get_queryset(self.TestDoc).with_id(doc.id)\n data = self.db.db.testdocs.find_one()\n\n self.assertIsInstance(data['now'], datetime)\n self.assertEqual(data['now'], epoch)\n\n def test_now_load(self):\n epoch = datetime(1970, 1, 1, tzinfo=pytz.utc)\n _id = self.db.db.testdocs.insert({'now': epoch})\n\n doc = self.db.get_queryset(self.TestDoc).with_id(_id)\n\n self.assertIsInstance(doc.now, datetime)\n self.assertEqual(doc.now, 
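These yadm field tests lean on timezone-aware datetimes comparing equal across serialization. A minimal round-trip illustrating the cast the DatetimeField performs, assuming Python 3.7+ for fromisoformat:

```python
# An aware datetime survives ISO-8601 serialization and compares equal.
from datetime import datetime
import pytz

epoch = datetime(1970, 1, 1, tzinfo=pytz.utc)
parsed = datetime.fromisoformat(epoch.isoformat())
assert parsed == epoch   # equal instants, despite distinct tzinfo objects
```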
epoch)\n","sub_path":"tests/test_fields_datetime.py","file_name":"test_fields_datetime.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"470052074","text":"import base64\nimport json\nimport uuid\n\nimport pytest\nfrom httpx import AsyncClient, MockTransport, Request, Response, codes\nfrom nexus_api import NexusAsyncClient, ResourceCatalog\n\nnexus_configuration_header_key = \"Nexus-Configuration\"\n\nrefresh_token = str(uuid.uuid1())\nrefresh_token_try_count: int = 0\ncatalog_try_count: int = 0\n\ndef _handler1(request: Request):\n global refresh_token\n global catalog_try_count\n global refresh_token_try_count\n\n if \"catalogs\" in request.url.path:\n catalog_try_count += 1\n actual = request.headers[\"Authorization\"]\n\n if catalog_try_count == 1:\n assert f\"Bearer 111\" == actual\n return Response(codes.UNAUTHORIZED, headers={\"WWW-Authenticate\" : \"Bearer The token expired at ...\"})\n\n else:\n catalog_json_string = '{\"Id\":\"my-catalog-id\",\"Properties\":null,\"Resources\":null}'\n assert f\"Bearer 333\" == actual\n return Response(codes.OK, content=catalog_json_string)\n\n elif \"tokens/refresh\" in request.url.path:\n refresh_token_try_count += 1\n requestContent = request.content.decode(\"utf-8\")\n\n if refresh_token_try_count == 1:\n assert f'{{\"refreshToken\": \"{refresh_token}\"}}' == requestContent\n return Response(codes.OK, content='{ \"accessToken\": \"111\", \"refreshToken\": \"222\" }')\n\n else:\n assert '{\"refreshToken\": \"222\"}' == requestContent\n return Response(codes.OK, content='{ \"accessToken\": \"333\", \"refreshToken\": \"444\" }')\n\n else:\n raise Exception(\"Unsupported path.\")\n\n@pytest.mark.asyncio\nasync def can_authenticate_and_refresh_test():\n\n # arrange\n catalog_id = \"my-catalog-id\"\n expected_catalog = ResourceCatalog(catalog_id, None, None)\n http_client = AsyncClient(base_url=\"http://localhost\", transport=MockTransport(_handler1))\n\n async with NexusAsyncClient(http_client) as client:\n\n # act\n await client.sign_in(refresh_token)\n actual_catalog = await client.catalogs.get(catalog_id)\n \n # assert\n assert expected_catalog == actual_catalog\n\ntry_count2: int = 0\n\ndef _handler2(request: Request):\n global try_count2\n\n if \"catalogs\" in request.url.path:\n try_count2 += 1\n\n if (try_count2 == 1):\n assert not nexus_configuration_header_key in request.headers\n\n elif (try_count2 == 2):\n\n configuration = {\n \"foo1\": \"bar1\",\n \"foo2\": \"bar2\"\n }\n\n expected = base64.b64encode(json.dumps(configuration).encode(\"utf-8\")).decode(\"utf-8\")\n actual = request.headers[nexus_configuration_header_key]\n\n assert expected == actual\n\n elif (try_count2 == 3):\n assert not nexus_configuration_header_key in request.headers\n\n catalog_json_string = '{\"Id\":\"my-catalog-id\",\"Properties\":null,\"Resources\":null}'\n return Response(codes.OK, content=catalog_json_string)\n\n elif \"refresh-token\" in request.url.path:\n requestContent = request.content.decode(\"utf-8\")\n assert '{\"refreshToken\": \"456\"}' == requestContent\n\n new_token_pair_json_string = '{ \"accessToken\": \"123\", \"refreshToken\": \"456\" }'\n return Response(codes.OK, content=new_token_pair_json_string)\n\n else:\n raise Exception(\"Unsupported path.\")\n\n@pytest.mark.asyncio\nasync def can_add_configuration_test():\n\n # arrange\n catalog_id = \"my-catalog-id\"\n\n configuration = {\n \"foo1\": \"bar1\",\n \"foo2\": \"bar2\"\n }\n\n http_client 
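Both handlers in this record follow the httpx MockTransport pattern: a plain function receives the outgoing Request and fabricates a Response, so no server is needed. A stripped-down synchronous version (the tests use AsyncClient, but the transport works identically); the /ping route is hypothetical.

```python
# Minimal MockTransport handler: route on the request path, return
# canned responses.
import httpx

def handler(request: httpx.Request) -> httpx.Response:
    if request.url.path.endswith("/ping"):
        return httpx.Response(httpx.codes.OK, json={"ok": True})
    return httpx.Response(httpx.codes.NOT_FOUND)

client = httpx.Client(transport=httpx.MockTransport(handler))
assert client.get("http://testserver/ping").json() == {"ok": True}
```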
= AsyncClient(base_url=\"http://localhost\", transport=MockTransport(_handler2))\n\n async with NexusAsyncClient(http_client) as client:\n\n # act\n _ = await client.catalogs.get(catalog_id)\n\n with client.attach_configuration(configuration):\n _ = await client.catalogs.get(catalog_id)\n\n _ = await client.catalogs.get(catalog_id)\n\n # assert (already asserted in _handler2)\n","sub_path":"tests/clients/python-client-tests/async-client-tests.py","file_name":"async-client-tests.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"639491784","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport time\r\nimport random\r\nimport pyautogui\r\n\r\n# Fiz algumas modificações\r\n\r\n\r\nclass AnimeBot:\r\n def init(self):\r\n firefoxProfile = webdriver.FirefoxProfile()\r\n firefoxProfile.set_preference(\"intl.accept_languages\", \"pt,pt-BR\")\r\n firefoxProfile.set_preference(\"dom.webnotifications.enabled\", False)\r\n self.driver = webdriver.Firefox(\r\n firefox_profile=firefoxProfile, executable_path=r\"C:\\\\geckodriver.exe\"\r\n )\r\n \"\"\" # Coloque o caminho para o seu geckodriver aqui, lembrando que você precisa instalar o firefox e geckodriver na versão mais atual \"\"\"\r\n # Link download do geckodriver: https://github.com/mozilla/geckodriver/releases\r\n # Link download Firefox https://www.mozilla.org/pt-BR/firefox/new/\r\n def baixar(self):\r\n driver = self.driver\r\n driver.get(\"https://www.meuanime.com/baixar?file=5408677\")\r\n time.sleep(3)\r\n btn_ini = driver.find_element_by_partial_link_text('Clique')\r\n btn_ini.click()\r\n pyautogui.click(x=480, y=450)\r\n time.sleep(1)\r\n pyautogui.press('down')\r\n pyautogui.press('enter')\r\n '''\r\n a_element = driver.find_element_by_xpath(\"//input[@id='input_username']\")\r\n a_element.send_keys(self.a)\r\n b_element = driver.find_element_by_xpath(\"//input[@id='input_password']\")\r\n b_element.send_keys(self.b)\r\n login_button = driver.find_element_by_xpath(\"//button[@type='submit']\")\r\n login_button.click()'''\r\n \r\nAnime = AnimeBot()\r\nAnime.baixar()","sub_path":"stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"421410226","text":"from django.template import loader, RequestContext\nfrom MedApp.models import Client\nfrom django.contrib import auth\n\ndef application_info_processor(request):\n base_info = {}\n user = auth.get_user(request)\n base_info['medapp_active'] = True\n base_info['User'] = user\n base_info['Clients'] = Client.objects.filter(user_id = user.id)\n return (base_info)","sub_path":"stack_project/Project/MedApp/context_processor.py","file_name":"context_processor.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"86919974","text":"# -*- coding: utf-8 -*-\nfrom markdown.extensions import Extension\nfrom django.core import urlresolvers\nimport wiki.models\nimport re\nfrom markdown.inlinepatterns import Pattern as InlinePattern\nfrom markdown.preprocessors import Preprocessor\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom markdown.util import etree\n\nclass inlineInternalLink(InlinePattern):\n regex = r'\\[\\[([^\\]|]+)(\\|([^\\]]+))?\\]\\]'\n\n def handleMatch(self, m):\n url = 
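These classes target the legacy Markdown 2.x extension API (markdown.util.etree and inlinePatterns.add were removed in later releases). One subtlety the handlers above rely on: Pattern wraps the user regex, so the first user group arrives as m.group(2). A minimal sketch of the same mechanism with a hypothetical [[key:...]] pattern, assuming that legacy API:

```python
# Legacy Markdown 2.x inline-pattern sketch, matching the imports above.
import markdown
from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern as InlinePattern
from markdown.util import etree

class KbdPattern(InlinePattern):
    def handleMatch(self, m):
        el = etree.Element("kbd")
        el.text = m.group(2)   # group 1 is consumed by the wrapper regex
        return el

class KbdExtension(Extension):
    def extendMarkdown(self, md, md_globals):
        md.inlinePatterns.add('kbd', KbdPattern(r'\[\[key:([^\]]+)\]\]', md), '_end')

print(markdown.markdown("Press [[key:Ctrl]]", extensions=[KbdExtension()]))
```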
urlresolvers.reverse(\"wiki-show\",args=[m.group(2)])\n\n el = etree.Element(\"a\")\n el.set(\"href\",url)\n\n notFound = False\n\n try:\n wikiArticle = wiki.models.Article.objects.get(slug=m.group(2))\n except ObjectDoesNotExist:\n try:\n wikiArticle = wiki.models.Redirection.objects.get(slug=m.group(2))\n except ObjectDoesNotExist:\n el.set(\"class\", \"notFound\")\n notFound = True\n\n if m.group(4) :\n el.text = m.group(4)\n el.set(\"title\",m.group(4))\n elif notFound:\n el.text = m.group(2)\n el.set(\"title\",m.group(2))\n else:\n el.text = wikiArticle.title\n el.set(\"title\",wikiArticle.title)\n\n return el\n\nclass inlineProjectLink(InlinePattern):\n regex = r'!\\[\\[([^\\]|]+)(\\|([^\\]]+))\\]\\]'\n\n def handleMatch(self, m):\n url = urlresolvers.reverse(\"project-show\",args=[m.group(2)])\n\n el = etree.Element(\"a\")\n el.set(\"href\",url)\n\n if m.group(3) :\n el.text = m.group(4)\n el.set(\"title\",m.group(4))\n else:\n el.text = m.group(2)\n el.set(\"title\",m.group(4))\n\n return el\n\nclass inlineEventLink(InlinePattern):\n regex = r'\\?\\[\\[([^\\]|]+)(\\|([^\\]]+))\\]\\]'\n\n def handleMatch(self, m):\n url = urlresolvers.reverse(\"event-show\",args=[m.group(2)])\n\n el = etree.Element(\"a\")\n el.set(\"href\",url)\n\n if m.group(3) :\n el.text = m.group(4)\n el.set(\"title\",m.group(4))\n else:\n el.text = m.group(2)\n el.set(\"title\",m.group(4))\n\n return el\n\nclass inlineMediaInsert(InlinePattern):\n regex = r'\\$\\[\\[([^\\]\\|]+)(\\|([^\\]]+))?\\]\\](c?)'\n\n def handleMatch(self, m):\n try:\n media = wiki.models.Media.objects.get(name=m.group(2))\n except ObjectDoesNotExist:\n el = etree.Element(\"p\")\n el.text = \"Image \"+m.group(2)+\" introuvable !\"\n return el\n\n el = etree.Element(\"img\")\n el.set(\"src\",media.file.url)\n el.set(\"alt\",media.name)\n if m.group(4):\n el.set(\"title\",m.group(4))\n if m.group(5):\n el.set(\"class\",\"center\")\n return el\n\nclass wikiMarkdownExtention(Extension):\n def extendMarkdown(self, md, md_globals):\n md.inlinePatterns.add('internallink',inlineInternalLink(inlineInternalLink.regex,md),'_end')\n md.inlinePatterns.add('projectlink',inlineProjectLink(inlineProjectLink.regex,md),' default_depth:\r\n # Item is missing component info or the depth is too high\r\n bad_depth_items.append(item)\r\n\r\n elif \"from\" in data and data[\"from\"]:\r\n # The item has components and therefore is missing depth info\r\n bad_depth_items.append(item)\r\n\r\n if bad_depth_items:\r\n message = \"Some items do not list their depth correctly\"\r\n return False, message, bad_depth_items\r\n return True, None\r\n\r\n\r\n@set_status(TEST_STATUS.in_development)\r\ndef test_item_components_available(data_set):\r\n \"\"\"Check each component for an item can be built in the same map\"\"\"\r\n all_items = data_set[\"data\"]\r\n\r\n unavailable_components = dict()\r\n for item, data in all_items.items():\r\n if \"from\" in data:\r\n for depedency in data[\"from\"]:\r\n component = all_items[depedency]\r\n for map_id, available in data[\"maps\"].items():\r\n if available and not component[\"maps\"][map_id]:\r\n # On this map the item is available\r\n # but it's component isn't.\r\n if item not in unavailable_components:\r\n unavailable_components[item] = list()\r\n unavailable_components[item].append(depedency)\r\n break\r\n\r\n if unavailable_components:\r\n message = \"Some items have components which are not available on all the same maps\"\r\n return False, message, unavailable_components\r\n return True, None\r\n\r\n\r\nif __name__ == 
\"__main__\":\r\n run_tests(TEST_STATUS.in_development)\r\n","sub_path":"lol_item_verifier/item_verification/test_item_paths.py","file_name":"test_item_paths.py","file_ext":"py","file_size_in_byte":5218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"450354173","text":"\n\nfrom xai.brain.wordbase.nouns._billing import _BILLING\n\n#calss header\nclass _BILLINGS(_BILLING, ):\n\tdef __init__(self,): \n\t\t_BILLING.__init__(self)\n\t\tself.name = \"BILLINGS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"billing\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_billings.py","file_name":"_billings.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"573878572","text":"import json\nimport os.path\nimport re\nfrom datetime import datetime\nfrom urllib.parse import urlencode\nfrom urllib.request import urlopen\n\nimport requests\nfrom flask import Flask, render_template, redirect, session, g\nfrom flask_openid import OpenID\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config.from_envvar('STEAMLENS_SETTINGS')\n\noid = OpenID(app, os.path.join(os.path.dirname(__file__), app.config['OPENID_STIRE']))\ndb = SQLAlchemy(app)\n\n\n#######################\n# Recommender Service #\n#######################\n\n@app.context_processor\ndef inject_current_time():\n return {'current_time': datetime.utcnow()}\n\n\n@app.route('/')\ndef index():\n return redirect('/pop')\n\n\n@app.route('/pop')\ndef pop():\n # Get nickname\n nickname = None\n if g.user:\n nickname = g.user.nickname\n # Get items\n r = requests.get('%s/popular?number=%d' % (app.config['GORSE_API_URI'], app.config['GORSE_NUM_ITEMS']))\n items = [v['ItemId'] for v in r.json()]\n # Render page\n return render_template('page_gallery.jinja2', title='Popular Games', items=items, nickname=nickname)\n\n\n@app.route('/latest')\ndef latest():\n # Get nickname\n nickname = None\n if g.user:\n nickname = g.user.nickname\n # Get items\n r = requests.get('%s/latest?number=%d' % (app.config['GORSE_API_URI'], app.config['GORSE_NUM_ITEMS']))\n items = [v['ItemId'] for v in r.json()]\n # Render page\n return render_template('page_gallery.jinja2', title='Latest Games', items=items, nickname=nickname)\n\n\n@app.route('/random')\ndef random():\n # Get nickname\n nickname = None\n if g.user:\n nickname = g.user.nickname\n # Get items\n r = requests.get('%s/random?number=%d' % (app.config['GORSE_API_URI'], app.config['GORSE_NUM_ITEMS']))\n items = [v['ItemId'] for v in r.json()]\n # Render page\n return render_template('page_gallery.jinja2', title='Random Games', items=items, nickname=nickname)\n\n\n@app.route('/recommend')\ndef recommend():\n # Check login\n if g.user is None:\n return render_template('page_gallery.jinja2', title='Please login first', items=[])\n # Get items\n r = requests.get('%s/recommends/%s?number=%s' %\n (app.config['GORSE_API_URI'], g.user.steam_id, app.config['GORSE_NUM_ITEMS']))\n # Render page\n if r.status_code == 200:\n items = [v['ItemId'] for v in r.json()]\n return render_template('page_gallery.jinja2', title='Recommended Games', items=items, nickname=g.user.nickname)\n return render_template('page_gallery.jinja2', title='Generating Recommended Games...', items=[], nickname=g.user.nickname)\n\n\n@app.route('/item/')\ndef item(app_id: int):\n # Get nickname\n nickname = None\n if g.user:\n nickname = g.user.nickname\n # Get items\n r = 
requests.get('%s/neighbors/%d?number=%d' %\n (app.config['GORSE_API_URI'], app_id, app.config['GORSE_NUM_ITEMS']))\n items = [v['ItemId'] for v in r.json()]\n # Render page\n return render_template('page_app.jinja2', item_id=app_id, title='Similar Games', items=items, nickname=nickname)\n\n\n@app.route('/user')\ndef user():\n # Check login\n if g.user is None:\n return render_template('page_gallery.jinja2', title='Please login first', items=[])\n # Get items\n r = requests.get('%s/user/%s/feedback' % (app.config['GORSE_API_URI'], g.user.steam_id))\n # Render page\n if r.status_code == 200:\n items = [v['ItemId'] for v in r.json()]\n return render_template('page_gallery.jinja2', title='Owned Games', items=items, nickname=g.user.nickname)\n return render_template('page_gallery.jinja2', title='Synchronizing Owned Games ...', items=[], nickname=g.user.nickname)\n\n\n#################\n# Steam Service #\n#################\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n steam_id = db.Column(db.String(40))\n nickname = db.Column(db.String(80))\n\n @staticmethod\n def get_or_create(steam_id):\n rv = User.query.filter_by(steam_id=steam_id).first()\n if rv is None:\n rv = User()\n rv.steam_id = steam_id\n db.session.add(rv)\n return rv\n\n\n@app.route(\"/login\")\n@oid.loginhandler\ndef login():\n if g.user is not None:\n return redirect(oid.get_next_url())\n else:\n return oid.try_login(\"http://steamcommunity.com/openid\")\n\n\n@app.route('/logout')\ndef logout():\n session.pop('user_id', None)\n return redirect('/pop')\n\n\n@app.before_request\ndef before_request():\n g.user = None\n if 'user_id' in session:\n g.user = User.query.filter_by(id=session['user_id']).first()\n\n\n@oid.after_login\ndef new_user(resp):\n _steam_id_re = re.compile('steamcommunity.com/openid/id/(.*?)$')\n match = _steam_id_re.search(resp.identity_url)\n g.user = User.get_or_create(match.group(1))\n steamdata = get_user_info(g.user.steam_id)\n g.user.nickname = steamdata['personaname']\n db.session.commit()\n session['user_id'] = g.user.id\n # Add games to gorse\n games = get_owned_games(g.user.steam_id)\n data = [{'UserId': str(g.user.steam_id), 'ItemId': str(v['appid']), 'Rating': float(v['playtime_forever'])} for v in games]\n headers = {\"Content-Type\": \"application/json\"}\n requests.put('http://127.0.0.1:8080/feedback', data=json.dumps(data), headers=headers)\n return redirect(oid.get_next_url())\n\n\ndef get_user_info(steam_id):\n options = {\n 'key': app.secret_key,\n 'steamids': steam_id\n }\n url = 'http://api.steampowered.com/ISteamUser/' \\\n 'GetPlayerSummaries/v0001/?%s' % urlencode(options)\n rv = json.load(urlopen(url))\n return rv['response']['players']['player'][0] or {}\n\n\ndef get_owned_games(steam_id):\n options = {\n 'key': app.secret_key,\n 'steamid': steam_id,\n 'format': 'json'\n }\n url = 'http://api.steampowered.com/IPlayerService/GetOwnedGames/v0001/?%s' % urlencode(options)\n rv = json.load(urlopen(url))\n return rv['response']['games']\n\n\ndef get_friend_list(steam_id: str):\n options = {\n 'key': app.secret_key,\n 'steamid': steam_id,\n 'format': 'json',\n 'relationship': 'friend'\n }\n url = 'http://api.steampowered.com/IPlayerService/GetFriendList/v0001/?%s' % urlencode(options)\n rv = json.load(urlopen(url))\n return rv['friendslist']['friends']\n\n\n# Create tables if not 
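get_user_info, get_owned_games and get_friend_list in this record differ only in interface, method and parameters. A generic helper capturing the shared urlencode-plus-urlopen shape; the key and steamid in the commented call are placeholders, not real values.

```python
# Generic Steam Web API call: build the query string with urlencode,
# decode the JSON body from urlopen.
import json
from urllib.parse import urlencode
from urllib.request import urlopen

def steam_api(interface, method, version, **params):
    url = 'http://api.steampowered.com/%s/%s/%s/?%s' % (
        interface, method, version, urlencode(params))
    return json.load(urlopen(url))

# rv = steam_api('IPlayerService', 'GetOwnedGames', 'v0001',
#                key='<api-key>', steamid='<steam-id>', format='json')
```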
exists.\ndb.create_all()\n","sub_path":"steamlens/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"404466511","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef graph(func, X,y):\n mean=[]\n std=[]\n alpha=np.logspace(-4, -0.5, 30)\n for i in alpha:\n temp=func(X,y,alpha=i)\n mean.append(temp[0])\n std.append(temp[1])\n \n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1) \n plt.semilogx(alpha,mean)\n plt.xlabel('mean')\n plt.subplot(1,2,2)\n plt.semilogx(alpha,std)\n plt.xlabel('std')\n plt.show()\n return 1\n","sub_path":"ch7_graph.py","file_name":"ch7_graph.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"483085463","text":"\"\"\"\nvinicius miranda de pinho\n\nnull\n\ninteger\n\nreal\n\ntext\n\nblob\n\n28 de jan 2020\n\nadd new line\n\nadd senha\n\ntestes\n\nteste\n\nteste de entrada\n\nteste de merda\n\n\"\"\"\n\nimport sqlite3\nimport time\nfrom datetime import datetime\nfrom employee_001 import Employee\n\n\ndef write_sqlite3():\n # current date and time\n dateTimeObj = datetime.now()\n timestamp = dateTimeObj.strftime(\"%d-%b-%Y (%H:%M:%S.%f)\")\n print(\"date =\", timestamp)\n print(type(timestamp))\n\n conn = sqlite3.connect('employee.db')\n\n c = conn.cursor() # for call commands\n try:\n\n c.execute(\"\"\"CREATE TABLE employee(\n first text,\n last text,\n pay integer,\n date text\n )\"\"\")\n\n except sqlite3.OperationalError:\n print(\"SQL DB already exit\")\n\n emp1 = Employee('john', 'doe', 65000)\n emp2 = Employee('vini', 'pinho', 75000)\n date_rec = timestamp\n\n # c.execute(\"INSERT INTO employee VALUES ('{}', '{}', '{}', '{}')\".format(first_name, last_name, pay, date_rec))\n c.execute(\"INSERT INTO employee VALUES (:first, :last, :pay, :date)\",\n {'first': emp1.first_name, 'last': emp1.last_name, 'pay': emp1.pay, 'date': date_rec})\n\n conn.commit() # must be commit\n\n conn.close()\n\n return\n\n\nwrite_sqlite3()\nprint(\"Record end SQL\")\n","sub_path":"sqlite3_001.py","file_name":"sqlite3_001.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"182708605","text":"# Longest Common Prefix\n# Write a funtion to find the longest common prefix string amongst an array of strings.\n\nclass Solution:\n # @param {string[]} strs\n # @return {string}\n def compareTwo(self, str1, str2):\n l1 = len(str1)\n l2 = len(str2)\n l = l1 if l1 < l2 else l2\n i = 0\n while i < l:\n if str1[i] != str2[i]:\n break\n i += 1\n return str1[:i]\n\n def longestCommonPrefix(self, strs):\n if len(strs) == 0:\n return ''\n s = strs[0]\n for i in xrange(1, len(strs)):\n s = self.compareTwo(s, strs[i])\n return s\n","sub_path":"python/p_014.py","file_name":"p_014.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"220560880","text":"# -*- coding:UTF-8 -*-\n#\n#\n\nfrom elasticsearch import Elasticsearch\nfrom pathlib import Path\nfrom dotenv import load_dotenv\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine\nfrom operator import itemgetter\nfrom itertools import groupby\nimport scrapy\nimport sys\nimport sqlalchemy\nimport os\nimport 
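The pairwise compareTwo/longestCommonPrefix pair in the record above can be collapsed: os.path.commonprefix already performs the same character-wise scan across the whole list, and returns '' for an empty input.

```python
# Standard-library equivalent of the hand-rolled prefix scan.
import os.path

assert os.path.commonprefix(["flower", "flow", "flight"]) == "fl"
assert os.path.commonprefix([]) == ""
```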
re\nimport datetime\n\n\n# settings.py\nes_user = os.getenv(\"ES_USER\")\nes_pwd = os.getenv(\"ES_PWD\")\n\nes = Elasticsearch(http_auth=(es_user, es_pwd))\n\n\ndef cont_filter(x):\n return x.replace(\"、\", \".\").replace(\"\\n\", \"\").strip(\" \") != \"\"\n\n\ndef distinct(items):\n key = itemgetter('link')\n items = sorted(items, key=key)\n return [next(v) for _, v in groupby(items, key=key)]\n\n\nclass AliSpider(scrapy.Spider):\n # 593\n name = \"escn_new\"\n start_urls = [\n 'https://elasticsearch.cn/explore/category-18',\n ]\n\n page_list = []\n\n source = \"escn\"\n tag = \"elastic\"\n\n def parse(self, response):\n last_create = None\n res = es.search(index=\"article\", body={\"query\": {\"query_string\": {\n \"query\": \"source:escn\"}}, \"size\": 1, \"_source\": \"created_at\", \"sort\": [{\"created_at\": {\"order\": \"desc\"}}]})\n end_crawl = False\n if len(res['hits']['hits']) > 0:\n last_create = res['hits']['hits'][0]['_source']['created_at']\n last_create = datetime.datetime.strptime(last_create, '%Y-%m-%d')\n for item in response.css('.aw-common-list .aw-item'):\n\n title_h4 = item.css('.aw-question-content h4')\n title = title_h4.css('a::text').extract_first()\n title = title.replace(\"\\n\", \"\").strip()\n \n mat = re.search(r\"(\\d{4}-\\d{1,2}-\\d{1,2})\", title)\n _date = None\n if mat != None:\n _date = mat.group(0)\n _date = datetime.datetime.strptime(_date, '%Y-%m-%d')\n\n link = title_h4.css('a::attr(href)').extract_first()\n print('last_create', last_create)\n print('_date', _date)\n \n if (last_create != None) and (_date <= last_create):\n end_crawl = True\n break\n else:\n _item = {\n \"title\": title,\n \"link\": link\n }\n\n self.page_list.append(_item)\n\n next_page_li = response.css('.pagination li:nth-last-child(2)')\n next_page_a_text = next_page_li.css('a::text').extract_first()\n\n if next_page_a_text == \">\":\n next_page_a_link = next_page_li.css(\n 'a::attr(href)').extract_first()\n else:\n\n next_page_li = response.css('.pagination li:nth-last-child(1)')\n next_page_a_text = next_page_li.css('a::text').extract_first()\n\n if next_page_a_text == \">\":\n next_page_a_link = next_page_li.css(\n 'a::attr(href)').extract_first()\n else:\n next_page_a_link = None\n\n if (next_page_a_link is not None) and (not end_crawl):\n yield response.follow(next_page_a_link, callback=self.parse)\n else:\n\n if len(self.page_list) > 0:\n\n self.page_list = distinct(self.page_list)\n item = self.page_list.pop(0)\n yield response.follow(item['link'], callback=self.parse_items)\n\n def parse_items(self, response):\n\n title = response.xpath(\n \"/html/body/div[3]/div/div/div/div[1]/div[1]/div[1]/h1/text()\").get()\n title = title.replace(\"\\n\", \"\").strip(\" \")\n\n date = None\n year = None\n mat = re.search(r\"(\\d{4}-\\d{1,2}-\\d{1,2})\", title)\n if mat != None:\n date = mat.group(0)\n mat2 = re.search(r\"(\\d{4})\", date)\n year = mat2.group(0)\n\n t_pattern = re.compile(r'(?<=第)\\d+')\n ver = 0\n vers = re.findall(t_pattern, title)\n\n if len(vers) > 0:\n ver = int(vers[0])\n\n conuter = 0\n\n contents = response.css(\"#markdown_out::text\").extract()\n links = response.css(\"#markdown_out a::attr(href)\").extract()\n\n contents = list(filter(cont_filter, contents))\n if len(contents) == 0:\n contents = response.xpath(\n '//*[@id=\"markdown_out\"]/p/text()').getall()\n links = response.xpath(\n '//*[@id=\"markdown_out\"]/p/a/@href').getall()\n\n pattern = re.compile(r'^\\d\\.')\n\n bulk = []\n for content in contents:\n\n content = content.replace(\"、\", \".\")\n 
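The parse method in this spider stops paginating once an item's title date is no newer than the latest created_at already indexed in Elasticsearch. The guard in isolation; note that when the regex fails, _date stays None and the original `_date <= last_create` comparison would raise TypeError, so the sketch checks for that explicitly.

```python
# Incremental-crawl guard: extract a YYYY-MM-DD date from the title
# and stop once items are no newer than the last indexed date.
import re
import datetime

def title_date(title):
    mat = re.search(r"\d{4}-\d{1,2}-\d{1,2}", title)
    return datetime.datetime.strptime(mat.group(0), "%Y-%m-%d") if mat else None

last_create = datetime.datetime(2020, 1, 1)
d = title_date("Elastic日报 2019-12-31")
end_crawl = d is not None and d <= last_create   # True -> stop paginating
```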
content = content.replace(\"\\n\", \"\")\n content = content.strip()\n\n match = re.search(r'^\\d\\.', content)\n\n if match is not None:\n content = re.sub(pattern, '', content)\n link = links[conuter]\n conuter += 1\n doc = {}\n\n doc['title'] = content\n\n doc['url'] = link\n doc['summary'] = title\n\n doc['tag'] = self.tag\n doc['source'] = self.source\n\n if date != None:\n doc['created_at'] = date\n doc['created_year'] = year\n\n doc['stars'] = 0\n bulk.append(\n {\"index\": {\"_index\": \"article\"}})\n bulk.append(doc)\n\n if len(bulk) > 0:\n es.bulk(index=\"article\", body=bulk)\n\n if len(self.page_list) > 0:\n item = self.page_list.pop(0)\n yield response.follow(item['link'], callback=self.parse_items)\n","sub_path":"scrapy/tutorial/spiders/escn_new.py","file_name":"escn_new.py","file_ext":"py","file_size_in_byte":5710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"57351960","text":"#from questions.question_type.recommender.provider import Provider\n#from questions.question_type.recommender.stack_data.tokenmaker import Tokenizer\nfrom question_tag_mtx import QuestionTagMtx\nfrom stack_data.tokenmaker import Tokenizer\nfrom collections import Counter\nimport math\nimport numpy as np\nimport scipy.stats as sstat\n\nclass Recommender:\n def __init__(self, init_data):\n self.provider = QuestionTagMtx(init_data)\n self.tokenizer = Tokenizer()\n self.refs, self.tags = self.provider.refs, self.provider.tags\n self.global_values = []\n self.question_asked = []\n\n\n def filter_mtx(self, tokenized_question, limit=10):\n self.question_asked = tokenized_question\n tags = {}\n questions = []\n for token in tokenized_question:\n try:\n tags[token] = self.refs[token]\n questions += tags[token]['cols']\n except:\n print(\"Not found\")\n\n questions = np.asarray(questions)\n values, counts = np.unique(questions, return_counts=True)\n idx = np.argsort(counts)[::-1]\n self.global_values = values[idx]\n\n return self.filter_ids(self.global_values, 0, limit)\n\n\n def get_questions(self,id_block):\n results = self.provider.get_questions(id_block)\n return results[:len(id_block)]\n\n\n def filter_ids(self, values, start ,limit):\n values = list(filter( lambda x: x.size > 0, values[start:start + limit * 2]))\n return self.questions[values]\n\n\n def calc_sim(self, filtered_questions):\n\n cos_sim = []\n question_asked = Counter(self.question_asked)\n\n for question in filtered_questions:\n print(question)\n body = Counter(self.tokenizer.tokenize(question['body']))\n cos_sim.append({'similarity': self.get_cosine(question_asked, body), 'body': question['body'], 'linkto': question['url']})\n\n return sorted(cos_sim, key=lambda x: x['similarity'])[::-1]\n\n\n def get_cosine(self,vec1, vec2):\n intersection = set(vec1.keys()) & set(vec2.keys())\n numerator = sum([vec1[x] * vec2[x] for x in intersection])\n\n sum1 = sum([vec1[x]**2 for x in vec1.keys()])\n sum2 = sum([vec2[x]**2 for x in vec2.keys()])\n denominator = math.sqrt(sum1) * math.sqrt(sum2)\n\n if not denominator:\n return 0.0\n else:\n return float(numerator) / denominator\n\n def recommend(self, query,results=10):\n tokenized_question = self.tokenizer.tokenize(query.lower())\n _ids = self.filter_mtx(tokenized_question,results)\n results = self.get_questions(_ids)\n return 
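get_cosine in this recommender treats two token Counters as sparse vectors keyed by word. A usage sketch with the same formula:

```python
# Cosine similarity over token multisets, as in get_cosine above.
from collections import Counter
import math

def cosine(v1, v2):
    num = sum(v1[t] * v2[t] for t in set(v1) & set(v2))
    den = math.sqrt(sum(c * c for c in v1.values())) * \
          math.sqrt(sum(c * c for c in v2.values()))
    return num / den if den else 0.0

a = Counter("how to sort a list in python".split())
b = Counter("sort list python".split())
print(round(cosine(a, b), 3))   # 3/sqrt(7*3) ~= 0.655
```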
self.calc_sim(results)\n","sub_path":"services/analytics_server/questions/question_type/question_based_rec/recommender.py","file_name":"recommender.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"599954579","text":"import scipy.integrate\nimport time\nimport datetime\nimport signal\nimport threading\nimport scipy.stats as stats\nimport numpy as np\nimport math\nfrom scipy import signal\n\n\n\n\n\n\nlower, upper = 0, 1\nmu = 0\nsigma = 1\nrandDist = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu,\n scale=sigma)\n\nclass Propeller():\n def __init__(self, prop_dia, prop_pitch, thrust_unit='N'):\n self.dia = prop_dia\n self.pitch = prop_pitch\n self.thrust_unit = thrust_unit\n self.speed = 0 #RPM\n self.thrust = 0\n self.fault_mag = 0\n\n def set_speed(self,speed):\n self.speed = speed\n if self.fault_mag > 0:\n #print(self.fault_mag)\n #print(\"Speed before :\" + str(self.speed))\n self.speed = self.speed * (1 - self.fault_mag)\n # print(\"Speed after :\" + str(self.speed))\n # From http://www.electricrcaircraftguy.com/2013/09/propeller-static-dynamic-thrust-equation.html\n self.thrust = 4.392e-8 * self.speed * math.pow(self.dia,3.5)/(math.sqrt(self.pitch))\n self.thrust = self.thrust*(4.23e-4 * self.speed * self.pitch)\n\n if self.thrust_unit == 'Kg':\n self.thrust = self.thrust*0.101972\n def get_speed(self):\n return self.speed\n\n def set_fault(self,fault):\n self.fault_mag = fault\n\n\nclass Quadcopter():\n # State space representation: [x y z x_dot y_dot z_dot theta phi gamma theta_dot phi_dot gamma_dot]\n # From Quadcopter Dynamics, Simulation, and Control by Andrew Gibiansky\n def __init__(self,quads,gravity=9.81,b=0.0245):\n self.quads = quads\n self.g = gravity\n self.b = b\n self.wind = True\n self.windMag = 0\n self.thread_object = None\n self.ode = scipy.integrate.ode(self.state_dot).set_integrator('vode',nsteps=500,method='bdf')\n self.time = datetime.datetime.now()\n self.stepNum = 0\n self.airspeed = 15\n self.randWind = self.generate_wind_turbulence(5)\n self.XWind = 0\n self.YWind = 0\n self.ZWind = 0\n\n\n for key in self.quads:\n\n self.quads[key]['state'] = np.zeros(12)\n self.quads[key]['state'][0:3] = self.quads[key]['position']\n self.quads[key]['state'][6:9] = self.quads[key]['orientation']\n self.quads[key]['m1'] = Propeller(self.quads[key]['prop_size'][0],self.quads[key]['prop_size'][1])\n self.quads[key]['m2'] = Propeller(self.quads[key]['prop_size'][0],self.quads[key]['prop_size'][1])\n self.quads[key]['m3'] = Propeller(self.quads[key]['prop_size'][0],self.quads[key]['prop_size'][1])\n self.quads[key]['m4'] = Propeller(self.quads[key]['prop_size'][0],self.quads[key]['prop_size'][1])\n # From Quadrotor Dynamics and Control by Randal Beard\n ixx=((2*self.quads[key]['weight']*self.quads[key]['r']**2)/5)+(2*self.quads[key]['weight']*self.quads[key]['L']**2)\n iyy=ixx\n izz=((2*self.quads[key]['weight']*self.quads[key]['r']**2)/5)+(4*self.quads[key]['weight']*self.quads[key]['L']**2)\n self.quads[key]['I'] = np.array([[ixx,0,0],[0,iyy,0],[0,0,izz]])\n self.quads[key]['invI'] = np.linalg.inv(self.quads[key]['I'])\n self.run = True\n\n def rotation_matrix(self,angles):\n ct = math.cos(angles[0])\n cp = math.cos(angles[1])\n cg = math.cos(angles[2])\n st = math.sin(angles[0])\n sp = math.sin(angles[1])\n sg = math.sin(angles[2])\n R_x = np.array([[1,0,0],[0,ct,-st],[0,st,ct]])\n R_y = np.array([[cp,0,sp],[0,1,0],[-sp,0,cp]])\n R_z = 
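Propeller.set_speed computes static thrust from the empirical formula cited in its comment (diameter and pitch in inches, speed in RPM). The same estimate as a free function, with the constants copied verbatim from the code above:

```python
# Static-thrust estimate used by Propeller.set_speed; result in newtons.
import math

def static_thrust(rpm, dia, pitch):
    t = 4.392e-8 * rpm * math.pow(dia, 3.5) / math.sqrt(pitch)
    return t * (4.23e-4 * rpm * pitch)

print(static_thrust(6000, 10, 4.5))   # a 10x4.5" prop at 6000 RPM, ~4.5 N
```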
np.array([[cg,-sg,0],[sg,cg,0],[0,0,1]])\n R = np.dot(R_z, np.dot( R_y, R_x ))\n return R\n\n def wrap_angle(self,val):\n return( ( val + np.pi) % (2 * np.pi ) - np.pi )\n\n def setWind(self, wind_vec):\n self.randWind = wind_vec\n\n def setNormalWind(self,winds):\n self.XWind = winds[0]\n self.YWind = winds[1]\n self.ZWind = winds[2]\n\n\n def state_dot(self, time, state, key):\n state_dot = np.zeros(12)\n # The velocities(t+1 x_dots equal the t x_dots)\n state_dot[0] = self.quads[key]['state'][3]\n state_dot[1] = self.quads[key]['state'][4]\n state_dot[2] = self.quads[key]['state'][5]\n # The acceleration\n height = self.quads[key]['state'][2]\n #\n F_d = np.array([ 0, 0, 0])\n #\n air_density = 1.225 #kg/m^3\n C_d = 1\n cube_width = 0.1 # 10cm x 10cm cube as shape model of quadcopter\n A_yz = cube_width*cube_width\n A_xz = cube_width*cube_width\n A_xy = cube_width*cube_width\n\n A = [ A_yz , A_xz , A_xy ] # cross sectional area in each axis perpendicular to velocity axis\n\n #if wind is active the velocity in each axis is subject to wind\n nomX = self.XWind\n nomY = self.YWind\n nomZ = self.ZWind\n\n if(self.stepNum > 19500):\n self.stepNum = 0\n randX = self.randWind[0][self.stepNum]\n randY = self.randWind[1][self.stepNum]\n randZ = self.randWind[2][self.stepNum]\n\n\n #wind_velocity_vector = self.randWind\n\n wind_velocity_vector = [ nomX + randX , nomY + randY , nomZ + randZ] # wind velocity in each axis\n\n wind_vel_inertial_frame = np.dot(self.rotation_matrix(self.quads[key]['state'][6:9]) , wind_velocity_vector)\n V_b = [ state[0], state[1] , state[2]]\n V_a = wind_vel_inertial_frame - V_b\n #print(V_a)\n\n\n DragVector = [\n A[0] * (V_a[0] * abs(V_a[0])),\n A[1] * (V_a[1] * abs(V_a[1])),\n A[2] * (V_a[2] * abs(V_a[2]))\n ]\n #print(DragVector)\n\n F_d = [i * (0.5 * air_density * C_d) for i in DragVector]\n #form drag is a -0.5 and wind seems to be a +0.5. 
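The drag block in state_dot builds F = ½·ρ·C_d·A·v·|v| per body axis from the relative air velocity. The model in isolation, using the same air density and unit drag coefficient as above:

```python
# Per-axis quadratic drag on the relative air velocity.
import numpy as np

def drag_force(v_air, area, rho=1.225, c_d=1.0):
    v = np.asarray(v_air, dtype=float)
    return 0.5 * rho * c_d * np.asarray(area, dtype=float) * v * np.abs(v)

print(drag_force([5.0, 0.0, -2.0], [0.01, 0.01, 0.01]))
```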
why??\n # the velocity is subtracted from wind ?\n\n\n #print(F_d)\n\n\n #Dryden turbulence random component\n # self.windMag\n\n x_dotdot = np.array([0,0,-self.quads[key]['weight']*self.g]) \\\n + np.dot(self.rotation_matrix(self.quads[key]['state'][6:9]),np.array([0,0,(self.quads[key]['m1'].thrust + self.quads[key]['m2'].thrust + self.quads[key]['m3'].thrust + self.quads[key]['m4'].thrust)]))/self.quads[key]['weight'] \\\n + F_d\n\n state_dot[3] = x_dotdot[0]\n state_dot[4] = x_dotdot[1]\n state_dot[5] = x_dotdot[2]\n # The angular rates(t+1 theta_dots equal the t theta_dots)\n state_dot[6] = self.quads[key]['state'][9]\n state_dot[7] = self.quads[key]['state'][10]\n state_dot[8] = self.quads[key]['state'][11]\n # The angular accelerations\n omega = self.quads[key]['state'][9:12]\n tau = np.array([self.quads[key]['L']*(self.quads[key]['m1'].thrust-self.quads[key]['m3'].thrust), self.quads[key]['L']*(self.quads[key]['m2'].thrust-self.quads[key]['m4'].thrust), self.b*(self.quads[key]['m1'].thrust-self.quads[key]['m2'].thrust+self.quads[key]['m3'].thrust-self.quads[key]['m4'].thrust)])\n omega_dot = np.dot(self.quads[key]['invI'], (tau - np.cross(omega, np.dot(self.quads[key]['I'],omega))))\n state_dot[9] = omega_dot[0]\n state_dot[10] = omega_dot[1]\n state_dot[11] = omega_dot[2]\n return state_dot\n\n def update(self, dt):\n self.stepNum +=1\n for key in self.quads:\n self.ode.set_initial_value(self.quads[key]['state'],0).set_f_params(key)\n self.quads[key]['state'] = self.ode.integrate(self.ode.t + dt)\n self.quads[key]['state'][6:9] = self.wrap_angle(self.quads[key]['state'][6:9])\n self.quads[key]['state'][2] = max(0,self.quads[key]['state'][2])\n #print(self.get_motor_speeds(key))\n\n def generate_wind_turbulence(self, h):\n\n height = float(h) * 3.28084\n airspeed = float(self.airspeed) * 3.28084\n\n\n mean = 0\n std = 1\n # create a sequence of 1000 equally spaced numeric values from 0 - 5\n t_p = np.linspace(0, 10, 20000)\n num_samples = 20000\n t_p = np.linspace(0, 10, 20000)\n\n\n # the random number seed used same as from SIMULINK blockset\n np.random.seed(23341)\n samples1 = 10 * np.random.normal(mean, std, size=num_samples)\n\n np.random.seed(23342)\n samples2 = 10 * np.random.normal(mean, std, size=num_samples)\n\n np.random.seed(23343)\n samples3 = 10 * np.random.normal(mean, std, size=num_samples)\n\n\n\n tf_u = u_transfer_function(height, airspeed)\n tf_v = v_transfer_function(height, airspeed)\n tf_w = w_transfer_function(height, airspeed)\n\n\n\n tout1, y1, x1 = signal.lsim(tf_u, samples1, t_p)\n # tout1, y1, x1 = signal.lsim(tf_u, n1, t_w)\n # covert obtained values to meters/second\n y1_f = [i * 0.305 for i in y1]\n tout2, y2, x2 = signal.lsim(tf_v, samples2, t_p)\n # tout2, y2, x2 = signal.lsim(tf_v, n2, t_w)\n y2_f = [i * 0.305 for i in y2]\n tout3, y3, x3 = signal.lsim(tf_w, samples3, t_p)\n # tout3, y3, x3 = signal.lsim(tf_w, n3, t_w)\n y3_f = [i * 0.305 for i in y3]\n\n return [ y1_f, y2_f , y3_f ]\n\n\n\n def set_motor_speeds(self,quad_name,speeds):\n self.quads[quad_name]['m1'].set_speed(speeds[0])\n self.quads[quad_name]['m2'].set_speed(speeds[1])\n self.quads[quad_name]['m3'].set_speed(speeds[2])\n self.quads[quad_name]['m4'].set_speed(speeds[3])\n # print(speeds)\n def get_motor_speeds(self,quad_name):\n return [self.quads[quad_name]['m1'].get_speed(), self.quads[quad_name]['m2'].get_speed(),\n self.quads[quad_name]['m3'].get_speed(), self.quads[quad_name]['m4'].get_speed()]\n\n def get_motor_speeds_rpm(self,quad_name):\n return 
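generate_wind_turbulence colors white noise through Dryden-style shaping filters with scipy.signal.lsim. A reduced sketch of that step with a single first-order along-wind filter; the V, L and sigma values are illustrative, and the gain mirrors the form of u_transfer_function further down.

```python
# White noise -> colored gust series through a first-order shaping filter.
import numpy as np
from scipy import signal

V, L, sigma = 50.0, 200.0, 1.5                 # airspeed, length scale, intensity
K = sigma * np.sqrt(2 * L / (np.pi * V)) * V   # gain as in u_transfer_function
H_u = signal.TransferFunction([K], [L, V])

t = np.linspace(0, 10, 2000)
noise = np.random.normal(size=t.size)
_, gust, _ = signal.lsim(H_u, noise, t)        # gust velocity time series
```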
[self.quads[quad_name]['m1'].get_speed(), self.quads[quad_name]['m2'].get_speed(),self.quads[quad_name]['m3'].get_speed(),self.quads[quad_name]['m4'].get_speed()]\n\n def get_position(self,quad_name):\n return self.quads[quad_name]['state'][0:3]\n\n def get_linear_rate(self,quad_name):\n return self.quads[quad_name]['state'][3:6]\n\n def get_orientation(self,quad_name):\n return self.quads[quad_name]['state'][6:9]\n\n def get_angular_rate(self,quad_name):\n return self.quads[quad_name]['state'][9:12]\n\n def get_state(self,quad_name):\n return self.quads[quad_name]['state']\n\n def set_position(self,quad_name,position):\n self.quads[quad_name]['state'][0:3] = position\n\n def set_orientation(self,quad_name,orientation):\n self.quads[quad_name]['state'][6:9] = orientation\n\n def get_time(self):\n return self.time\n\n def thread_run(self,dt,time_scaling):\n rate = time_scaling*dt\n last_update = self.time\n while(self.run==True):\n time.sleep(0)\n self.time = datetime.datetime.now()\n if (self.time-last_update).total_seconds() > rate:\n self.update(dt)\n last_update = self.time\n\n def stepQuad(self,dt=0.05):\n self.update(dt)\n return\n\n def set_motor_faults(self,quad_name, faults):\n f1 = faults[0]\n f2 =faults[1]\n f3 =faults[2]\n f4 =faults[3]\n self.quads[quad_name]['m1'].set_fault(f1)\n self.quads[quad_name]['m2'].set_fault(f2)\n self.quads[quad_name]['m3'].set_fault(f3)\n self.quads[quad_name]['m4'].set_fault(f4)\n\n return\n\n def start_thread(self, dt=0.002, time_scaling=1):\n self.thread_object = threading.Thread(target=self.thread_run, args=(dt, time_scaling))\n self.thread_object.start()\n\n def stop_thread(self):\n self.run = False\n\n\n\n# Low altitude Model\n# transfer function for along-wind\ndef u_transfer_function(height, airspeed):\n # turbulence level defines value of wind speed in knots at 20 feet\n # turbulence_level = 15 * 0.514444 # convert speed from knots to meters per second\n turbulence_level = 15\n length_u = height / ((0.177 + 0.00823 * height) ** (0.2))\n # length_u = 1750\n sigma_w = 0.1 * turbulence_level\n sigma_u = sigma_w / ((0.177 + 0.000823 * height) ** (0.4))\n num_u = [sigma_u * (math.sqrt((2 * length_u) / (math.pi * airspeed))) * airspeed]\n den_u = [length_u, airspeed]\n H_u = signal.TransferFunction(num_u, den_u)\n return H_u\n\n\n# transfer function for cross-wind\ndef v_transfer_function(height, airspeed):\n # turbulence level defines value of wind speed in knots at 20 feet\n # turbulence_level = 15 * 0.514444 # convert speed from knots to meters per second\n turbulence_level = 15\n length_v = height / ((0.177 + 0.00823 * height) ** (0.2))\n # length_v = 1750\n sigma_w = 0.1 * turbulence_level\n sigma_v = sigma_w / ((0.177 + 0.000823 * height) ** (0.4))\n b = sigma_v * (math.sqrt((length_v) / (math.pi * airspeed)))\n Lv_V = length_v / airspeed\n num_v = [(math.sqrt(3) * Lv_V * b), b]\n den_v = [(Lv_V ** 2), 2 * Lv_V, 1]\n H_v = signal.TransferFunction(num_v, den_v)\n return H_v\n\n\n# transfer function for vertical-wind\ndef w_transfer_function(height, airspeed):\n # turbulence level defines value of wind speed in knots at 20 feet\n # turbulence_level = 15 * 0.514444 # convert speed from knots to meters per second\n turbulence_level = 15\n length_w = height\n # length_w = 1750\n sigma_w = 0.1 * turbulence_level\n c = sigma_w * (math.sqrt((length_w) / (math.pi * airspeed)))\n Lw_V = length_w / airspeed\n num_w = [(math.sqrt(3) * Lw_V * c), c]\n den_w = [(Lw_V ** 2), 2 * Lw_V, 1]\n H_v = signal.TransferFunction(num_w, den_w)\n return 
H_v\n\n","sub_path":"Quadcopter - Reinforcement Learning/quadcopter.py","file_name":"quadcopter.py","file_ext":"py","file_size_in_byte":13626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"380560367","text":"# 1명은 치킨, 3명은 커피\r\n# 1~20 중 무작위로\r\n# 중복 불가\r\n# random module의 shuffle과 sample을 사용\r\n\r\n# shuffle(CLASSNAME) returns randomly sorted CLASSNAME\r\n# sample(CLASSNAME, NUM) returns randomly chosen sample of number NUM from CLASSNAME\r\n\r\nfrom random import sample, shuffle\r\n\r\nnumList = list(range(1,21))\r\nshuffle(numList)\r\nwinners = sample(numList, 4)\r\nprint(\"치킨쿠폰: {chicken}\\n커피쿠폰: {coffee}\".format(chicken=winners[0], coffee=winners[1:]))","sub_path":"Python_Basic/Data Structure/Quiz.py","file_name":"Quiz.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"376704829","text":"import os\nimport json\nimport time\nfrom tensorboard_logger import Logger\n\n\nclass FileWriter:\n \"\"\"\n Base Writer\n - Records progress to tensorboard\n - Saves metrics to folder structure\n - Saves model checkpoints\n\n Args:\n name (str): Name of dataset (which matches file in ./data)\n params (dict): Dict of parameters\n fold (int): ID of fold\n base_output_loc (str): Base location to store results\n \"\"\"\n\n class Mode:\n \"\"\"Util to tell what mode currently in\"\"\"\n TRAIN = 0\n VALID = 1\n TEST = 2\n\n def __init__(self,\n dataset_name,\n params: dict,\n fold=None,\n base_output_loc='./__experiments__'):\n self.dataset = dataset_name\n self.base_dir = base_output_loc\n self.fold = fold\n self.params = params\n self.output_dir = self._create_base_output_folder(base_output_loc)\n\n # Configure tensorboard\n self.train_writer = Logger(self.output_dir + '/train')\n # Will create train + valid writer later if needed later\n self.valid_writer = None\n self.test_writer = None\n\n\n\n def write(self, name, value, step=None, mode=Mode.TRAIN):\n \"\"\"\n Write metric to tensorboard\n :param name: (str) Metric name\n :param value: (float) Metric value\n :param step: (int, optional) Current step\n :param mode: (Mode: TRAIN, VALID or TEST)\n :return:\n \"\"\"\n # Get appropriate writer based on mode\n writer = self._get_writer(mode)\n writer.log_value(name, value, step)\n\n def write_test_summary(self, summary: dict, filename='summary.csv'):\n \"\"\"\n Write summary metrics to csv row and appends to summary csv file\n - Adds in parameters to row\n - If summary.csv does not exist then make\n :param summary: (dict) metrics\n :param filename: (str, optional) filename\n :return:\n \"\"\"\n # Get location of summary file\n output_file = self.base_dir + '/' + filename\n\n # Create static columns\n columns = ['timestamp'] + ['fold']\n values = [time.strftime('%Y-%m-%d %H:%M:%S')] + [str(self.fold)]\n\n # Add parameter columns\n parameter_cols = [p for p in list(sorted(self.params.keys())) if p != 'loc']\n columns += parameter_cols\n values += [str(self.params[k]).replace(',', '-') for k in parameter_cols]\n\n # Add metric columns\n columns += list(sorted(summary.keys()))\n values += [str(summary[k]).replace(',', '-') for k in list(sorted(summary.keys()))]\n\n if not os.path.exists(output_file):\n # Write header to file\n with open(output_file, 'w') as f:\n f.write(','.join(columns) + '\\n')\n # Write score\n with open(output_file, 'a') as f:\n row = ','.join(values)\n f.write(str(row) + '\\n')\n\n def save_checkpoint(self, model: dict, epoch=0):\n 
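write_test_summary above hand-rolls CSV escaping by stripping commas out of values. A sketch of the same append-with-header-guard pattern using the csv module, which quotes embedded commas instead of discarding them:

```python
# Append a row, writing the header only when the file is created.
import csv
import os

def append_row(path, columns, values):
    write_header = not os.path.exists(path)
    with open(path, "a", newline="") as f:
        writer = csv.writer(f)
        if write_header:
            writer.writerow(columns)
        writer.writerow(values)
```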
\"\"\"\n Save model to checkpoint .json file\n\n :param model: (dict) Model representation as dict\n :param epoch: (int) Current epoch\n :return:\n \"\"\"\n output_loc = self.output_dir + '/checkpoints'\n if not os.path.exists(output_loc):\n os.makedirs(output_loc)\n filename = 'model_checkpoint_epoch_%s.json' % epoch\n with open(output_loc + '/' + filename, 'w') as f:\n json.dump(model, f)\n\n\n def _create_base_output_folder(self, base_output_loc: str):\n \"\"\"\n Create the base folder structure for experiment dataset\n :base_output_loc: (str) Parent folder to create experiment folder\n :return: (str) Location of base dataset folder\n \"\"\"\n # Convert dict into neat path\n key_params = {\n 'err': self.params['loss_type'],\n 'layers': str(self.params['layer_sizes']),\n 'act': self.params['layer_activations'],\n 'seed': self.params['random_seed'],\n 'eta': self.params['eta']}\n\n id = self._replace_multiple(str(key_params), ['{', '}', ':', ',', \"'\", \" \", '=_'],\n ['', '', '=', '', \"\", \"_\", '='])\n if self.fold is None:\n path = '%s/%s/%s_%s' % (base_output_loc, self.dataset, self.dataset, id)\n else:\n path = '%s/%s/%s_%s/fold_%s' % (\n base_output_loc, self.dataset, self.dataset, id, self.fold)\n if not os.path.exists(path):\n os.makedirs(path)\n return path\n\n\n def _get_writer(self, mode: Mode):\n \"\"\"\n Util - Gets the filewriter from mode\n :param mode:\n :return:\n \"\"\"\n if mode == FileWriter.Mode.TRAIN:\n writer = self.train_writer\n elif mode == FileWriter.Mode.VALID:\n if self.valid_writer is None:\n self.valid_writer = Logger(self.output_dir + '/valid')\n writer = self.valid_writer\n elif mode == FileWriter.Mode.TEST:\n if self.test_writer is None:\n self.test_writer = Logger(self.output_dir + '/test')\n writer = self.test_writer\n else:\n raise NotImplementedError\n return writer\n\n def _replace_multiple(self, s: str, chars_replace: [str], chars_insert: [str]):\n \"\"\"\n # Util - Replaces multiple chars by iterate over the string\n :param s: (str) Target string\n :param chars_replace: (list (str)) Characters to replace\n :param chars_insert: (list (str)) Corresponding replacement characters\n :return:\n \"\"\"\n assert len(chars_replace) == len(chars_insert), \\\n 'Chars to replace must be equal to chars to insert!'\n for i, elem in enumerate(chars_replace):\n s = s.replace(elem, chars_insert[i]) if elem in s else s\n return s\n","sub_path":"writer/base_writer.py","file_name":"base_writer.py","file_ext":"py","file_size_in_byte":5882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"100183106","text":"import os\nimport requests\nimport http.cookiejar as cookielib\nimport re\nimport io\nimport json\nimport time\n\nimport numpy as np\n\nfrom lxml import etree\n\nimport base.help as hp\n\n\ndef fromlocal():\n data = dict()\n\n rootdir = './res/choice'\n jsonlist = os.listdir(rootdir) #列出文件夹下所有的目录与文件\n for i in range(0,len(jsonlist)):\n path = os.path.join(rootdir,jsonlist[i])\n\n if os.path.isfile(path):\n with open(path, 'r', encoding='utf-8') as f:\n try:\n while True:\n line = f.readline()\n if line:\n # only 1 line exist.\n data[f.name] = json.loads(line)['stocks']\n else:\n break\n except:\n f.close()\n\n return data\n \ndef fromxueqiu():\n with requests.session() as session:\n \n session.cookies = cookielib.LWPCookieJar(filename=\"cookies\")\n try:\n session.cookies.load(ignore_discard=True)\n except:\n print(\"Cookie can't load\")\n\n agent = 'Mozilla/5.0 (Windows NT 5.1; rv:33.0) Gecko/20100101 
Firefox/33.0'\n headers = {'Host': 'xueqiu.com',\n 'Referer': 'https://xueqiu.com/',\n 'Origin':'https://xueqiu.com',\n 'User-Agent': agent}\n\n data = {'username':'630571565@qq.com','password':'xq801226'}\n\n url='https://xueqiu.com/snowman/login'\n s = session.post(url,data=data,headers=headers)\n session.cookies.save()\n \n '''\n # all\n url = 'https://xueqiu.com/v4/stock/portfolio/stocks.json?size=1000&tuid=6944351461&pid=-1&category=2&type=1'\n \n # in process\n https://xueqiu.com/v4/stock/portfolio/stocks.json?size=1000&tuid=6944351461&pid=16&category=2&type=2&_=1512610504848\n # working\n https://xueqiu.com/v4/stock/portfolio/stocks.json?size=1000&tuid=6944351461&pid=29&category=2&type=2&_=1512610504753\n # nengyuan\n https://xueqiu.com/v4/stock/portfolio/stocks.json?size=1000&tuid=6944351461&pid=11&category=2&type=2&_=1512610504864\n # 乐\n https://xueqiu.com/v4/stock/portfolio/stocks.json?size=1000&tuid=6944351461&pid=25&category=2&type=2&_=1512610504875\n #衣\n https://xueqiu.com/v4/stock/portfolio/stocks.json?size=1000&tuid=6944351461&pid=28&category=2&type=2&_=1512610504888\n #食\n https://xueqiu.com/v4/stock/portfolio/stocks.json?size=1000&tuid=6944351461&pid=27&category=2&type=2&_=1512610504899\n #住\n https://xueqiu.com/v4/stock/portfolio/stocks.json?size=1000&tuid=6944351461&pid=31&category=2&type=2&_=1512610504913\n #行\n https://xueqiu.com/v4/stock/portfolio/stocks.json?size=1000&tuid=6944351461&pid=26&category=2&type=2&_=1512610504961\n #金\n https://xueqiu.com/v4/stock/portfolio/stocks.json?size=1000&tuid=6944351461&pid=30&category=2&type=2&_=1512610504942\n #辅\n https://xueqiu.com/v4/stock/portfolio/stocks.json?size=1000&tuid=6944351461&pid=32&category=2&type=2&_=1512610504961\n\n #沪深\n https://xueqiu.com/v4/stock/portfolio/stocks.json?size=1000&tuid=6944351461&pid=18&category=2&type=5&_=1512610504998\n '''\n\n data = dict()\n urlTpl = 'https://xueqiu.com/v4/stock/portfolio/stocks.json?size=1000&tuid=6944351461&pid={0}&category=2&type=2'\n for i in np.arange(16, 17):\n url = \"https://xueqiu.com/v4/stock/portfolio/stocks.json?size=1000&tuid=6944351461&pid=26&category=2&type=2&_=1512610504961\"\n\n response=session.get(url,headers=headers)\n if response.status_code != 200 or response.content is None:\n print('failed to get: ', url)\n print('status: ', response.status_code, '; content: ', hp.bytetostr(response.content))\n continue\n\n mychoice = response.json()\n stocks = mychoice['stocks']\n if stocks:\n data[i] = stocks\n\n return data\n\n","sub_path":"security/choice.py","file_name":"choice.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"190724922","text":"import time\n\nfrom diplomamunka.main.dao.DatasetType import DatasetType\nfrom diplomamunka.main.service.util.Metrics import Metrics, CalculateTopN\nfrom surprise import KNNBaseline\n\nclass RecommenderAlgorithm:\n\n def __init__(self, algorithm, name, datasetName):\n self.algorithm = algorithm\n self.name = name\n self.datasetName = datasetName\n\n # use stopwatch here\n # will return metrics here\n def evaluate(self, trainSet, testSet, popularityRankings):\n print(\"\\nEvaluating dataset [{}] using algorithm [{}]...START!\\n\".format(self.datasetName, self.name))\n metrics = Metrics(self.name, self.datasetName)\n ratingThreshold = 8 if self.datasetName == DatasetType.JESTER.value else 4\n startTime = time.time()\n\n # do some stuff here\n # fit - Here the model learns from the trainSet\n print(\"Fitting...START!\")\n 
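The session setup in this choice.py record persists login cookies with an LWPCookieJar so later runs can skip re-authentication. The pattern in isolation; passing ignore_discard=True to save is an assumption added here so session-only cookies survive, where the original calls save() bare.

```python
# Persist a requests session's cookies across runs via LWPCookieJar.
import http.cookiejar as cookielib
import requests

session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename="cookies")
try:
    session.cookies.load(ignore_discard=True)
except (FileNotFoundError, cookielib.LoadError):
    pass  # first run, or unreadable jar: start empty
# ... authenticate with session.post(...) as above ...
session.cookies.save(ignore_discard=True)
```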
self.algorithm.fit(trainSet)\n print(\"Fitting ...END!\")\n\n # test - Here the model tries to make predictions from a testSet,\n # depending on the knowledge it gathered while learning from the trainSet\n # Required for MAE, RMSE and topNPredicted\n print(\"Testing...START!\")\n predictions = self.algorithm.test(testSet)\n print(\"Testing...END!\")\n\n # Required for Coverage, Diversity and Novelty\n print(\"Calculating top-N predictions...START!\")\n topNPredicted = CalculateTopN(predictions, minimumRating=ratingThreshold)\n print(\"Calculating top-N predictions...END!\")\n\n # Required for Diversity\n print(\"Calculating similarity matrix...START!\")\n similarityMatrix = KNNBaseline(sim_options={'name': 'pearson', 'user_based': False})\n similarityMatrix.fit(trainSet)\n print(\"Calculating similarity matrix...END!\")\n\n metrics.calculateMetrics(predictions, topNPredicted, trainSet.n_users, similarityMatrix, popularityRankings, ratingThreshold=ratingThreshold)\n\n endTime = time.time()\n\n # Scalability - Runtime\n wholeProcessInSeconds = endTime - startTime\n\n # calculate metrics + add runtime to the Metrics object\n # return with the Metrics object\n metrics.setScalability(wholeProcessInSeconds)\n print(\"\\nEvaluating dataset [{}] using algorithm [{}]...DONE!\\n\".format(self.datasetName, self.name))\n return metrics\n\n def getAlgorithm(self):\n return self.algorithm\n\n def getAlgorithmName(self):\n return self.name\n\n def setDatasetName(self, datasetName):\n self.datasetName = datasetName\n","sub_path":"venv/diplomamunka/main/service/recommender/algorithm/RecommenderAlgorithm.py","file_name":"RecommenderAlgorithm.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"379794421","text":"from rest_framework.views import APIView\nfrom customadmin.models import ServiceCategory, Service, TimeSlot, BookedService, TransactionDetail\nfrom ..serializers import ServiceCategoryListingSerializer, ServiceListingSerializer, ServiceTimeSlotSerializer, TransactionDetailSerializer, ServiceListingByCategorySerializer, ServiceDetailSerializer\nfrom numerology.permissions import get_pagination_response\nfrom numerology.helpers import custom_response\nfrom rest_framework import status\nfrom numerology.permissions import IsAccountOwner\nfrom numerology.utils import MyStripe, create_card_object, create_customer_id, create_charge_object\n\n\nclass ServiceCategoryListingAPIView(APIView):\n \"\"\"\n Service Category listing View\n \"\"\"\n serializer_class = ServiceCategoryListingSerializer\n\n def get(self, request):\n service_categories = ServiceCategory.objects.filter(active=True)\n result = get_pagination_response(service_categories, request, self.serializer_class, context = {\"request\": request})\n message = \"Service Categories fetched Successfully!\"\n return custom_response(True, status.HTTP_200_OK, message, result)\n\n\nclass ServiceListingAPIView(APIView):\n \"\"\"\n Service listing View\n \"\"\"\n serializer_class = ServiceListingByCategorySerializer\n\n def get(self, request):\n categories = ServiceCategory.objects.filter(active=True)\n result = get_pagination_response(categories, request, self.serializer_class, context = {\"request\": request})\n message = \"Services fetched Successfully!\"\n return custom_response(True, status.HTTP_200_OK, message, result)\n\n\nclass PackageListingAPIView(APIView):\n \"\"\"\n Service listing View\n \"\"\"\n serializer_class = ServiceListingSerializer\n\n def 
get(self, request):\n services = Service.objects.filter(active=True)\n result = self.serializer_class(services, many=True, context = {\"request\": request})\n message = \"Services fetched Successfully!\"\n return custom_response(True, status.HTTP_200_OK, message, result.data)\n\n\n\nclass ServiceDetailAPI(APIView):\n \"\"\"\n Service listing View\n \"\"\"\n serializer_class = ServiceDetailSerializer\n\n def get(self, request, pk):\n service = Service.objects.filter(active=True, pk=pk)\n if not service:\n message = \"Service not found!\"\n return custom_response(True, status.HTTP_200_OK, message)\n serializer = self.serializer_class(service[0], context = {\"request\": request})\n message = \"Service fetched Successfully!\"\n return custom_response(True, status.HTTP_200_OK, message, serializer.data)\n\n\nclass ServiceTimeSlotsAPIView(APIView):\n \"\"\"\n Service listing View\n \"\"\"\n serializer_class = ServiceTimeSlotSerializer\n\n def get(self, request):\n time_slots = TimeSlot.objects.filter(active=True)\n result = get_pagination_response(time_slots, request, self.serializer_class, context = {\"request\": request})\n message = \"Service time slots fetched Successfully!\"\n return custom_response(True, status.HTTP_200_OK, message, result)\n\n\nclass ServiceBookingAPIView(APIView):\n \"\"\"\n API View to purchase product\n \"\"\"\n permission_classes = (IsAccountOwner,)\n\n def post(self, request, format=None):\n \"\"\"POST method to create the data\"\"\"\n try:\n if \"service\" not in request.data:\n message = \"Service are required!\"\n return custom_response(False, status.HTTP_400_BAD_REQUEST, message)\n\n if \"service_date\" not in request.data or \"service_time\" not in request.data:\n message = \"Service date and time are required!\"\n return custom_response(False, status.HTTP_400_BAD_REQUEST, message)\n\n if \"total_amount\" not in request.data :\n message = \"total_amount is required!\"\n return custom_response(False, status.HTTP_400_BAD_REQUEST, message)\n\n check_booking = BookedService.objects.filter(service_date=request.data['service_date'], service_time=request.data['service_time'])\n if check_booking:\n message = \"This time slot is already booked. 
Please select another.\"\n return custom_response(False, status.HTTP_400_BAD_REQUEST, message)\n\n\n if \"card_id\" in request.data:\n card_id = request.data[\"card_id\"]\n stripe = MyStripe()\n customer_id = request.user.customer_id\n\n if not customer_id:\n newcustomer = create_customer_id(request.user)\n customer_id = newcustomer.id\n print(\"<<<-----|| CUSTOMER CREATED ||----->>>\")\n newcard = stripe.create_card(customer_id, request.data)\n data = create_card_object(newcard, request)\n card_id = newcard.id\n print(\"<<<-----|| CARD CREATED ||----->>>\")\n\n newcharge = stripe.create_charge(request.data, card_id, customer_id)\n charge_object = create_charge_object(newcharge, request)\n\n chargeserializer = TransactionDetailSerializer(data=charge_object)\n if chargeserializer.is_valid():\n chargeserializer.save()\n print(\"<<<-----|| TransactionDetail CREATED ||----->>>\")\n\n transaction = TransactionDetail.objects.filter(pk=chargeserializer.data['id'])\n\n service = Service.objects.filter(pk=request.data['service'])\n booked_service = BookedService()\n booked_service.service = service[0]\n booked_service.booking_charge = service[0].booking_charge\n booked_service.user = request.user\n booked_service.service_date = request.data['service_date']\n booked_service.service_time = request.data['service_time']\n booked_service.transaction_detail = transaction[0]\n booked_service.save()\n message = \"Service booked successfully!\"\n return custom_response(True, status.HTTP_201_CREATED, message)\n else:\n message = \"Card_id is required\"\n return custom_response(False, status.HTTP_400_BAD_REQUEST, message)\n\n except Exception as inst:\n print(inst)\n message = str(inst)\n return custom_response(False, status.HTTP_400_BAD_REQUEST, message)","sub_path":"app/api/views/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":6496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"652452891","text":"import requests\nfrom bs4 import BeautifulSoup\n\nresponse = requests.get(\"https://stackoverflow.com/questions\")\n# response.text # returns the HTML content of this webpage\n\n# soup mirrors the structure of our HTML document\n# so we can easily navigate this HTML and find various elements\nsoup = BeautifulSoup(response.text, \"html.parser\")\n\nquestions = soup.select(\".question-summary\")\n\n# print(type(questions[0])) # each question is a type of the `Tag` class\n# print(questions[0].attrs)\n\n# if this attribute doesn't exist we will get an exception\n# print(questions[0][\"id\"]) # we can get each attribute by its key\n\n# safer way is to use the get() method\n# print(questions[0].get(\"id\", 0))\n\n# let's get the title for each question\n# Tag object also has a select() method like its super object\n# print(questions[0].select(\".question-hyperlink\"))\n\n# since we don't need a list, we can use select_one() to return one object.\n# this is useful in cases where we know we're dealing with one single element\n# print(questions[0].select_one(\".question-hyperlink\"))\n\n# let's get the content of our tag\n# print(questions[0].select_one(\".question-hyperlink\").getText())\n\n# iterate over questions and get the title of each\nfor question in questions:\n print(question.select_one(\".question-hyperlink\").getText())\n print(question.select_one(\".vote-count-post\").getText())\n","sub_path":"machine learning/complete_python_programming_for_beginners/popular python 
packages/pycrawler/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"244340858","text":"import random\r\nfrom functools import reduce\r\n\r\nprint(\"Atk Speed and Engage Duration must be given in seconds\")\r\n\r\ndef weapon_damage(damage_min, damage_max, atk_speed, engage_duration):\r\n\r\n    total_hits = engage_duration/atk_speed\r\n    hit_arr = range(0, int(total_hits+1))\r\n    hits_damage_storage = []\r\n\r\n    for x in hit_arr:\r\n        # random.randint is inclusive on both ends, so pass damage_max itself\r\n        hit_damage = float(random.randint(damage_min, damage_max))\r\n        hits_damage_storage.append(hit_damage)\r\n\r\n    # sum of all hits over the whole engage\r\n    damage_per_second = reduce((lambda a, b: a + b), hits_damage_storage)\r\n    print(\"Total damage:\",damage_per_second)\r\n    print(\"DPS:\", damage_per_second/engage_duration)\r\n\r\nplayer_min_input = int(input(\"Enter your weapon's minimum damage\"))\r\nplayer_max_input = int(input(\"Enter your weapon's maximum damage\"))\r\nplayer_atk_speed_input = float(input(\"Enter your weapon's attack speed\"))\r\nplayer_engage_duration_input = float(input(\"Enter the fight duration\"))\r\nweapon_damage(player_min_input, player_max_input, player_atk_speed_input, player_engage_duration_input)","sub_path":"Mini Recount.py","file_name":"Mini Recount.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"20183130","text":"import pandas as pd\nimport numpy as np\nimport os\nimport shutil\nfrom keras import Sequential\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import np_utils\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.layers import Conv2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.core import Flatten\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers\nfrom keras import callbacks\nfrom keras import regularizers\nfrom keras.optimizers import Adam\nfrom keras.models import load_model\nfrom PIL import Image\nimport cv2\n\n# model_square = load_model('opencv_classifier_14_rectangle_973.h5')\n# model_circle = load_model('opencv_classifier_15_circle_956.h5')\n# model_triangle = load_model('opencv_classifier_21_triangle_943.h5')\nLOADING_PATH = 'train_save_pic/'\nOUTPUT_PATH = 'opencv_result/'\nsgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)\nadadelta = optimizers.Adadelta(lr=0.50, rho=0.95, epsilon=1e-06)\nDATANUM = 6400\n\n\n# generate images (copy each training image into its label folder)\ndef image_output(path = None):\n    result_frame = pd.read_csv('./train.csv')\n    result = result_frame[['square', 'triangle', 'circle']].values\n    labels = 4 * result[:DATANUM, 0] + 2 * result[:DATANUM, 1] + result[:DATANUM, 2] - 1\n    for idx in range(1, 8000):\n        if idx >= DATANUM:\n            break\n        label_path = os.path.join('./train_save_pic', str(labels[idx]))\n        if not os.path.exists(label_path):\n            os.mkdir(label_path)\n        file_path = os.path.join(LOADING_PATH + \"hackthoon308train0\" + str(idx).zfill(4) + \".jpg\")\n        print(labels[idx], file_path)\n        shutil.copy(file_path, label_path)\n\n\nif __name__ == '__main__':\n    # image_output()\n\n    model = Sequential()\n    # conv layer 1 as follows\n    model.add(Conv2D(nb_filter=96, nb_row=3, nb_col=3, border_mode='same', input_shape=(48, 48, 3), activation='relu',\n                     kernel_regularizer=regularizers.l2(0.02)))\n    model.add(Dropout(0.5))\n\n    # pooling layer 1 as follows\n    model.add(MaxPooling2D(\n        pool_size=(2, 2),\n        strides=(2, 2),\n        border_mode='same'))\n    
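# Editor's note (a sketch, not part of the original file): with the Keras 1-style\n    # kwargs above (nb_filter/nb_row/nb_col, border_mode), this stack is\n    # Conv -> Pool -> BatchNorm -> Conv; BatchNormalization() normalizes the pooled\n    # activations per mini-batch before they reach the second convolution.\n    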
model.add(BatchNormalization())\n # conv layer 2 as follows\n model.add(Conv2D(128, 3, 3, border_mode='same', activation='relu'))\n model.add(Dropout(0.5))\n\n ########################\n model.add(Flatten())\n\n model.add(Dense(96, activation='relu'))\n model.add(Dropout(0.5))\n\n ########################\n model.add(Dense(7, activation='softmax'))\n\n ########################\n adam = Adam(lr=1e-4)\n\n ########################\n model.compile(optimizer=adadelta,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n train_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\n train_generator = train_datagen.flow_from_directory(\n './train_save_pic',\n target_size=(48, 48),\n batch_size=32,\n class_mode='categorical')\n\n model.fit_generator(\n train_generator,\n steps_per_epoch=2000,\n epochs=5)\n\n # 载入测试集\n X = []\n for i in range(1600):\n path = LOADING_PATH + \"hackthoon308train0\" + str(i).zfill(4) + \".jpg\"\n img = cv2.imread(path)\n img = cv2.resize(img, (48, 48), interpolation=cv2.INTER_NEAREST)\n X.append(img)\n X = np.array(X)\n result_frame = pd.read_csv('./train.csv')\n result = result_frame[['square', 'triangle', 'circle']].values\n labels = 4 * result[:1600, 0] + 2 * result[:1600, 1] + result[:1600, 2] - 1\n\n y_test = np_utils.to_categorical(labels, num_classes=7)\n\n print(\"load complete!\")\n loss, accuracy = model.evaluate(X, y_test)\n print(\"process complete.\")\n print('\\ntest loss: ', loss)\n print('\\ntest accuracy: ', accuracy)\n\n model.save('opencv_classifier_21.h5') # HDF5文件,pip install h5py\n print('\\nSuccessfully saved as opencv_classifier_21.h5')","sub_path":"OpenCV_classification/Gnerate_opencv.py","file_name":"Gnerate_opencv.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"163939349","text":"\"\"\"\nCoordinate Transformation Visualization\n\n\nAuthor: Zhongyi Zhou\nABB\nApr. 24, 2018\n\"\"\"\n\nimport numpy as np\nfrom mayavi import mlab\nimport xlrd\n\ndef read_sheet(filename):\n \"\"\"\n This function is used to read the excel file.\n The recording format should be as follow:\n No. 
X Y Z alpha beta gamma\n 1\n 2\n 3\n 4\n 5\n 6\n .\n .\n .\n\n So the function will read the data from B1 to the end.\n\n :param filename: excel filepath\n :return: measured data\n \"\"\"\n data = xlrd.open_workbook(filename)\n table = data.sheets()[0]\n DOF = table.ncols - 1\n sample_num = table.nrows - 1\n mea_data = np.zeros((sample_num, DOF))\n print(\"sample number =\", sample_num)\n print(\"degree of freedom =\", DOF)\n for i in range(1, table.nrows):\n for j in range(1, table.ncols):\n # print(table.cell(i, j).value)\n mea_data[i - 1, j - 1] = table.cell(i, j).value\n\n return mea_data\n\n\ndef axis_plot(kp_arr):\n \"\"\"\n Plot the axiss according to 4 key points.\n RGB -> XYZ\n\n :param kp_arr: key points array\n :return: None\n \"\"\"\n if kp_arr.shape != (4,4):\n print(\"kp_arr format error!\")\n else:\n mlab.plot3d([kp_arr[0,0],kp_arr[0,1]], [kp_arr[1,0],kp_arr[1,1]], [kp_arr[2,0],kp_arr[2,1]], color = (1,0,0), tube_radius = 1)\n mlab.plot3d([kp_arr[0,0],kp_arr[0,2]], [kp_arr[1,0],kp_arr[1,2]], [kp_arr[2,0],kp_arr[2,2]], color = (0,1,0), tube_radius = 1)\n mlab.plot3d([kp_arr[0,0],kp_arr[0,3]], [kp_arr[1,0],kp_arr[1,3]], [kp_arr[2,0],kp_arr[2,3]], color = (0,0,1), tube_radius = 1)\n\ndef axis_plot_all(axis_list):\n \"\"\"\n Plot all the coordinate axis in the axis_list\n RGB -> XYZ\n\n :param axis_list: list of key points arrays\n :return: None\n \"\"\"\n for arr in axis_list:\n axis_plot(arr)\n mlab.show()\n\ndef visual_step_by_step(homo_model, axis_base_arr, test_ind):\n \"\"\"\n This function helps beginners do coordinate transformation and visualize it step by step.\n\n :param homo_model: dictionary data loaded from .npz file, containing the transformation data\n :param axis_base_arr: the base axis array\n :param test_ind: indicator of test image.\n :return: None\n \"\"\"\n objbase2objstd_list = homo_model['objbase2objstd_list']\n objstd2obj_list = homo_model['objstd2obj_list']\n obj2cam_list = homo_model['obj2cam_list']\n cam2board = homo_model['cam2board'] \n board2std = homo_model['board2std']\n std2base = homo_model['std2base']\n print(\"data loading success!\")\n\n # base\n axis_base = axis_base_arr\n axis_list.append(axis_base)\n print(\"axis_base =\", axis_base)\n axis_plot_all(axis_list)\n\n # std\n axis_std = np.dot(std2base, axis_base_arr)\n axis_list.append(axis_std)\n print(\"axis_std =\", axis_std)\n axis_plot_all(axis_list)\n\n # board\n axis_board = std2base @ board2std @ axis_base_arr\n axis_list.append(axis_board)\n print(\"axis_board =\", axis_board)\n axis_plot_all(axis_list)\n\n #camera\n #axis_cam = np.dot(cam2board, axis_board)\n axis_cam = std2base @ board2std @ cam2board @ axis_base_arr\n axis_list.append(axis_cam)\n print(\"axis_cam =\", axis_cam)\n axis_plot_all(axis_list)\n\n #object\n obj2cam = obj2cam_list[test_ind]\n #axis_obj = obj2cam @ axis_cam\n axis_obj = std2base @ board2std @ cam2board @ obj2cam @ axis_base_arr\n axis_list.append(axis_obj)\n print(\"axis_obj =\", axis_obj)\n axis_plot_all(axis_list)\n\n #base2\n objstd2obj = objstd2obj_list[test_ind]\n axis_objstd = std2base @ board2std @ cam2board @ obj2cam @ objstd2obj @ axis_base_arr\n axis_list.append(axis_objstd)\n print(\"axis_objstd =\", axis_objstd)\n axis_plot_all(axis_list)\n\n objbase2objstd = objbase2objstd_list[test_ind]\n axis_objbase = std2base @ board2std @ cam2board @ obj2cam @ objstd2obj @ objbase2objstd @ axis_base_arr\n axis_list.append(axis_objbase)\n print(\"axis_objbase =\", axis_objbase)\n axis_plot_all(axis_list)\n\n\nwld_excel_path = 
\"./0326_measurement/wld_data.xlsx\"\n\n# base\naxis_base = [[0.0, 0.0, 0.0, 1.0],[50.0, 0.0, 0.0, 1.0],[0.0, 50.0, 0.0, 1.0],[0.0, 0.0, 50.0, 1.0]]\naxis_list = []\naxis_base_arr = np.array(axis_base).T\nprint(\"axis_base_arr =\", axis_base_arr)\naxis_list.append(axis_base_arr)\nprint(\"first trans display finished!\")\n\n# read data\ncam_fig_path = \"./0326_measurement/prl/left/\"\ndatafile = cam_fig_path + \"/homo_trans.npz\"\nhomo_model = np.load(datafile)\nobjbase2objstd_list = homo_model['objbase2objstd_list']\nobjstd2obj_list = homo_model['objstd2obj_list']\nobj2cam_list = homo_model['obj2cam_list']\ncam2board = homo_model['cam2board'] \nboard2std = homo_model['board2std']\nstd2base = homo_model['std2base']\nprint(\"data loading success!\")\n\nDOF6 = read_sheet(wld_excel_path)\ntest_ind = 2\nvisual_step_by_step(homo_model, axis_base_arr, test_ind)\n","sub_path":"cdn_trans_visual.py","file_name":"cdn_trans_visual.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"588957601","text":"from nltk import SnowballStemmer\nfrom nltk import clean_html\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nfrom nltk.corpus import stopwords\nimport re\n\n# Tokenizing (Document to list of sentences. Sentence to list of words.)\ndef tokenize(txt):\n '''Tokenize (sentence then words) filter out punctuation and change to lower case.'''\n tokens = []\n sentences = sent_tokenize(txt.replace(\"'\", \"\"))\n sentences = [\" \".join(re.findall(r'\\w+', s, flags = re.UNICODE | re.LOCALE)).lower() for s in sentences]\n for stn in sentences:\n tokens += word_tokenize(stn)\n return tokens\n\n\n# The preprocess pipeline. Returns as lists of tokens or as string.\n# If stemmer_type = False or not supported then no stemming.\ndef tokenize_and_stem(txt, stem=True, remove_html=True, join=False, remove_stopwords=True):\n ''' Remove html and stopwords, tokenize and stem. '''\n\n lang = 'english'\n if remove_html:\n txt = clean_html(txt)\n\n words = tokenize(txt)\n if remove_stopwords:\n stop_words = stopwords.words(lang)\n words = [w for w in words if w.lower() not in stop_words]\n\n if stem:\n stemmer = SnowballStemmer(lang)\n words = [stemmer.stem(word).encode(encoding=\"utf8\") for word in words]\n\n if join:\n words = \" \".join(words)\n\n return words\n\n","sub_path":"stumble/python/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"13463987","text":"#\n# Copyright (c) YugaByte, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. 
See the License for the specific language governing permissions and limitations\n# under the License.\n#\n\nimport os\nimport sys\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom build_definitions import *\n\n\nPROJECT_CONFIG = \"\"\"\nlibraries = {5} ;\n\nusing {0} : {1} :\n {2} :\n {3}\n {4} ;\n\"\"\"\n\n\nclass BoostDependency(Dependency):\n def __init__(self):\n super(BoostDependency, self).__init__(\n 'boost', '1.69.0',\n 'https://dl.bintray.com/boostorg/release/{0}/source/boost_{1}.tar.bz2',\n BUILD_GROUP_INSTRUMENTED)\n self.dir = '{}_{}'.format(self.name, self.underscored_version)\n self.copy_sources = True\n self.patches = ['boost-1-69-remove-pending-integer_log2-include.patch']\n self.patch_strip = 1\n\n def build(self, builder):\n libs = ['system', 'thread']\n\n log_prefix = builder.log_prefix(self)\n log_output(log_prefix, ['./bootstrap.sh', '--prefix={}'.format(builder.prefix)])\n project_config = 'project-config.jam'\n with open(project_config, 'rt') as inp:\n original_lines = inp.readlines()\n with open(project_config, 'wt') as out:\n for line in original_lines:\n lstripped = line.lstrip()\n if not lstripped.startswith('libraries =') and \\\n not lstripped.startswith('using gcc ;') and \\\n not lstripped.startswith('project : default-build gcc ;'):\n out.write(line)\n cxx_flags = builder.compiler_flags + builder.cxx_flags\n if '-nostdinc++' in cxx_flags:\n cxx_flags.remove('-nostdinc++')\n compiler_type = builder.compiler_type\n compiler_version = ''\n if compiler_type == 'gcc8':\n compiler_type = 'gcc'\n compiler_version = '8'\n out.write(PROJECT_CONFIG.format(\n compiler_type,\n compiler_version,\n builder.get_cxx_compiler(),\n ' '.join(['' + flag for flag in cxx_flags]),\n ' '.join(['' + flag for flag in cxx_flags + builder.ld_flags]),\n ' '.join(['--with-{}'.format(lib) for lib in libs])))\n log_output(log_prefix, ['./b2', 'install', 'cxxstd=14'])\n\n if is_mac():\n for lib in libs:\n path = os.path.join(builder.prefix_lib, self.libfile(lib, builder))\n log_output(log_prefix, ['install_name_tool', '-id', path, path])\n for sublib in libs:\n sublib_file = self.libfile(sublib, builder)\n sublib_path = os.path.join(builder.prefix_lib, sublib_file)\n log_output(log_prefix, ['install_name_tool', '-change', sublib_file,\n sublib_path, path])\n\n def libfile(self, lib, builder):\n return 'libboost_{}.{}'.format(lib, builder.dylib_suffix)\n","sub_path":"thirdparty/build_definitions/boost.py","file_name":"boost.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"459441336","text":"import test_data\nimport sys\nimport json\n\n#Creates and returns a GameLibrary object(defined in test_data) from loaded json_data\ndef make_game_library_from_json(json_data):\n #Initialize a new GameLibrary\n game_library = test_data.GameLibrary()\n with open(json_data) as data_file:\n jason = json.load(data_file)\n for i in jason:\n newGame = test_data.Game(i[\"title\"],i[\"platform\"],i[\"Year\"])\n game_library.add_game(newGame)\n\n #Loop through the json_data\n #Create a new Game object from the json_data by reading\n # title\n # year\n # platform (which requires reading name and launch_year)\n #Add that Game object to the game_library\n #Return the completed game_library\n\n #for i in json_data:\n #newGame = test_data.Game(json_data[i][\"title\"], json_data[i][\"platform\"], json_data[i][\"Year\"])\n #game_library.add_game(newGame)\n\n\n\n return 
game_library\n\n# Handling command line arguments\n# Note: sys.argv is a list of strings that contains each command line argument\n# The first element in the list is always the name of the python file being run\n# Command line format: \n\ndefault_input_json_file = \"data/test_data.json\"\n\nif len(sys.argv) == 2:\n    input_json_file = sys.argv[1]\n    print(\"Using command line args:\", input_json_file)\nelse:\n    print(\"Unknown command line options. Using default values:\", default_input_json_file)\n    input_json_file = default_input_json_file\n\n#Load the json data from the input file\n#Use make_game_library_from_json(json_data) to convert the data to GameLibrary data\n#Print out the resulting GameLibrary data using print_game_library(game_library_data) in test_data.py\n\n\n# use the file selected above rather than a hard-coded path\ngame_library_data = make_game_library_from_json(input_json_file)\ntest_data.print_game_library(game_library_data)\n","sub_path":"test_json_utils.py","file_name":"test_json_utils.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"78313439","text":"class Queue:\n    MAX_QSIZE = 100\n    def __init__(self):\n        self.items = [None]*Queue.MAX_QSIZE\n        self.front = -1\n        self.rear = -1\n        self.size = 0\n    def isEmpty(self):\n        return self.size == 0\n    def enqueue(self, e):\n        # grow first, so a full queue still accepts the new element\n        if self.size == len(self.items):\n            self.resize(2*len(self.items))\n        self.rear = (self.rear + 1)%(len(self.items))\n        self.items[self.rear] = e\n        self.size += 1\n    def dequeue(self):\n        if self.isEmpty():\n            print(\"Queue is empty\")\n        else:\n            self.front = (self.front + 1)%(len(self.items))\n            e = self.items[self.front]\n            self.size -= 1\n            return e\n    def resize(self, cap):\n        olditems = self.items\n        self.items = [None]*cap\n        # walk the old buffer, starting just past the front marker,\n        # and copy only the live elements into the new buffer\n        walk = (self.front + 1)%len(olditems)\n        for k in range(self.size):\n            self.items[k] = olditems[walk]\n            walk = (walk+1)%len(olditems)\n        self.front = -1\n        self.rear = self.size - 1\n","sub_path":"QueueClass.py","file_name":"QueueClass.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"70917336","text":"#christopher pullen\r\n#05.12.14\r\n#development exercise 3 functions\r\n\r\ndef details ():\r\n    currency = input (\"please enter your currency (euro, us dollar or british pound)\")\r\n    currency_wanted = input (\"please enter the currency you wish to convert to (euro, us dollar or british pound)\")\r\n    money = int(input( \"please enter the amount of money you wish to be converted\"))\r\n    return currency,currency_wanted,money\r\n\r\ndef conversion (currency,currency_wanted,money):\r\n    # spelling must match what the prompt asks the user to type\r\n    if currency == \"euro\" :\r\n        if currency_wanted == \"us dollar\":\r\n            converted_money = money * 1.302\r\n        else :\r\n            converted_money = money *0.814\r\n    elif currency == \"us dollar\":\r\n        if currency_wanted == \"euro\":\r\n            converted_money = money *0.768\r\n        else :\r\n            converted_money = money *0.625\r\n    else :\r\n        if currency_wanted == \"euro\":\r\n            converted_money = money *1.229\r\n        else:\r\n            converted_money = money *1.601\r\n    return converted_money\r\n\r\ndef output (money,converted_money):\r\n    print (\"{0}={1}\".format (money, converted_money))\r\n    \r\ncurrency, currency_wanted , money = details()\r\nconverted_money = conversion (currency,currency_wanted,money)\r\noutput(money,converted_money)\r\n","sub_path":"Functions development exercise 3.py","file_name":"Functions development exercise 
3.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"381440257","text":"\"\"\"\n data_manipulations.py\n \n The code here is used to perform data manipulation. The following modules are available\n \n 1. Missing value calculation\n 2. Identify the data type for each variables\n 3. Convert Categorical to Numerical using Label encoders\n 4. Impute Numerical columns with a specific value. The default is set to 0.\n 5. Rename columns\n 6. Join X and Y vector using a monotonically increasing row id\n 7. Train, Valid and Test data creator\n 8. Assembling vectors\n 9. Scale Input variables\n\"\"\"\n\n\nfrom pyspark.ml.feature import StringIndexer\nfrom pyspark.ml import Pipeline\nfrom pyspark.sql import *\nfrom pyspark.sql.types import *\nfrom pyspark.sql import functions as F\nfrom pyspark.ml.feature import IndexToString\nfrom pyspark.sql.functions import col\nfrom pyspark.ml.feature import StandardScaler\nfrom pyspark.ml.feature import VectorAssembler\n\n# 1. Missing value calculation\n\ndef missing_value_calculation(X, miss_per=0.75):\n \n missing = X.select([F.count(F.when(F.isnan(c) | F.col(c).isNull(), c)).alias(c) for c in X.columns])\n missing_len = X.count()\n final_missing = missing.toPandas().transpose()\n final_missing.reset_index(inplace=True)\n final_missing.rename(columns={0:'missing_count'},inplace=True)\n final_missing['missing_percentage'] = final_missing['missing_count']/missing_len\n vars_selected = final_missing['index'][final_missing['missing_percentage'] <= miss_per]\n return vars_selected\n\n# 2. Identify the data type for each variables\n\ndef identify_variable_type(X):\n \n l = X.dtypes\n char_vars = []\n num_vars = []\n for i in l:\n if i[1] in ('string'):\n char_vars.append(i[0])\n else:\n num_vars.append(i[0])\n return char_vars, num_vars\n\n# 3. Convert Categorical to Numerical using Label encoders\n\ndef categorical_to_index(X, char_vars):\n chars = X.select(char_vars)\n indexers = [StringIndexer(inputCol=column, outputCol=column+\"_index\",handleInvalid=\"keep\") for column in chars.columns]\n pipeline = Pipeline(stages=indexers)\n char_labels = pipeline.fit(chars)\n X = char_labels.transform(X)\n return X, char_labels\n\n# 4. Impute Numerical columns with a specific value. The default is set to 0.\n\ndef numerical_imputation(X,num_vars, impute_with=0):\n X = X.fillna(impute_with,subset=num_vars)\n return X\n\n# 5. Rename columns\n\ndef rename_columns(X, char_vars):\n mapping = dict(zip([i+ '_index' for i in char_vars], char_vars))\n X = X.select([col(c).alias(mapping.get(c, c)) for c in X.columns])\n return X\n\n# 6. Join X and Y vector using a monotonically increasing row id\n\ndef join_features_and_target(X, Y):\n \n X = X.withColumn('id', F.monotonically_increasing_id())\n Y = Y.withColumn('id', F.monotonically_increasing_id())\n joinedDF = X.join(Y,'id','inner')\n joinedDF = joinedDF.drop('id')\n return joinedDF\n\n# 7. Train, Valid and Test data creator\n\ndef train_valid_test_split(df, train_size=0.4, valid_size=0.3,seed=12345):\n \n train, valid, test = df.randomSplit([train_size, valid_size,1-train_size-valid_size], seed=12345)\n return train,valid,test\n\n# 8. 
Assembling vectors\n\ndef assembled_vectors(train,list_of_features_to_scale,target_column_name):\n \n stages = []\n assembler = VectorAssembler(inputCols=list_of_features_to_scale, outputCol='features')\n stages=[assembler]\n selectedCols = [target_column_name,'features'] + list_of_features_to_scale\n\n pipeline = Pipeline(stages=stages)\n assembleModel = pipeline.fit(train)\n\n train = assembleModel.transform(train).select(selectedCols)\n return train\n\n# 9. Scale Input variables\n\ndef scaled_dataframes(train,valid,test,list_of_features_to_scale,target_column_name):\n\n stages = []\n assembler = VectorAssembler(inputCols=list_of_features_to_scale, outputCol='assembled_features')\n scaler = StandardScaler(inputCol=assembler.getOutputCol(), outputCol='features')\n stages=[assembler,scaler]\n selectedCols = [target_column_name,'features'] + list_of_features_to_scale\n\n pipeline = Pipeline(stages=stages)\n pipelineModel = pipeline.fit(train)\n\n train = pipelineModel.transform(train).select(selectedCols)\n valid = pipelineModel.transform(valid).select(selectedCols)\n test = pipelineModel.transform(test).select(selectedCols)\n\n return train, valid, test, pipelineModel","sub_path":"data_manipulations.py","file_name":"data_manipulations.py","file_ext":"py","file_size_in_byte":4397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"592561232","text":"# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# Make deltas for historical vs projected data\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n\ndef run(duration):\n\n # Get historical data\n # (for the historical data, there should only be one file for this duration)\n hist_file = glob.glob(os.path.join(path,'*_{}_historical_*_{}*_diff.nc'.format(data_group,duration)))[0]\n hist_ds = xr.open_dataset(hist_file)\n\n # Get projected date\n # (There should be one for each decade for this duration)\n proj_files = glob.glob(os.path.join(path,'*_{}_rcp85_*_{}*_diff.nc'.format(data_group,duration)))\n\n # Compute Deltas\n for fn in proj_files:\n proj_ds = xr.open_dataset(fn)\n\n proj_ds['pf'] /= hist_ds['pf']\n\n out_fn = os.path.join(\n out_path,\n os.path.basename(fn)\\\n .replace('_diff.nc', '_deltas.nc')\\\n .replace('rcp85_','')\n )\n proj_ds.to_netcdf(out_fn)\n\n proj_ds.close()\n\n hist_ds.close()\n\n\nif __name__ == '__main__':\n import os, glob, itertools\n import xarray as xr\n # import multiprocessing as mp\n import argparse\n\n # parse some args\n parser = argparse.ArgumentParser( description='Compute deltas for historical vs. projected data.' 
)\n parser.add_argument( \"-p\", \"--path\", action='store', dest='path', type=str, help=\"input directory storing the return interval data.\" )\n parser.add_argument( \"-o\", \"--out_path\", action='store', dest='out_path', type=str, help=\"output directory to write outputs\" )\n parser.add_argument( \"-d\", \"--data_group\", action='store', dest='data_group', type=str, help=\"name of the model to use: either 'NCAR-CCSM4' or 'GFDL-CM3'\" )\n \n # parse the args and unpack\n args = parser.parse_args()\n path = args.path\n out_path = args.out_path\n data_group = args.data_group\n\n DURATIONS_NOAA = ['60m','2h','3h','6h','12h','24h','2d','3d','4d','7d','10d','20d','30d','45d','60d',] # names of the durations in the NOAA tool\n\n for d in DURATIONS_NOAA:\n print(\" duration: {}\".format(d))\n run(d)\n","sub_path":"pipeline/deltas.py","file_name":"deltas.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"456204911","text":"import pandas as pd\nimport torch\nimport torch.nn as nn\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n\n# num_classes, inputs, targets = reader.load_data()\n\n# use working example instead\ninputs = pd.read_csv('../../data/colorsurvey/example_data/inputs_train.csv')\ntargets = pd.read_csv('../../data/colorsurvey/example_data/targets_train.csv')\n\nN = inputs.shape[0]\nD_in = inputs.shape[1]\nD_out = targets.max().values[0] + 1\nH = 9\n\nprint(N, D_in, H, D_out)\n\nx = torch.tensor(inputs.values, device=device, dtype=dtype)\ny = torch.tensor(targets.values, device=device, dtype=torch.long).squeeze()\n\n# Hyper-parameters\nlearning_rate = 0.0005\nbatch_size = 64\n\n# Neuronal Network\nmodel = torch.nn.Sequential(\n torch.nn.Linear(D_in, D_out)\n)\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\nloss_hist = []\n\n# Train\nepochs = range(2000)\nidx = 0\nfor t in epochs:\n for batch in range(0, int(N / batch_size)):\n # Berechne den Batch\n\n batch_x = x[batch * batch_size: (batch + 1) * batch_size, :]\n batch_y = y[batch * batch_size: (batch + 1) * batch_size]\n\n # Berechne die Vorhersage (foward step)\n outputs = model(batch_x)\n\n # Berechne den Fehler (Ausgabe des Fehlers alle 100 Iterationen)\n loss = criterion(outputs, batch_y)\n\n # Berechne die Gradienten und Aktualisiere die Gewichte (backward step)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Berechne den Fehler (Ausgabe des Fehlers alle 100 Iterationen)\n if t % 50 == 0:\n loss_hist.append(loss.item())\n print(t, loss.item())\n torch.save(model, 'vanilla.pt')\n\ntorch.save(model, 'vanilla.pt')\n","sub_path":"src/train/vanilla_net.py","file_name":"vanilla_net.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"88799135","text":"import pigpio\nimport time\n\nif __name__ == \"__main__\":\n \n import pigpio\n\n import test_remote;\n\n pi = pigpio.pi()\n\n pi.set_mode(21, pigpio.OUTPUT)\n pi.set_mode(20, pigpio.OUTPUT)\n pi.set_mode(19, pigpio.OUTPUT)\n pi.write(21, 1)\n time.sleep(1)\n pi.write(21, 0)\n pi.write(20, 1)\n time.sleep(1)\n pi.write(20, 0)\n pi.write(19, 1)\n time.sleep(1)\n pi.write(19, 0)\n\n","sub_path":"donkeycar/tools/test_gpio.py","file_name":"test_gpio.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"7169799","text":"import subprocess\nimport os\nfrom contextlib import contextmanager\nfrom .command_result import CommandResult, CommandException\n\n\ndef complete_openbis_config(config, resolver, local_only=True):\n \"\"\"Add default values for empty entries in the config.\"\"\"\n config_dict = resolver.config.config_dict(local_only)\n if config.get('url') is None:\n config['url'] = config_dict['openbis_url']\n if config.get('verify_certificates') is None:\n config['verify_certificates'] = config_dict['verify_certificates']\n if config.get('token') is None:\n config['token'] = None\n if config.get('allow_http_but_do_not_use_this_in_production_and_only_within_safe_networks') is None:\n config['allow_http_but_do_not_use_this_in_production_and_only_within_safe_networks'] = not config_dict['allow_only_https']\n\n\ndef complete_git_config(config):\n \"\"\"Add default values for empty entries in the config.\"\"\"\n\n find_git = config['find_git'] if config.get('find_git') is not None else True\n if find_git:\n git_cmd = locate_command('git')\n if git_cmd.success():\n config['git_path'] = git_cmd.output\n\n git_annex_cmd = locate_command('git-annex')\n if git_annex_cmd.success():\n config['git_annex_path'] = git_annex_cmd.output\n\n\ndef default_echo(details):\n if details.get('level') != \"DEBUG\":\n print(details['message'])\n\n\ndef run_shell(args, shell=False, strip_leading_whitespace=True, raise_exception_on_failure=False):\n result = CommandResult(subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell), strip_leading_whitespace=strip_leading_whitespace)\n if raise_exception_on_failure == True and result.failure():\n raise CommandException(result)\n return result\n\n\ndef locate_command(command):\n \"\"\"Return a tuple of (returncode, stdout).\"\"\"\n # Need to call this command in shell mode so we have the system PATH\n result = run_shell(['type {}'.format(command)], shell=True)\n # 'type -p' not supported by all shells, so we do it manually\n if result.success():\n result.output = result.output.split(\" \")[-1]\n return result\n\n\n@contextmanager\ndef cd(newdir):\n \"\"\"Safe cd -- return to original dir after execution, even if an exception is raised.\"\"\"\n prevdir = os.getcwd()\n os.chdir(os.path.expanduser(newdir))\n try:\n yield\n finally:\n os.chdir(prevdir)\n","sub_path":"obis/src/python/obis/dm/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"596571133","text":"import json\n\nclass LastData:\n \"\"\"Saves the last time checked and last submission id for checking the next roo\"\"\"\n def __init__(self, path=None):\n self.path = \"last_data.json\"\n if path:\n self.path = path\n\n try:\n with open(self.path, 'r') as f:\n self.data = json.load(f)\n except FileNotFoundError:\n self.data = {}\n\n def get(self, *args, **kwargs):\n return self.data.get(*args, **kwargs)\n\n def save(self):\n with open(self.path, \"w\") as f:\n json.dump(self.data, f, sort_keys=True, indent=4)\n","sub_path":"core/last_data.py","file_name":"last_data.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"257003043","text":"import pygame\r\nimport time\r\nimport random\r\n\r\npygame.init()\r\nwidth = 1280\r\nheight = 720\r\nsize = (width, height)\r\nFIRST_DICE = 0\r\nblack = (0, 0, 0)\r\ngray = (120, 120, 120)\r\nwhite = (255, 255, 255)\r\nred = 
(255, 0, 0)\r\nlime = (0, 255, 0)\r\nblue = (0, 0, 255)\r\nd_red = (139, 0, 0)\r\nyellow = (227, 207, 87)\r\ngold = (255, 215, 0)\r\ngreen = (0, 128, 0)\r\nd_blue = (30, 114, 225)\r\n\r\npygame.display.set_caption(\"Dice Roller (with images!)\")\r\nscreen = pygame.display.set_mode((size))\r\nclock = pygame.time.Clock()\r\nalready_rolled = False\r\nCatergory_selected = False\r\nbg = pygame.image.load(\"bb.jpg\")\r\ncpup = pygame.image.load(\"Blanco_Frame.png\")\r\nCsound = pygame.mixer.Sound(\"moving cursor.wav\")\r\nOpen_vraag = pygame.image.load('Openvraag.png')\r\nMeerkeu_vraag = pygame.image.load('Meerkeuze.png')\r\n\r\n#Dit zijn alle afbeeldingen van de vragen\r\n#sportvraag afbeeldingen\r\nsv1 = pygame.image.load('sportvragen 1.jpg')\r\nsv2 = pygame.image.load('sportvragen 2.jpg')\r\nsv3 = pygame.image.load('sportvragen 3.jpg')\r\nsv4 = pygame.image.load('sportvragen 4.jpg')\r\nsv5 = pygame.image.load('sportvragen 5.jpg')\r\nsv6 = pygame.image.load('sportvragen 6.jpg')\r\nsv7 = pygame.image.load('sportvragen 7.jpg')\r\nsv8 = pygame.image.load('sportvragen 8.jpg')\r\nsv9 = pygame.image.load('sportvragen 9.jpg')\r\nsv10 = pygame.image.load('sportvragen 10.jpg')\r\nsv11 = pygame.image.load('sportvragen 11.jpg')\r\nsv12 = pygame.image.load('sportvragen 12.jpg')\r\nsv13 = pygame.image.load('sportvragen 13.jpg')\r\nsv14 = pygame.image.load('sportvragen 14.jpg')\r\nsv15 = pygame.image.load('sportvragen 15.jpg')\r\n\r\n# historyvragen afbeeldingen\r\nhv1 = pygame.image.load('historyvragen 1.jpg')\r\nhv2 = pygame.image.load('historyvragen 2.jpg')\r\nhv3 = pygame.image.load('historyvragen 3.jpg')\r\nhv4 = pygame.image.load('historyvragen 4.jpg')\r\nhv5 = pygame.image.load('historyvragen 5.jpg')\r\nhv6 = pygame.image.load('historyvragen 6.jpg')\r\nhv7 = pygame.image.load('historyvragen 7.jpg')\r\nhv8 = pygame.image.load('historyvragen 8.jpg')\r\nhv9 = pygame.image.load('historyvragen 9.jpg')\r\nhv10 = pygame.image.load('historyvragen 10.jpg')\r\nhv11 = pygame.image.load('historyvragen 11.jpg')\r\nhv12 = pygame.image.load('historyvragen 12.jpg')\r\nhv13 = pygame.image.load('historyvragen 13.jpg')\r\nhv14 = pygame.image.load('historyvragen 14.jpg')\r\nhv15 = pygame.image.load('historyvragen 15.jpg')\r\n\r\n# entertainmentvragen afbeeldingen\r\nev1 = pygame.image.load('entertainmentvragen 1.jpg')\r\nev2 = pygame.image.load('entertainmentvragen 2.jpg')\r\nev3 = pygame.image.load('entertainmentvragen 3.jpg')\r\nev4 = pygame.image.load('entertainmentvragen 4.jpg')\r\nev5 = pygame.image.load('entertainmentvragen 5.jpg')\r\nev6 = pygame.image.load('entertainmentvragen 6.jpg')\r\nev7 = pygame.image.load('entertainmentvragen 7.jpg')\r\nev8 = pygame.image.load('entertainmentvragen 8.jpg')\r\nev9 = pygame.image.load('entertainmentvragen 9.jpg')\r\nev10 = pygame.image.load('entertainmentvragen 10.jpg')\r\nev11 = pygame.image.load('entertainmentvragen 11.jpg')\r\nev12 = pygame.image.load('entertainmentvragen 12.jpg')\r\nev13 = pygame.image.load('entertainmentvragen 13.jpg')\r\nev14 = pygame.image.load('entertainmentvragen 14.jpg')\r\nev15 = pygame.image.load('entertainmentvragen 15.jpg')\r\n\r\n# technologievragen afbeeldingen\r\ntv1 = pygame.image.load('technologievragen 1.jpg')\r\ntv2 = pygame.image.load('technologievragen 2.jpg')\r\ntv3 = pygame.image.load('technologievragen 3.jpg')\r\ntv4 = pygame.image.load('technologievragen 4.jpg')\r\ntv5 = pygame.image.load('technologievragen 5.jpg')\r\ntv6 = pygame.image.load('technologievragen 6.jpg')\r\ntv7 = pygame.image.load('technologievragen 7.jpg')\r\ntv8 = 
pygame.image.load('technologievragen 8.jpg')\r\ntv9 = pygame.image.load('technologievragen 9.jpg')\r\ntv10 = pygame.image.load('technologievragen 10.jpg')\r\ntv11 = pygame.image.load('technologievragen 11.jpg')\r\ntv12 = pygame.image.load('technologievragen 12.jpg')\r\ntv13 = pygame.image.load('technologievragen 13.jpg')\r\ntv14 = pygame.image.load('technologievragen 14.jpg')\r\ntv15 = pygame.image.load('technologievragen 15.jpg')\r\n\r\n#Een class met de attributen voor vragen\r\n# categorie zodat ik later kan zeggen - if categorie = blabla dan vragen uit die lijst\r\n# vraagafbeelding omdat ik die wil laten zien aan de speler\r\n# antwoord omdat elke vraag een antwoord heeft, die gecheckt moet worden.\r\n# (if user input = antwoord dan correct oid)\r\n# soortvraag omdat de speler moet dobbelen voor open/meerkeuze.\r\n# if dobbel =< 6 dan keuze, anders open\r\n\r\nclass vragen:\r\n def __init__(self, categorie, vraagafbeelding, antwoord, soortvraag):\r\n self.Categorie = categorie\r\n self.vraag = vraagafbeelding\r\n self.Antwoord = antwoord\r\n self.Soortvraag = soortvraag\r\n\r\n#vanaf hier komen de vragen met al hun attributen te staan als global variable\r\n#sportvragen\r\ns1 = vragen(\"sportvragen\", sv1,\"A\", \"keuze\")\r\ns2 = vragen(\"sportvragen\", sv2,\"A\",\"keuze\")\r\ns3 = vragen(\"sportvragen\", sv3,\"B\", \"keuze\")\r\ns4 = vragen(\"sportvragen\", sv4,\"B\", \"keuze\")\r\ns5 = vragen(\"sportvragen\", sv5,\"A\", \"keuze\")\r\ns6 = vragen(\"sportvragen\", sv6, \"C\", \"keuze\")\r\ns7 = vragen(\"sportvragen\", sv7,\"B\", \"keuze\")\r\ns8 = vragen(\"sportvragen\", sv8,\"C\", \"keuze\")\r\ns9 = vragen(\"sportvragen\", sv9,\"A\", \"keuze\")\r\ns10 = vragen(\"sportvragen\", sv10,\"C\", \"keuze\")\r\ns11 = vragen(\"sportvragen\", sv11,\"DE KUIP\", \"open\")\r\ns12 = vragen(\"sportvragen\", sv12,\"DUNKEN\", \"open\")\r\ns13 = vragen(\"sportvragen\", sv13,\"CITY RACE ROTTERDAM\", \"open\")\r\ns14 = vragen(\"sportvragen\", sv14,\"B\", \"keuze\")\r\ns15 = vragen(\"sportvragen\", sv15,\"FEYENOORD\", \"open\")\r\n\r\n#historyvragen\r\nh1 = vragen(\"historyvragen\", hv1, \"B\", \"keuze\")\r\nh2 = vragen(\"historyvragen\", hv2, \"A\", \"keuze\")\r\nh3 = vragen(\"historyvragen\", hv3, \"B\", \"keuze\")\r\nh4 = vragen(\"historyvragen\", hv4, \"C\", \"keuze\")\r\nh5 = vragen(\"historyvragen\", hv5, \"B\", \"keuze\")\r\nh6 = vragen(\"historyvragen\", hv6, \"C\", \"keuze\")\r\nh7 = vragen(\"historyvragen\", hv7, \"A\", \"keuze\")\r\nh8 = vragen(\"historyvragen\", hv8, \"B\", \"keuze\")\r\nh9 = vragen(\"historyvragen\", hv9, \"A\", \"keuze\")\r\nh10 = vragen(\"historyvragen\", hv10, \"A\", \"keuze\")\r\nh11 = vragen(\"historyvragen\", hv11, \"PIM FORTUYN\", \"open\")\r\nh12 = vragen(\"historyvragen\", hv12, \"BOKITO\", \"open\")\r\nh13 = vragen(\"historyvragen\", hv13, \"LOODS24\", \"open\")\r\nh14 = vragen(\"historyvragen\", hv14, \"C\", \"keuze\")\r\nh15 = vragen(\"historyvragen\", hv15, \"MAASVLAKTE\", \"open\")\r\n\r\n#entertainmentvragen\r\ne1 = vragen(\"entertainmentvragen\", ev1, \"B\", \"keuze\")\r\ne2 = vragen(\"entertainmentvragen\", ev2, \"A\", \"keuze\")\r\ne3 = vragen(\"entertainmentvragen\", ev3, \"BLIJDORP\", \"open\")\r\ne4 = vragen(\"entertainmentvragen\", ev4, \"ERASMUSBRUG\", \"open\")\r\ne5 = vragen(\"entertainmentvragen\", ev5, \"AHOY\", \"open\")\r\ne6 = vragen(\"entertainmentvragen\", ev6, \"A\", \"keuze\")\r\ne7 = vragen(\"entertainmentvragen\", ev7, \"C\", \"keuze\")\r\ne8 = vragen(\"entertainmentvragen\", ev8, \"A\", \"keuze\")\r\ne9 = vragen(\"entertainmentvragen\", 
ev9, \"A\", \"keuze\")\r\ne10 = vragen(\"entertainmentvragen\", ev10, \"C\", \"keuze\")\r\ne11 = vragen(\"entertainmentvragen\", ev11, \"C\", \"keuze\")\r\ne12 = vragen(\"entertainmentvragen\", ev12, \"B\", \"keuze\")\r\ne13 = vragen(\"entertainmentvragen\", ev13, \"B\", \"keuze\")\r\ne14 = vragen(\"entertainmentvragen\", ev14, \"ZOMBIEWALK\", \"open\")\r\ne15 = vragen(\"entertainmentvragen\", ev15, \"B\", \"keuze\")\r\n\r\n#technologievragen\r\nt1 = vragen(\"technologievragen\", tv1, \"POKEMON GO\", \"open\")\r\nt2 = vragen(\"technologievragen\", tv2, \"C\", \"keuze\")\r\nt3 = vragen(\"technologievragen\", tv3, \"B\", \"keuze\")\r\nt4 = vragen(\"technologievragen\", tv4, \"C\", \"keuze\")\r\nt5 = vragen(\"technologievragen\", tv5, \"B\", \"keuze\")\r\nt6 = vragen(\"technologievragen\", tv6, \"A\", \"keuze\")\r\nt7 = vragen(\"technologievragen\", tv7, \"B\", \"keuze\")\r\nt8 = vragen(\"technologievragen\", tv8, \"BINAIRE\", \"open\")\r\nt9 = vragen(\"technologievragen\", tv9, \"B\", \"keuze\")\r\nt10 = vragen(\"technologievragen\", tv10, \"KUBUSWONINGEN\", \"open\")\r\nt11 = vragen(\"technologievragen\", tv11, \"B\", \"keuze\")\r\nt12 = vragen(\"technologievragen\", tv12, \"A\", \"keuze\")\r\nt13 = vragen(\"technologievragen\", tv13, \"A\", \"keuze\")\r\nt14 = vragen(\"technologievragen\", tv14, \"B\", \"keuze\")\r\nt15 = vragen(\"technologievragen\", tv15, \"KABOUTER BUTTPLUG\", \"open\")\r\n\r\n\r\n# create lists of the questions so answered ones can be removed later\r\n# (you don't want the same question to come up twice)\r\nsportvragen = [s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15]\r\nhistoryvragen = [h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, h14, h15]\r\nentertainmentvragen = [e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15]\r\ntechnologievragen = [t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15]\r\n\r\n\r\ndef vragenoproep():\r\n    # NOTE: currently always draws from sportvragen, whichever category button called it\r\n    randomvraag = random.choice(sportvragen)\r\n    screen.blit(randomvraag.vraag, (200, 200))\r\n    pygame.display.update()  # was screen.display.update(), which raises AttributeError\r\n    return randomvraag\r\n\r\ndef vraagbeantwoord(randomvraag):\r\n    # the answered question is passed in; it is not a global\r\n    sportvragen.remove(randomvraag)\r\n\r\ndef Qanimation():\r\n    global Open_vraag, Meerkeu_vraag\r\n    screen.blit(Open_vraag, (0, 0))\r\n\r\n\r\n# text for button\r\ndef text_objects(text, font):\r\n    textSurface = font.render(text, True, (0, 0, 0))\r\n    return textSurface, textSurface.get_rect()\r\n\r\n\r\n# text on display(large text)\r\ndef message_display(text):\r\n    largeText = pygame.font.Font('freesansbold.ttf', 115)\r\n    TextSurf, TextRect = text_objects(text, largeText)\r\n    TextRect.center = ((width / 2), (height / 2))\r\n    screen.blit(TextSurf, TextRect)\r\n\r\n    pygame.display.update()\r\n\r\n    time.sleep(2)\r\n\r\n\r\n# buttons\r\ndef button(msg, x, y, w, h, ic, ac, action=None):\r\n    global Csound\r\n    mouse = pygame.mouse.get_pos()\r\n    click = pygame.mouse.get_pressed()\r\n    print(click)\r\n    if x + w > mouse[0] > x and y + h > mouse[1] > y:\r\n        pygame.draw.rect(screen, ac, (x, y, w, h))\r\n\r\n        if click[0] == 1 and action != None:\r\n            Csound.play()\r\n            pygame.time.delay(1000)\r\n            action()\r\n            pygame.mixer.stop()\r\n    else:\r\n        pygame.draw.rect(screen, ic, (x, y, w, h))\r\n\r\n    smallText = pygame.font.SysFont(\"Arial black\", 20)\r\n    textSurf, textRect = text_objects(msg, smallText)\r\n    textRect.center = ((x + (w / 2)), (y + (h / 2)))\r\n    screen.blit(textSurf, textRect)\r\n\r\n\r\ndef roll_a_dice():\r\n    typeQ = [\"open\"] * 20 + [\"meerkeuze\"] * 80\r\n    result = random.choice(typeQ)\r\n    return result\r\n\r\n\r\n# determines which first 
dice is used\r\ndef display_first(first):\r\n if (first == \"open\"):\r\n screen.blit(Open_vraag, (0, 0))\r\n pygame.display.flip()\r\n produce_button_message(\"Klik Doorgaan\", 575, 250)\r\n produce_button_message2(\"Type vraag\", 615, 152)\r\n button(\"Doorgaan\", 423, 520, 458, 32, yellow, gold, Select_Catagory)\r\n elif (first == \"meerkeuze\"):\r\n screen.blit(Meerkeu_vraag, (0, 0))\r\n pygame.display.flip()\r\n produce_button_message(\"Klik Doorgaan\", 575, 250)\r\n produce_button_message2(\"Type vraag\", 615, 152)\r\n button(\"Doorgaan\", 423, 520, 458, 32, yellow, gold, Select_Catagory)\r\n\r\n\r\n# tells the user how to roll\r\ndef produce_button_message(text, x, y):\r\n our_font = pygame.font.SysFont(\"Arial black\", 20)\r\n # render the text now\r\n produce_text = our_font.render(text, 1, black)\r\n screen.blit(produce_text, (x, y))\r\n\r\n\r\ndef produce_button_message2(text, x, y):\r\n our_font = pygame.font.SysFont(\"Arial black\", 15)\r\n # render the text now\r\n produce_text = our_font.render(text, 1, black)\r\n screen.blit(produce_text, (x, y))\r\n\r\n\r\n# our roll will display message with our roll converted to text form, alongside\r\ndef before_roll():\r\n blank_popup = pygame.image.load('empty_popup.png')\r\n screen.blit(blank_popup, (0, 0))\r\n produce_button_message(\"Druk op 'Rol' om te dobbelen\", 505, 280)\r\n produce_button_message2(\"Type vraag\", 615, 152)\r\n button(\"roll\", 372, 357, 560, 83, (139, 131, 134), (75, 75, 75), rollfunc)\r\n\r\n\r\ndef our_roll():\r\n # Completed roll Message. Cast int to str to output the message clearly\r\n text = \"Je hebt \" + str(FIRST_DICE) + \" gegooid.\"\r\n print(text)\r\n produce_roll_message(text, 200, 200)\r\n\r\n\r\ndef rollfunc():\r\n global FIRST_DICE, already_rolled\r\n FIRST_DICE = roll_a_dice()\r\n roll_occur = True\r\n\r\n\r\n# We don't want our roll value output before the first roll occurs.\r\ndef typeQroll():\r\n global already_rolled\r\n roll_occur = False\r\n while already_rolled == False:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n already_rolled = True\r\n\r\n screen.fill(white)\r\n screen.blit(bg, (0, 0))\r\n before_roll()\r\n display_first(FIRST_DICE)\r\n pygame.display.flip()\r\n # If the roll is requested, our_roll will execute.\r\n\r\n\r\ndef Select_Catagory():\r\n global Catergory_selected\r\n while Catergory_selected == False:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n Catergory_selected = True\r\n screen.blit(cpup, (0, 0))\r\n produce_button_message(\"Kies een categorie\", 555, 250)\r\n produce_button_message2(\"Categorie\", 620, 152)\r\n button(\"Entertainment\", 402, 300, 180, 70, yellow, gold, vragenoproep)\r\n button(\"Geschiedenis\", 730, 300, 180, 70, d_blue, blue, vragenoproep)\r\n button(\"Sport\", 730, 390, 180, 70, green, lime, vragenoproep)\r\n button(\"Techologie\", 402, 390, 180, 70, d_red, red, vragenoproep)\r\n pygame.display.flip()\r\n\r\n\r\n# Once the loop exits, the program will quit.\r\n# Loop will exit when the 'Exit' button on the window is clicked.This bit of code just ensures you can actually\r\n# click that and exit.\r\ntypeQroll()\r\npygame.quit()\r\nquit()","sub_path":"vraagafbeeldingen/vraagafbeeldingen/vragenlatenwerken.py","file_name":"vragenlatenwerken.py","file_ext":"py","file_size_in_byte":13530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"470776296","text":"#!/usr/bin/env python\nimport smach\nimport 
rospy\nimport numpy as np\nfrom sensor_msgs.msg import JointState\nfrom ambf_walker.msg import DesiredJoints\nfrom GaitAnaylsisToolkit.LearningTools.Runner import TPGMMRunner\nfrom std_msgs.msg import Float32MultiArray\nfrom Model import Model\nfrom std_msgs.msg import Empty,String\nimport matplotlib.pyplot as plt\nfrom ambf_walker.srv import DesiredJointsCmdRequest, DesiredJointsCmd\n#from ilqr.controller import RecedingHorizonController\n#from ilqr.cost import PathQsRCost\n#from ilqr import iLQR\n#from ilqr.dynamics import FiniteDiffDynamics\nfrom GaitAnaylsisToolkit.LearningTools.Runner import GMMRunner\nimport numpy.polynomial.polynomial as poly\nfrom os.path import dirname, join\n\n\n\nclass HumanInitialize(smach.State):\n\n def __init__(self, model, outcomes=['Initializing', 'Initialized']):\n\n smach.State.__init__(self, outcomes=outcomes)\n rospy.wait_for_service('joint_cmd')\n self._model = model\n self.rate = rospy.Rate(100)\n tf = 2.0\n dt = 0.01\n self.hip, self.knee, self.ankle = self._model.stance_trajectory(tf=tf, dt=dt)\n self.msg = DesiredJoints()\n self.pub = rospy.Publisher(self._model.model_name + \"_set_points\", DesiredJoints, queue_size=1)\n\n self.total = tf / dt\n self.count = 0\n\n def execute(self, userdata):\n\n self._model.handle.set_rpy(0.25, 0, 0)\n self._model.handle.set_pos(0.0, 0, 3.0)\n\n if self.count <= self.total - 1:\n\n q = np.array([self.hip[\"q\"][self.count].item(), self.knee[\"q\"][self.count].item(),\n self.ankle[\"q\"][self.count].item(),\n self.hip[\"q\"][self.count].item(), self.knee[\"q\"][self.count].item(),\n self.ankle[\"q\"][self.count].item(), 0.0])\n\n qd = np.array([self.hip[\"qd\"][self.count].item(), self.knee[\"qd\"][self.count].item(),\n self.ankle[\"qd\"][self.count].item(),\n self.hip[\"qd\"][self.count].item(), self.knee[\"qd\"][self.count].item(),\n self.ankle[\"qd\"][self.count].item(), 0.0])\n\n qdd = np.array([self.hip[\"qdd\"][self.count].item(), self.knee[\"qdd\"][self.count].item(),\n self.ankle[\"qdd\"][self.count].item(),\n self.hip[\"qdd\"][self.count].item(), self.knee[\"qdd\"][self.count].item(),\n self.ankle[\"qdd\"][self.count].item(), 0.0])\n\n self.count += 1\n self.msg.q = q\n self.msg.qd = qd\n self.msg.qdd = qdd\n self.msg.controller = \"Dyn\"\n self.pub.publish(self.msg)\n #self.send(q, qd, qdd, \"Dyn\", [])\n self.rate.sleep()\n\n return 'Initializing'\n else:\n return \"Initialized\"","sub_path":"StateMachines/TestStates.py","file_name":"TestStates.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"237918065","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('images/beach_trash_3.jpg')\ncolor = ('b','g','r')\n\nfor i, col in enumerate(color):\n histr = cv2.calcHist([img],[i],None,[256],[0,256])\n plt.plot(histr, color = col)\n plt.xlim([0,256])\n\ncv2.imshow('Original image', img)\nplt.show()\n\nimg_gray = cv2.imread('images/beach_trash_3.jpg',0)\ncv2.imshow('Original image', img_gray)\nplt.hist(img_gray.ravel(),256,[0,256])\nplt.show()\n\n\n","sub_path":"histogram-plotting.py","file_name":"histogram-plotting.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"315529895","text":"import itertools\nimport logging\nimport os\nimport pkg_resources\n\nimport yaml\n\nimport fuel_ccp\nfrom fuel_ccp.common import jinja_utils\nfrom fuel_ccp import config\nfrom fuel_ccp import 
kubernetes\n\n\nCONF = config.CONF\n\nLOG = logging.getLogger(__name__)\n\n\ndef get_ingress_host(ingress_name):\n return '.'.join((ingress_name, CONF.configs.ingress.domain))\n\n\ndef get_ingress_domains(components=None):\n components_map = get_deploy_components_info()\n components = components or components_map.keys()\n domains = []\n for component in components:\n service = components_map[component][\"service_content\"][\"service\"]\n for port in service.get(\"ports\", []):\n if port.get(\"ingress\"):\n domains.append(get_ingress_host(port.get(\"ingress\")))\n return domains\n\n\ndef get_resource_path(path):\n return pkg_resources.resource_filename(fuel_ccp.version_info.package, path)\n\n\ndef get_repositories_paths():\n \"\"\"Get repositories paths.\n\n :returns: list -- list of full repositories paths\n \"\"\"\n paths = []\n for repo in CONF.repositories.repos:\n paths.append(os.path.join(CONF.repositories.path, repo[\"name\"]))\n return paths\n\n\ndef get_config_paths():\n paths = []\n # Order does matter. At first we add global defaults.\n for conf_path in (\"resources/defaults.yaml\", \"resources/globals.yaml\"):\n paths.append(get_resource_path(conf_path))\n\n # After we add component defaults.\n for repo in get_repositories_paths():\n paths.append(os.path.join(repo, \"service\", \"files\", \"defaults.yaml\"))\n\n return paths\n\n\ndef address(service, port=None, external=False, with_scheme=False):\n addr = None\n enable_tls = CONF.configs.get(service, {}).get('tls', {}).get('enabled')\n\n if enable_tls:\n scheme = 'https'\n else:\n scheme = 'http'\n\n if external:\n if not port:\n raise RuntimeError('Port config is required for external address')\n if CONF.configs.ingress.enabled and port.get('ingress'):\n scheme = 'https'\n addr = \"%s:%s\" % (get_ingress_host(port['ingress']),\n CONF.configs.ingress.port)\n elif port.get('node'):\n addr = '%s:%s' % (CONF.configs.k8s_external_ip, port['node'])\n\n if addr is None:\n addr = '.'.join((service, CONF.kubernetes.namespace, 'svc',\n CONF.kubernetes.cluster_domain))\n if port:\n addr = '%s:%s' % (addr, port['cont'])\n\n if with_scheme:\n addr = \"%s://%s\" % (scheme, addr)\n\n return addr\n\n\ndef get_repositories_exports():\n \"\"\"Load shared templates from ./exports dirs of the repositories. 
\"\"\"\n exports = dict()\n for repo in get_repositories_paths():\n exports_dir = os.path.join(repo, 'exports')\n if os.path.exists(exports_dir) and os.path.isdir(exports_dir):\n for export_file in os.listdir(exports_dir):\n # Due to k8s keys constraints we need to remove non-alpha\n cm_key = ''.join([c for c in export_file if c.isalpha()])\n path = os.path.join(exports_dir, export_file)\n LOG.debug('Found shared jinja template file %s', path)\n if cm_key not in exports:\n exports[cm_key] = {'name': export_file, 'body': ''}\n # Merge the files with same name\n with open(path) as f:\n exports[cm_key]['body'] += f.read() + '\\n'\n return exports\n\n\ndef get_component_name_from_repo_path(path):\n REPO_NAME_PREFIX = \"fuel-ccp-\"\n name = os.path.basename(path)\n if name.startswith(REPO_NAME_PREFIX):\n name = name[len(REPO_NAME_PREFIX):]\n return name\n\n\ndef get_deploy_components_info(rendering_context=None):\n if rendering_context is None:\n rendering_context = CONF.configs._dict\n components_map = {}\n\n for repo in get_repositories_paths():\n service_dir = os.path.join(repo, \"service\")\n if not os.path.isdir(service_dir):\n continue\n component_name = get_component_name_from_repo_path(repo)\n\n component = {\n \"name\": component_name,\n \"upgrades\": {},\n \"service_dir\": service_dir,\n }\n\n upgrade_dir = os.path.join(service_dir, \"upgrade\")\n if os.path.isdir(upgrade_dir):\n for upgrade_fname in os.listdir(upgrade_dir):\n if not upgrade_fname.endswith('.yaml'):\n continue\n LOG.debug(\"Loading upgrade definition: %s\", upgrade_fname)\n with open(os.path.join(upgrade_dir, upgrade_fname)) as f:\n upgrade_def = yaml.load(f)\n key = upgrade_fname[:-len('.yaml')]\n component['upgrades'][key] = upgrade_def\n\n for service_file in os.listdir(service_dir):\n if service_file.endswith('.yaml'):\n LOG.debug(\"Rendering service definition: %s\", service_file)\n content = jinja_utils.jinja_render(\n os.path.join(service_dir, service_file), rendering_context,\n functions=[address]\n )\n LOG.debug(\"Parse service definition: %s\", service_file)\n service_definition = yaml.load(content)\n service_name = service_definition['service']['name']\n components_map[service_name] = {\n 'component': component,\n 'component_name': component_name,\n 'service_dir': service_dir,\n 'service_content': service_definition\n }\n return components_map\n\n\ndef get_deployed_components():\n \"\"\"Returns set of deployed components.\"\"\"\n deployed_deployments = kubernetes.list_cluster_deployments()\n deployed_statefulsets = kubernetes.list_cluster_statefulsets()\n deployed_components = set(kubernetes.get_object_names(\n itertools.chain(deployed_deployments, deployed_statefulsets))\n )\n return deployed_components\n\n\ndef get_nodes_config(nodes):\n nodes_config = config._yaml.AttrDict()\n for node in sorted(nodes):\n if 'configs' in nodes[node]:\n nodes_config[node] = nodes[node]['configs']\n return nodes_config._json(sort_keys=True)\n","sub_path":"fuel_ccp/common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"479197729","text":"\"\"\" Tests for mailutils module \"\"\"\nimport os\nimport sys\nimport json\nimport pytest\nimport asyncio\nfrom unittest import mock\n\nfrom . 
import commons\n\nfrom byemail.storage import storage\n\n@pytest.fixture\ndef httpapp(settings):\n from byemail.httpserver import get_app\n return get_app()\n\ndef get_auth_cookie(httpapp):\n data = {\n 'name': 'test',\n 'password': 'test_pass'\n }\n\n # Authenticate\n _ , response = httpapp.test_client.post('/login', data=json.dumps(data))\n\n return {'session_key': response.cookies['session_key'].value}\n\ndef test_basic(httpapp):\n request, response = httpapp.test_client.get('/')\n assert response.status == 200\n\ndef test_auth(httpapp):\n data = {\n 'name': 'test',\n 'password': 'bad_password'\n }\n\n request, response = httpapp.test_client.post('/login', data=json.dumps(data))\n\n assert response.status == 403\n\n data = {\n 'name': 'test',\n 'password': 'test_pass'\n }\n\n request, response = httpapp.test_client.post('/login', data=json.dumps(data))\n\n assert response.status == 200\n\ndef test_send_mail(httpapp):\n\n\n data = {\n \"recipients\":\n [\n {\"address\":\"alt.n2-75zy2uk@yopmail.com\",\"type\":\"to\"}, # test_byemail\n {\"address\":\"alt.n2-75zy2uk@yopmail.com\",\"type\":\"cc\"},\n {\"address\":\"bad@inbox.mailtrap.io\",\"type\":\"cc\"}\n ],\n \"subject\":\"Test mail\",\n \"content\":\"Content\\nMultiline\",\n \"attachments\":[\n {\n \"filename\":\"testfile.txt\",\n \"b64\":\"VGVzdAo=\"\n }\n ]\n }\n \n cookies = get_auth_cookie(httpapp)\n _ , response = httpapp.test_client.post('/api/sendmail/', data=json.dumps(data), cookies=cookies)\n\n assert response.status == 200\n\n assert json.loads(response.body)['delivery_status'] == {\n 'alt.n2-75zy2uk@yopmail.com': {'status': 'DELIVERED', 'smtp_info': ['250', 'Delivered']}, \n 'bad@inbox.mailtrap.io': {'reason': 'SMTP_ERROR', 'smtp_info': \"(554, b'5.5.1 Error: no inbox for this email')\", 'status': 'ERROR'}\n }\n\n\n\ndef test_contacts_search(loop, httpapp, fake_account):\n \"\"\" Test contact search \"\"\"\n\n cookies = get_auth_cookie(httpapp)\n\n request, response = httpapp.test_client.get('/api/contacts/search?text=toto', cookies=cookies)\n\n assert response.status == 200\n\n result = json.loads(response.body)\n\n assert result == []\n\n loop.run_until_complete(storage.get_or_create_mailbox(fake_account, \"titi@localhost\", \"Titi\"))\n\n request, response = httpapp.test_client.get('/api/contacts/search?text=titi', cookies=cookies)\n\n assert response.status == 200\n\n result = json.loads(response.body)\n\n assert result == [\"titi@localhost\"]\n\n\n","sub_path":"byemail/tests/test_httpserver.py","file_name":"test_httpserver.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"40583052","text":"from openroad import Design, Tech\nimport helpers\nimport cts_aux\n\ntech = Tech()\ntech.readLef(\"Nangate45/Nangate45.lef\")\ntech.readLiberty(\"Nangate45/Nangate45_typ.lib\")\n\ndesign = Design(tech)\ndesign.readDef(\"16sinks.def\")\n\ndesign.evalTclString(\"create_clock -period 5 clk\")\ndesign.evalTclString(\"set_wire_rc -clock -layer metal3\")\n\ncts_aux.clock_tree_synthesis(design, root_buf=\"CLKBUF_X3\", buf_list=\"CLKBUF_X3\",\n wire_unit=20)\n","sub_path":"src/cts/test/simple_test.py","file_name":"simple_test.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"280847802","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file 
name: C:\\Users\\Sascha\\Documents\\PycharmProjects\\maverig\\maverig\\data\\components\\utils\\pyPowerSerializer.py\n# Compiled at: 2015-01-03 06:36:07\n# Size of source mod 2**32: 2965 bytes\nimport json\nfrom maverig.data import dataHandler\n\nclass PyPowerSerializer:\n    __doc__ = 'Util class used for serialization of the created elements that depend on the PyPower simulator.\\n    Currently refBus, bus, transformer and branch belong to this group of elements.'\n    _PyPowerSerializer__PP_JSON_FILE = 'pypower.json'\n\n    def __init__(self):\n        \"\"\"Initialize the PyPowerSerializer holding the pre-formatted JSON object.\"\"\"\n        self.pp_json_object = {'bus': [], 'trafo': [], 'branch': []}\n\n    def serialize(self, elements):\n        \"\"\"Serialize elements to a JSON formatted object.\n        :param elements: dict of elements.\n        :return: Serialized JSON formatted object.\n        \"\"\"\n        self._PyPowerSerializer__build_json(elements)\n        return json.dumps(self.pp_json_object, indent=4)\n\n    def serialize_to_file(self, elements):\n        \"\"\"Serialize elements as a JSON formatted stream to a file (.json).\n        :param elements: dict of elements.\n        :return: Path of the JSON file.\n        \"\"\"\n        self._PyPowerSerializer__build_json(elements)\n        with open(dataHandler.get_temp_file(self._PyPowerSerializer__PP_JSON_FILE), 'w+') as f:\n            json.dump(self.pp_json_object, f, indent=4)\n        return dataHandler.get_temp_file(self._PyPowerSerializer__PP_JSON_FILE)\n\n    def __build_json(self, elements):\n        \"\"\"Build the JSON formatted object.\n        :param elements: dict of elements.\n        \"\"\"\n        for elem in elements.values():\n            if elem['sim_model'] == 'PyPower.RefBus':\n                self.pp_json_object['bus'].insert(0, [elem['elem_id'], elem['params']['bus_type'], elem['params']['base_kv']])\n            elif elem['sim_model'] == 'PyPower.PQBus':\n                self.pp_json_object['bus'].append([\n                 elem['elem_id'], elem['params']['bus_type'], elem['params']['base_kv']])\n            elif elem['sim_model'] == 'PyPower.Transformer':\n                self.pp_json_object['trafo'].append([\n                 elem['elem_id'], elem['params']['fbus'], elem['params']['tbus'], elem['params']['ttype'],\n                 elem['params']['online'], elem['params']['tap']])\n            elif elem['sim_model'] == 'PyPower.Branch':\n                self.pp_json_object['branch'].append([\n                 elem['elem_id'], elem['params']['fbus'], elem['params']['tbus'], elem['params']['btype'],\n                 elem['params']['l'], elem['params']['online']])","sub_path":"pycfiles/maverig-1.0.5-py3-none-any/pyPowerSerializer.cpython-34.py","file_name":"pyPowerSerializer.cpython-34.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"21086454","text":"\nclass clusters:\n\n    def __init__(self,k,plt):\n        self.km=k\n        self.plt = plt\n    def clusterElbow(self,df,i,n):\n        errors=[]\n        for k in range(i,n):\n            kmean = self.km(k=k,featuresCol=\"features\")\n            model = kmean.fit(df)\n            intra_distance = model.computeCost(df)\n            errors.append(intra_distance)\n        cluster_num = range(i,n)\n        self.plt.figure(figsize=(15,5))\n        self.plt.xlabel('Number of clusters')\n        self.plt.ylabel('SSE')\n        self.plt.plot(cluster_num,errors)\n\n    def clusterViz(self,n,df): # plot the clusters\n        kmean = self.km(k=n,featuresCol=\"features\",predictionCol=\"prediction\",initMode=\"random\")\n        cluster_df = kmean.fit(df).transform(df)\n        self.cluster_df = cluster_df.toPandas()\n        self.cluster_df.head()\n\n    def selectOneVarByClass(self,k,pd): # select a single value from each class\n        clusters = 
pd.DataFrame(index=range(self.cluster_df.shape[0]))\n        for i in range(k):\n            clusters['class' + str(i)] = pd.Series(self.cluster_df[self.cluster_df['prediction'] == i]['index'].values)\n        clusterVar = clusters.dropna()\n        clusterVar = clusterVar.reset_index()\n        self.clusterVar = clusterVar.drop('index',axis=1)\n\n\n\n\n\n\n\n","sub_path":"clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"323479073","text":"\"\"\" This file is used for the evaluation of models and baselines.\n\"\"\"\nimport json\nfrom datetime import datetime\n\nimport torch\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import top_k_accuracy_score, accuracy_score\nfrom sklearn import preprocessing\n\nfrom tqdm.auto import tqdm, trange\n\nfrom baseline import get_model\nfrom utils.tig_data_set import TIGDataset\nfrom processor import stgcn_metric\n\n\ndef load_eval_data(config: dict, path: str = None, num_classes: int = 50,\n                   num_samples: int = 994):\n    \"\"\" This method loads the data for evaluation based on a given data set config.\n\n    Args:\n        config: dict\n            Configuration of the experiment for which the data should be loaded.\n        path: str (optional)\n            Path from which local data should be loaded.\n        num_classes: int\n            Number of classes that should be loaded.\n        num_samples: int\n            Determines the number of samples per class. The data set will be\n            aligned to this number.\n\n    Returns:\n        (np.Array, np.Array): (x, y) where x are the features and y are the\n            corresponding labels.\n    \"\"\"\n    if path is not None:\n        # Load embeddings\n        x = np.load(path + \"embeddings_full.npz\")[\"x\"]\n        y = np.load(path + \"embeddings_full.npz\")[\"y\"]\n    else:\n        data_cfg = {**config, **{\"path\": \"./data/\"}}\n\n        data = TIGDataset(**data_cfg)\n\n        if num_classes is not None:\n            # 994 samples used for the kinetics skeleton dataset\n            train = data.stratify(num=num_classes, num_samples=num_samples, mode=1)\n            # 632 samples are used for the NTU-RGB+D dataset\n            # train = data.stratify(num=num_classes, num_samples=632, mode=1)\n            x = np.array(list(np.array(train)[:, 0]))\n            y = np.array(train)[:, 1].astype(int)\n        else:\n            x = data.x\n            y = data.y\n\n    # x = np.mean(x, axis=1).reshape(x.shape[0], -1)\n\n    return x, y\n\n\ndef evaluate_experiments(experiments: [tuple], baseline=\"mlp\", repeat_baseline=5,\n                         repeat_model=5, portion=1., eval_mode=\"model+baseline\",\n                         num_classes=50, unify_params=False, verbose=True,\n                         repetition_mode=None):\n    \"\"\" Unified method to evaluate different experiments.\n        The experiments are evaluated successively.\n\n    Args:\n        experiments: [tuple]\n            List of tuples of the form (\"MODEL_NAME\", \"PATH_TO_MODEL\").\n        baseline: str\n            Name of the baseline that is used for evaluation.\n            Options: [\"mlp\", \"svm\", None]\n        repeat_baseline: int\n            Number of repetitions for training and evaluating the baseline.\n        portion: float\n            Defines which portion of the data should be used for evaluation.\n        eval_mode: str\n            Determines what is evaluated: only the encoder model, only the\n            baseline, or both, e.g. \"model\", \"baseline\" or \"model+baseline\".\n        num_classes: int\n            Number of classes in the data. 
That number has to match\n            the number of classes that was used to train the TIG encoder.\n        unify_params: bool\n            Flag to unify the number of parameters of the baseline model\n            with the number of parameters of the classifier that is appended\n            to the TIG encoder. This is sometimes needed because the\n            embedding size coming from the encoder can differ from the\n            embedding size of the baseline data.\n        verbose: bool\n            Flag to print the progress.\n        repeat_model: int\n            Number of repetitions of the model evaluation.\n        repetition_mode: str\n            Defines which data is used within one iteration (when the\n            evaluation is repeated). When repetition_mode is\n            \"same_data\" we always use the same encodings and shuffle them.\n            In case repetition_mode != \"same_data\" we use different encodings,\n            i.e. the TIG model encoded the same chunk of data multiple times\n            (default 5).\n    \"\"\"\n    results = []\n\n    # Open the first config to get the general data configuration for the\n    # first experiment. The data configuration is the same across all\n    # iterations of one experiment.\n    with open(f\"{experiments[0][1]}/0/config.json\") as file:\n        config = json.load(file)\n\n    for name, path in tqdm(experiments):\n        # For each experiment load the configuration (configs differ\n        # across different experiments).\n        with open(f\"{path}0/config.json\") as file:\n            config = json.load(file)\n\n            # Default value for number of parameters.\n            num_params = -1\n            top1, top5 = [], []\n            top1_baseline, top5_baseline = None, None\n\n            ### Evaluate Model ###\n            if repetition_mode == \"same_data\" and \"model\" in eval_mode:\n                # Load data once and repeat the evaluation on the same data.\n                x, y = load_eval_data(config[\"data\"], path=f\"{path}/0/\")\n                top1, top5, num_params = run_evaluation(x, y, baseline,\n                                                        repeat=repeat_baseline,\n                                                        portion=portion,\n                                                        verbose=verbose)\n            elif \"model\" in eval_mode:\n                # Load n (default n=5) different encodings and evaluate each encoding. 
\n                for i in range(repeat_model):\n                    try:\n                        x, y = load_eval_data(config[\"data\"], path=f\"{path}/{i}/\")\n                        t1, t5, num_params = run_evaluation(x, y, baseline,\n                                                            repeat=1,\n                                                            portion=portion,\n                                                            portion_state=i,\n                                                            verbose=verbose)\n                        top1.append(t1)\n                        top5.append(t5)\n                    # pylint: disable=broad-except\n                    except Exception as e:\n                        # Just for debugging purposes.\n                        print(e)\n\n            # Get percentage values of the mean and standard deviation.\n            top1, top1_std, top5, top5_std = (np.mean(top1) * 100,\n                                              np.std(top1) * 100,\n                                              np.mean(top5) * 100,\n                                              np.std(top5) * 100)\n\n            ### Evaluate Baseline ###\n            if \"baseline\" in eval_mode:\n                # Load Baseline Data\n                x, y = load_eval_data(config[\"data\"], num_classes=num_classes)\n                x = np.mean(x, axis=1).reshape(x.shape[0], -1)\n\n                if unify_params and config[\"model\"][\"architecture\"][-1][-1] > 72:\n                    layer_size = (config[\"model\"][\"architecture\"][-1][-1] - 72) + 100\n                    eval_temp = run_evaluation(x, y, baseline, repeat=repeat_baseline,\n                                               portion=portion, hidden_layer_sizes=[layer_size])\n                else:\n                    eval_temp = run_evaluation(x, y, baseline,\n                                               repeat=repeat_baseline,\n                                               portion=portion)\n\n                top1_baseline, top5_baseline, num_params = eval_temp\n\n                top1_baseline, top1_baseline_std = (np.mean(top1_baseline) * 100,\n                                                    np.std(top1_baseline) * 100)\n                top5_baseline, top5_baseline_std = (np.mean(top5_baseline) * 100,\n                                                    np.std(top5_baseline) * 100)\n\n            try:\n                model = torch.load(f\"{path}/0/TIG_.pt\", map_location=torch.device('cpu'))\n            except:  # noqa: E722\n                model = None\n\n            results.append({\n                \"Model\": name + f\" + {baseline}\",\n                \"Top-1 Accuracy\": top1,\n                \"Top-1 Std.\": top1_std,\n                \"Top-5 Accuracy\": top5,\n                \"Top-5 Std.\": top5_std,\n                \"# Epochs\": config[\"training\"][\"n_epochs\"],\n                \"Architecture\": config[\"model\"][\"architecture\"],\n                \"# TIG Parameter\": sum(p.numel() for p in model.parameters() if p.requires_grad) if model is not None else 0,\n                \"Embedding Dimension\": config[\"model\"][\"architecture\"][-1][-1],\n                \"# Downstream Parameter\": num_params,\n                \"Objective\": config[\"loss\"],\n                \"Model Cfg\": config[\"model\"],\n                \"Portion\": portion,\n                \"Batch Size\": config[\"loader\"][\"batch_size\"],\n                \"Num Samples\": int(x.shape[0] * portion),\n                \"Samples Per Class\": np.unique(y, return_counts=True)[1][0],\n                \"Classes\": np.unique(y).shape[0],\n                \"Timestamp\": datetime.now()\n            })\n\n            results.append({\n                \"Model\": baseline,\n                \"Top-1 Accuracy\": top1_baseline,\n                \"Top-1 Std.\": top1_baseline_std,\n                \"Top-1 Relative\": \"-\",\n                \"Top-5 Accuracy\": top5_baseline,\n                \"Top-5 Std.\": top5_baseline_std,\n                \"Top-5 Relative\": \"-\",\n                \"# Epochs\": \"-\",\n                \"Architecture\": \"-\",\n                \"# TIG Parameter\": \"-\",\n                \"Embedding Dimension\": x.shape[1],\n                \"# Downstream Parameter\": num_params,\n                \"Objective\": \"-\",\n                \"Model Cfg\": \"-\",\n                \"Portion\": portion,\n                \"Batch Size\": \"-\",\n                \"Num Samples\": int(x.shape[0] * portion),\n                \"Samples Per Class\": np.unique(y, return_counts=True)[1][0],\n                \"Classes\": np.unique(y).shape[0],\n                \"Timestamp\": datetime.now()\n            })\n\n    results = pd.DataFrame(results)\n\n    if top1_baseline is not None:\n        results[\"Top-1 Relative\"] = results[\"Top-1 Accuracy\"] - top1_baseline\n    if top5_baseline is not None:\n        results[\"Top-5 Relative\"] = results[\"Top-5 Accuracy\"] - top5_baseline\n\n    columns_ordered = [\"Model\", \"Top-1 Accuracy\", \"Top-1 Std.\", \"Top-1 Relative\", \"Top-5 Accuracy\", \"Top-5 Std.\", \"Top-5 Relative\",\n                       \"# Epochs\", \"Architecture\", \"# TIG Parameter\", \"Embedding Dimension\", \"# Downstream Parameter\", 
\"Objective\",\n \"Model Cfg\", \"Portion\", \"Batch Size\", \"Num Samples\", \"Samples Per Class\", \"Classes\", \"Timestamp\"]\n\n return results[columns_ordered]\n\n\ndef run_evaluation(x_in: np.ndarray, y_in: np.ndarray, baseline: str = \"svm\",\n repeat: int = 10, ret_model: bool = False, verbose: bool = True,\n portion: float = 1., portion_state: int = None, **baseline_params):\n \"\"\" Executes a concrete evaluation run.\n\n Args:\n x_in: np.ndarray\n Numpy array with the input data on which the evaluation should\n be executed.\n y_in: np.ndarray\n Corresponding labels to the input data in x_in as numpy array.\n baseline: str\n Determines which baseline is used. Options [\"svm\", \"mlp\"]\n repeat: int\n Number of repetitions that will be performed.\n ret_model: bool\n Flag to decide if the model will be returned.\n verbose: bool\n Flag to decide if the process should be printed to the console.\n portion: float\n Determines which fraction of the data is used. E.g. 1 for the\n whole data and 0.1 for 10%.\n portion_state: int\n Random state when splitting the date into the portion. If None\n the number of iteration is used for reprodiciblity.\n **baseline_params:\n Parameters for the sklean baseline.\n\n Returns:\n (top1-accuracy, top5-accuracy, -1) or\n (top1-accuracy, top5-accuracy, num_model_coeff)\n \"\"\"\n top1 = []\n top5 = []\n\n pbar = trange(repeat, desc=\"Top1: - | Top2: - \", disable=(not verbose))\n\n for i in pbar:\n if portion < 1:\n # x, y = x[idx], y[idx]\n # x, y = get_portion(x, y, portion)\n random_state = (i if portion_state is None else portion_state)\n _, x, _, y = train_test_split(x_in, y_in, stratify=y_in,\n test_size=portion,\n random_state=random_state)\n else:\n x, y = x_in, y_in\n\n try:\n x_train, x_test, y_train, y_test = train_test_split(x, y, stratify=y, random_state=i)\n # pylint: disable=broad-except\n except Exception:\n x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=i)\n\n model = get_model(baseline, verbose=0, random_state=i, **baseline_params)\n model.fit(x_train, y_train)\n\n if baseline == \"svm\":\n yhat = model.decision_function(x_test)\n elif baseline == \"mlp\":\n yhat = model.predict_proba(x_test)\n\n try:\n top1.append(top_k_accuracy_score(y_test, yhat, k=1))\n top5.append(top_k_accuracy_score(y_test, yhat, k=5))\n # pylint: disable=broad-except\n except Exception:\n top1.append(accuracy_score(y_test, model.predict(x_test)))\n top5.append(-1)\n\n top1_mean, top5_mean = np.mean(top1) * 100, np.mean(top5) * 100\n pbar.set_description(f\"Top1: {'{:.4f}'.format(top1_mean)} | Top2: {'{:.4f}'.format(top5_mean)}\")\n\n if ret_model:\n return top1, top5, model\n else:\n if baseline == \"mlp\":\n return top1, top5, sum([len(x) for x in model.coefs_])\n else:\n return top1, top5, -1\n\n\ndef get_metric_stats(experiments: [tuple], exp_repetitions: int = 5):\n \"\"\" Function to get the mean and standard deviations of the metrics of an experiment along the repetitions.\n\n Args:\n experiments: [tuple]\n List of tuples of the form (EXPERIMENT_NAME, EXPERIMENT_PATH) with\n type (str, str).\n exp_repetitions: int\n Number of repetitions of the experiment, i.e. how of the experiment\n was repeated. The stats of the the metrics are calculated along the\n repetitions.\n Return:\n np.ndarray: List of the mean and standard deviation over the repetitions of the metrics\n for each experiment. 
The list has length (num_experiments)\n            and each entry has the form\n            (EXPERIMENT_NAME, [AUC_MEAN], [AUC_STD], [PREC_MEAN], [PREC_STD]) of type\n            (str, [float], [float], [float], [float]).\n    \"\"\"\n    auc = []\n    prec = []\n\n    result = []\n\n    for exp in experiments:\n        try:\n            for i in range(exp_repetitions):\n                metrics = np.load(exp[1] + str(i) + \"/TIG_train.metrics.npz\")\n                auc.append(np.squeeze(metrics[\"auc\"]))\n                prec.append(np.squeeze(metrics[\"precision\"]))\n\n            # E.g. \"auc\" has the form (num_repetitions, num_epochs). This also holds for \"prec\".\n            result.append([exp[0], np.mean(auc, axis=0), np.std(auc, axis=0),\n                           np.mean(prec, axis=0), np.std(prec, axis=0)])\n            auc, prec = [], []\n        # pylint: disable=broad-except\n        except Exception:\n            # For the case that there are no metrics stored.\n            pass\n\n    return np.array(result)\n\n\ndef evaluate_pytorch_model(experiments: [tuple], device: str = \"cuda\", repeat: int = 10,\n                           verbose: bool = True, portion: int = 1):\n    \"\"\" Evaluate a single pytorch model (not the TIG model).\n\n    This method is not used.\n\n    Important: This method does not evaluate the TIG model since the TIG model is evaluated\n    by downstream models. One can use this method, for example, for the STGCN model, which\n    has the classifier included.\n\n    Args:\n        experiments: [tuple]\n            A list of tuples, where a tuple consists of the name of the experiment and\n            the path.\n        device: str\n            Pytorch device, e.g. \"cuda\" or \"cpu\".\n        repeat: int\n            Number of repetitions of the evaluation.\n        verbose: bool\n            Flag to determine if the progress is printed to the console/output.\n        portion: int\n            Percentage defining which fraction of the data is used.\n    Return:\n        pd.DataFrame: Dataframe containing the results of the evaluation per experiment.\n    \"\"\"\n    results = []\n\n    for name, path in tqdm(experiments):\n        with open(path + \"config.json\") as file:\n            config = json.load(file)\n\n        model = torch.load(path + \"STGCN.pt\").to(device)\n\n        # These steps align with the portion of data used in training.\n        data_cfg = {**config[\"data\"], **{\"name\": \"stgcn_50_classes\", \"path\": \"../content/\"}}\n        data = TIGDataset(**data_cfg)\n\n        if \"stratify\" in config:\n            train = data.stratify(**config[\"stratify\"]) # num=2, num_samples=100, mode=1)\n            x = np.array(train)[:, 0]\n            y = np.array(train)[:, 1].astype(int)\n        else:\n            x = data.x\n            y = data.y\n\n        thr = int(x.shape[0] * portion)\n        if portion < 1:\n            classes = np.unique(y)\n            num_samples = thr // len(classes)\n            idx = np.array([], dtype=int)\n\n            for cls in classes:\n                idx = np.append(idx, np.where(y == cls)[0][:num_samples])\n\n            x = x[idx]\n            y = y[idx]\n\n        # num_classes = len(np.unique(y))\n\n        le = preprocessing.LabelEncoder()\n        y = le.fit_transform(y)\n\n        top1 = []\n        top5 = []\n\n        pbar = trange(repeat, desc=\"Top1: - | Top5: - \", disable=(not verbose))\n\n        for i in pbar:\n            _, X_test, _, y_test = train_test_split(x, y, random_state=i + 50)\n            val_loader = DataLoader(list(zip(X_test, y_test)), **config[\"loader\"])\n\n            with torch.no_grad():\n                top1_batch = []\n                top5_batch = []\n                for batch_x, batch_y in val_loader:\n                    # Input (batch_size, time, nodes, features)\n                    batch_x = batch_x.type(\"torch.FloatTensor\").to(device)\n                    N, T, V, C = batch_x.size()\n                    batch_x = batch_x.permute(0, 3, 1, 2).view(N, C, T, V // 2, 2)\n\n                    # batch_x = batch_x.type(\"torch.FloatTensor\").permute(0, 3, 2, 1).to(device)\n\n                    yhat = model(batch_x.to(device))\n\n                    metric = stgcn_metric(yhat, batch_y)\n\n                    t1 = metric[\"top-1\"]\n                    t5 = metric[\"top-5\"]\n
                    top1_batch.append(t1)\n                    top5_batch.append(t5)\n\n            top1.append(np.mean(top1_batch))\n            top5.append(np.mean(top5_batch))\n\n            pbar.set_description((f\"Top1: {'{:.4f}'.format(np.mean(top1)*100)} \"\n                                  f\"| Top5: {'{:.4f}'.format(np.mean(top5)*100)}\"))\n\n        results.append({\n            \"Model\": name,\n            \"Top-1 Accuracy\": np.mean(top1) * 100,\n            \"Top-1 Std.\": np.std(top1) * 100,\n            \"Top-5 Accuracy\": np.mean(top5) * 100,\n            \"Top-5 Std.\": np.std(top5) * 100,\n            \"# Parameter\": sum(p.numel() for p in model.parameters() if p.requires_grad),\n            \"Portion\": portion,\n            \"Num Samples\": x.shape[0],\n            \"Classes\": np.unique(y)\n        })\n\n    return pd.DataFrame(results)\n","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":20034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"648079605","text":"from django.views.generic import ListView, DeleteView\nfrom jquery_upload.views import PartialUploadCacheMixin, BaseUploadContentView, UploadTempStorageFileSystem, \\\n    JSONResponseMixin\nfrom django_js_initial_bootstrap.views import ListSerializerBootstrapMixin\nfrom rest_framework import serializers\nfrom uploadkit.models import Thingy\n\n\nclass UploadContentView(PartialUploadCacheMixin, BaseUploadContentView):\n    model = Thingy\n    temp_storage = UploadTempStorageFileSystem('temp-thingies')\n    file_field_name = 'content'\n    delete_view_name = 'delete-thingy'\n\n    def decorate_instance(self, instance):\n        instance.extra = 'hello!'\n\n    def make_upload_response(self, expected_file_name):\n        r = super(UploadContentView, self).make_upload_response(expected_file_name)\n        r['delete_url'] = self.object.delete_url()\n        return r\n\n\nclass DeleteContentView(JSONResponseMixin, DeleteView):\n    model = Thingy\n    file_field_name = 'content'\n\n    def delete(self, request, *args, **kwargs):\n        self.object = self.get_object()\n        getattr(self.object, self.file_field_name).delete()\n        self.object.delete()\n        return self.render_to_response({'deleted': True})\n\n\nclass MetaSerializer(serializers.Serializer):\n    size = serializers.IntegerField()\n    delete_url = serializers.CharField()\n\n\nclass MySerializer(serializers.ModelSerializer):\n    meta = MetaSerializer()\n    name = serializers.CharField()\n\n    class Meta:\n        model = Thingy\n        fields = ('name', 'meta')\n\n\nclass SampleView(ListSerializerBootstrapMixin, ListView):\n    template_name = 'uploader.html'\n    model = Thingy\n    bootstrapped_context_object = 'bootstrappedFiles'\n    serializer_class = MySerializer","sub_path":"uploadkit/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"422094256","text":"# coding: utf-8\nfrom flask import Flask, render_template, request, redirect, url_for\nimport numpy as np\nfrom CPData import CardPack as cp\n\n\napp = Flask(__name__)\n\n\n# Main\nclass ELEMENT(object):\n    def __init__(self):\n        self._count = 0\n        self._price = 0\n        self._bronze = 0\n        self._silver = 0\n        self._gold = 0\n        self._legend = 0\n\n    def initialize(self):\n        self._count = 0\n        self._price = 0\n        self._bronze = 0\n        self._silver = 0\n        self._gold = 0\n        self._legend = 0\n\n    def getcount(self):\n        return self._count\n\n    def setcount(self, count):\n        self._count = count\n\n    def getprice(self):\n        return self._price\n\n    def setprice(self, price):\n        self._price = price\n\n    def getbronze(self):\n        return self._bronze\n\n    def setbronze(self, bronze):\n        self._bronze = bronze\n\n    def getsilver(self):\n        return 
self._silver\n\n def setsilver(self, silver):\n self._silver = silver\n\n def getgold(self):\n return self._gold\n\n def setgold(self, gold):\n self._gold = gold\n\n def getlegend(self):\n return self._legend\n\n def setlegend(self, legend):\n self._legend = legend\n\n count = property(getcount, setcount)\n price = property(getprice, setprice)\n bronze = property(getbronze, setbronze)\n silver = property(getsilver, setsilver)\n gold = property(getgold, setgold)\n legend = property(getlegend, setlegend)\n\n\nstandard = ELEMENT()\nevolved = ELEMENT()\nbahamut = ELEMENT()\npack_all = ELEMENT()\n\n\ndef pickup_cardpack(pack):\n weight_premium = [0.92, 0.08]\n pickup_card = np.random.choice(pack)\n premium_card = pickup_card + ' PREMIUM'\n choice_premium = [pickup_card, premium_card]\n pickup_premium = np.random.choice(choice_premium, p=weight_premium)\n return pickup_premium\n\n\ndef pickup_standard_rare():\n allrarity = (\"bronze\", \"silver\", \"gold\", \"legend\")\n nonbronze = (\"silver\", \"gold\", \"legend\")\n weight_all = [0.675, 0.25, 0.06, 0.015]\n weight_nonbronze = [0.925, 0.06, 0.015]\n result = []\n for v in range(7):\n pickup_rarity = np.random.choice(allrarity, p=weight_all)\n if \"bronze\" in pickup_rarity:\n result.append('Bron:' + pickup_cardpack(cp.standard_bronze))\n standard.bronze += 1\n pack_all.bronze += 1\n elif \"silver\" in pickup_rarity:\n result.append('Silv:' + pickup_cardpack(cp.standard_silver))\n standard.silver += 1\n pack_all.silver += 1\n elif \"gold\" in pickup_rarity:\n result.append('Gold:' + pickup_cardpack(cp.standard_gold))\n standard.gold += 1\n pack_all.gold += 1\n elif \"legend\" in pickup_rarity:\n result.append('Lege:' + pickup_cardpack(cp.standard_legend))\n standard.legend += 1\n pack_all.legend += 1\n for v in range(1):\n pickup_rarity = np.random.choice(nonbronze, p=weight_nonbronze)\n if \"silver\" in pickup_rarity:\n result.append('Silv:' + pickup_cardpack(cp.standard_silver))\n standard.silver += 1\n pack_all.silver += 1\n elif \"gold\" in pickup_rarity:\n result.append('Gold:' + pickup_cardpack(cp.standard_gold))\n standard.gold += 1\n pack_all.gold += 1\n elif \"legend\" in pickup_rarity:\n result.append('Lege:' + pickup_cardpack(cp.standard_legend))\n standard.legend += 1\n pack_all.legend += 1\n return result\n\n\ndef pickup_evolved_rare():\n allrarity = (\"bronze\", \"silver\", \"gold\", \"legend\")\n nonbronze = (\"silver\", \"gold\", \"legend\")\n weight_all = [0.675, 0.25, 0.06, 0.015]\n weight_nonbronze = [0.925, 0.06, 0.015]\n result = []\n for v in range(7):\n pickup_rarity = np.random.choice(allrarity, p=weight_all)\n if \"bronze\" in pickup_rarity:\n result.append(pickup_cardpack(cp.evolved_bronze))\n evolved.bronze += 1\n pack_all.bronze += 1\n elif \"silver\" in pickup_rarity:\n result.append(pickup_cardpack(cp.evolved_silver))\n evolved.silver += 1\n pack_all.silver += 1\n elif \"gold\" in pickup_rarity:\n result.append(pickup_cardpack(cp.evolved_gold))\n evolved.gold += 1\n pack_all.gold += 1\n elif \"legend\" in pickup_rarity:\n result.append(pickup_cardpack(cp.evolved_legend))\n evolved.legend += 1\n pack_all.legend += 1\n for v in range(1):\n pickup_rarity = np.random.choice(nonbronze, p=weight_nonbronze)\n if \"silver\" in pickup_rarity:\n result.append(pickup_cardpack(cp.evolved_silver))\n evolved.silver += 1\n pack_all.silver += 1\n elif \"gold\" in pickup_rarity:\n result.append(pickup_cardpack(cp.evolved_gold))\n evolved.gold += 1\n pack_all.gold += 1\n elif \"legend\" in pickup_rarity:\n 
result.append(pickup_cardpack(cp.evolved_legend))\n evolved.legend += 1\n pack_all.legend += 1\n return result\n\n\ndef pickup_bahamut_rare():\n allrarity = (\"bronze\", \"silver\", \"gold\", \"legend\")\n nonbronze = (\"silver\", \"gold\", \"legend\")\n weight_all = [0.675, 0.25, 0.06, 0.015]\n weight_nonbronze = [0.925, 0.06, 0.015]\n result = []\n for v in range(7):\n pickup_rarity = np.random.choice(allrarity, p=weight_all)\n if \"bronze\" in pickup_rarity:\n result.append(pickup_cardpack(cp.bahamut_bronze))\n bahamut.bronze += 1\n pack_all.bronze += 1\n elif \"silver\" in pickup_rarity:\n result.append(pickup_cardpack(cp.bahamut_silver))\n bahamut.silver += 1\n pack_all.silver += 1\n elif \"gold\" in pickup_rarity:\n result.append(pickup_cardpack(cp.bahamut_gold))\n bahamut.gold += 1\n pack_all.gold += 1\n elif \"legend\" in pickup_rarity:\n result.append(pickup_cardpack(cp.bahamut_legend))\n bahamut.legend += 1\n pack_all.legend += 1\n for v in range(1):\n pickup_rarity = np.random.choice(nonbronze, p=weight_nonbronze)\n if \"silver\" in pickup_rarity:\n result.append(pickup_cardpack(cp.bahamut_silver))\n bahamut.silver += 1\n pack_all.silver += 1\n elif \"gold\" in pickup_rarity:\n result.append(pickup_cardpack(cp.bahamut_gold))\n bahamut.gold += 1\n pack_all.gold += 1\n elif \"legend\" in pickup_rarity:\n result.append(pickup_cardpack(cp.bahamut_legend))\n bahamut.legend += 1\n pack_all.legend += 1\n return result\n\n\ndef buy_standard():\n standard.price += 240\n standard.count += 1\n pack_all.price += 240\n pack_all.count += 1\n return pickup_standard_rare()\n\n\ndef buy_evolved():\n evolved.price += 240\n evolved.count += 1\n pack_all.price += 240\n pack_all.count += 1\n return pickup_evolved_rare()\n\n\ndef buy_bahamut():\n bahamut.price += 240\n bahamut.count += 1\n pack_all.price += 240\n pack_all.count += 1\n return pickup_bahamut_rare()\n\n\ndef count_reset():\n standard.initialize()\n evolved.initialize()\n bahamut.initialize()\n pack_all.initialize()\n\n\n# Routing\n@app.route('/')\ndef index():\n title = \"しゃどばがちゃしみゅ\"\n message = \"全3弾のパックをシミュレート!\"\n return render_template('index.html', message=message, title=title)\n\n\n@app.route('/post', methods=['POST', 'GET'])\ndef post():\n message = \"\"\n if request.method == 'POST':\n result = []\n if 'standard' in request.form:\n title = \"しゃどばがちゃしみゅ\"\n result = buy_standard()\n if 'evolved' in request.form:\n title = \"しゃどばがちゃしみゅ\"\n result = buy_evolved()\n if 'bahamut' in request.form:\n title = \"しゃどばがちゃしみゅ\"\n result = buy_bahamut()\n if 'reset' in request.form:\n title = \"しゃどばがちゃしみゅ\"\n count_reset()\n result = \"\"\n message = \"ぜーんぶきえちゃったっ!\"\n return render_template('index.html', result=result,\n standard=standard, evolved=evolved,\n bahamut=bahamut, pack_all=pack_all,\n title=title, message=message)\n else:\n return redirect(url_for('index'))\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0')","sub_path":"shadowverse_gachagacha/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"132367100","text":"import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\n\nimport tensorflow as tf\nimport tflearn\nfrom tflearn.data_utils import to_categorical\n\n\ndef main():\n \"\"\"\n Your main() function should read in the provided csv file\n and call your two neural networks. 
It should not output anything\n other than the default tflearn output.\n \"\"\"\n csv_file_name = ''\n do_linear = False\n if len(sys.argv) == 3:\n csv_file_name = sys.argv[1]\n do_linear = sys.argv[2] == '0'\n\n position, classification = read_csv(csv_file_name)\n\n plot_spiral(position, classification, 'spiral_plot')\n\n if do_linear:\n linear_model = linear_classifier(position, classification, 4)\n plot_spiral_and_predicted_class(position, classification,\n linear_model, 'linear_predicted',\n 'Predictions Using Linear Classifier')\n else:\n nonlinear_model = non_linear_classifier(position, classification, 4)\n plot_spiral_and_predicted_class(position, classification,\n nonlinear_model, 'nonlinear_predicted',\n 'Predictions Using Non-linear '\n 'Classifier')\n\n\ndef linear_classifier(position_array, class_array, n_classes):\n \"\"\"\n Here you will implement a linear neural network that will classify the input data. The input data is\n an x, y coordinate (in 'position_array') and a classification for that x, y coordinate (in 'class_array'). The\n order of the data in 'position_array' corresponds with the order of the data in 'class_array', i.e., the ith element\n in 'position_array' is classified by the ith element in 'class_array'.\n\n Your neural network will have an input layer that has two input nodes (an x coordinate and y coordinate)\n and an output layer that has four nodes (one for each class) with a softmax activation.\n\n :param position_array: a 2D np array of size [n_examples, 2] that contains an x,y position for each point\n :param class_array: a 1D np array of size [n_examples]\n :param n_classes: an integer that is the number of classes your data has\n \"\"\"\n # Preprocess classification data to get into array of 0s and 1s\n class_array_binary = np.zeros(shape=[len(class_array), n_classes])\n for index, classification in enumerate(class_array):\n class_array_binary[index][classification] = 1\n\n # linear classifier\n with tf.Graph().as_default():\n net = tflearn.input_data(shape=[None, 2])\n net = tflearn.fully_connected(net, 4, activation='softmax')\n net = tflearn.regression(net, loss='categorical_crossentropy')\n\n model = tflearn.DNN(net)\n model.fit(position_array, class_array_binary, n_epoch=10,\n batch_size=10, show_metric=True, snapshot_step=1)\n\n return model\n\n\ndef non_linear_classifier(position_array, class_array, n_classes):\n \"\"\"\n Here you will implement a non-linear neural network that will classify the input data. The input data is\n an x, y coordinate (in 'position_array') and a classification for that x, y coordinate (in 'class_array'). The\n order of the data in 'position_array' corresponds with the order of the data in 'class_array', i.e., the ith element\n in 'position_array' is classified by the ith element in 'class_array'.\n\n Your neural network should have three layers total. An input layer and two fully connected layers\n (meaning that the middle layer is a hidden layer). The second fully connected layer is the output\n layer (so it should have 4 nodes and a softmax activation function). 
You get to decide how many\n nodes the middle layer has and the activation function that it uses.\n\n :param position_array: a 2D np array of size [n_examples, 2] that contains an x,y position for each point\n :param class_array: a 1D np array of size [n_examples]\n :param n_classes: an integer that is the number of classes your data has\n \"\"\"\n # Preprocess classification data to get into array of 0s and 1s\n class_array_binary = np.zeros(shape=[len(class_array), n_classes])\n for index, classification in enumerate(class_array):\n class_array_binary[index][classification] = 1\n\n # linear classifier\n with tf.Graph().as_default():\n net = tflearn.input_data(shape=[None, 2])\n net = tflearn.fully_connected(net, 4, activation='prelu')\n net = tflearn.fully_connected(net, 4, activation='softmax')\n net = tflearn.regression(net, loss='categorical_crossentropy')\n\n model = tflearn.DNN(net)\n model.fit(position_array, class_array_binary, n_epoch=10,\n batch_size=10, show_metric=True, snapshot_step=1)\n\n return model\n\n\ndef plot_spiral_and_predicted_class(position_array, class_array, model, output_file_name, title):\n \"\"\"\n This function plots the spirals with each position with its class colored and the space colored to show\n what the model predicts.\n\n :param position_array: a 2D np array of size [n_examples, 2] that contains an x,y position for each point\n :param class_array: a 1D np array of size [n_examples]\n :param model: a tflearn model object that will be used to color the space\n :param output_file_name: string containing a name for the output file\n :param title: title for the plot\n \"\"\"\n h = 0.02\n x_min, x_max = position_array[:, 0].min() - 1, position_array[:, 0].max()\\\n + 1\n y_min, y_max = position_array[:, 1].min() - 1, position_array[:, 1].max()\\\n + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n z = np.argmax(model.predict(np.c_[xx.ravel(), yy.ravel()]), axis=1)\n z = z.reshape(xx.shape)\n plt.close('all')\n fig = plt.figure()\n plt.contourf(xx, yy, z, cmap=plt.cm.coolwarm, alpha=0.8)\n plt.scatter(position_array[:, 0], position_array[:, 1], c=class_array, s=40, cmap=plt.cm.coolwarm)\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n\n plt.title(title)\n fig.savefig(output_file_name)\n\n\ndef plot_spiral(position_array, class_array, output_file_name):\n \"\"\"\n This function only plots the spirals with each position with its class colored.\n Use this to visualize the data before you run your models.\n\n :param position_array: a 2D np array of size [n_examples, 2] that contains an x,y position for each point\n :param class_array: a 1D np array of size [n_examples]\n :param output_file_name: string containing a name for the output file\n :return:\n \"\"\"\n fig = plt.figure()\n plt.scatter(position_array[:, 0], position_array[:, 1], c=class_array, s=40, cmap=plt.cm.coolwarm)\n plt.xlim([-1,1])\n plt.ylim([-1,1])\n fig.savefig(output_file_name)\n\n\ndef get_accuracy(position_array, class_array, model):\n \"\"\"\n Gets the accuracy of your model\n :param position_array: a 2D np array of size [n_examples, 2] that contains an x,y position for each point\n :param class_array: a 1D np array of size [n_examples]\n :param model: a tflearn model\n :return: a float in the range [0.0, 1.0]\n \"\"\"\n return np.mean(class_array == np.argmax(model.predict(position_array), axis=1))\n\n\ndef read_csv(path_to_file):\n \"\"\"\n Reads the csv file to input\n :param path_to_file: path to the csv file\n :return: a numpy array 
of positions, and a numpy array of classifications\n    \"\"\"\n    position = []\n    classification = []\n    with open(path_to_file, 'r') as csv_file:\n        reader = csv.reader(csv_file)\n        next(reader, None)  # skip the header\n\n        for row in reader:\n            position.append(np.array([float(row[0]), float(row[1])]))\n            classification.append(float(row[2]))\n\n    return np.array(position), np.array(classification, dtype='uint8')\n\nif __name__ == '__main__':\n    main()","sub_path":"HW8/give-to-students/spiral_classifier.py","file_name":"spiral_classifier.py","file_ext":"py","file_size_in_byte":7924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"363424933","text":"import matplotlib.pyplot as plt\nfrom sklearn.linear_model import Ridge\nfrom sklearn import preprocessing\nimport prepare_data\nfrom metrics import NRMSE\n\n# read in and normalize the data\ntraining_object_count, feature_count, training_features, training_labels, test_features, test_labels = prepare_data.read_from_file()\ntraining_features = preprocessing.normalize(training_features, axis=0)\ntest_features = preprocessing.normalize(test_features, axis=0)\n\n# TUNING THE REGULARIZATION:\nmetrics = []\n# effectively a step of 0.1 from 0 to 150\nfor i in range(0, 1500, 1):\n    regularization = i / 10\n    # using 100 iterations as an example\n    model = Ridge(solver='lsqr', max_iter=100, alpha=regularization)\n    model.fit(training_features, training_labels)\n    training_NRMSE = NRMSE(model.predict(training_features), training_labels)\n    test_NRMSE = NRMSE(model.predict(test_features), test_labels)\n    metrics.append({'regularization': regularization, 'training_NRMSE': training_NRMSE, 'test_NRMSE': test_NRMSE})\n\nplt.plot([point['regularization'] for point in metrics], [point['training_NRMSE'] for point in metrics])\nplt.xlabel('regularization')\nplt.ylabel('training_NRMSE')\nplt.show()\n\nplt.plot([point['regularization'] for point in metrics], [point['test_NRMSE'] for point in metrics])\nplt.xlabel('regularization')\nplt.ylabel('test_NRMSE')\nplt.show()\n","sub_path":"_least_squares.py","file_name":"_least_squares.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"29957","text":"valor1 = float(input(\"Digite o valor 1: \"))\r\nvalor2 = float(input(\"Digite o valor 2: \"))\r\nvalor3 = float(input(\"Digite o valor 3: \"))\r\n\r\nif (valor1 < valor2) and (valor2 < valor3) and (valor1 < valor3):\r\n    soma = valor1 + valor2 + valor3\r\n    multiplicacao = valor1 * valor2 * valor3\r\n    media = ((valor1 + valor2 + valor3) / 3)\r\n\r\n    print(f\"Soma dos 3 valores: {soma:.2f}\")\r\n    print(f\"Multiplicação dos 3 valores: {multiplicacao:.2f}\")\r\n    print(f\"Média dos 3 valores: {media:.2f}\")\r\n\r\nelse:\r\n    print(\"Digite os valores em ordem crescente.\")\r\n\r\n\r\n\r\n","sub_path":"21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"582630113","text":"\"\"\"Tests for utility functions in libtmux.\"\"\"\n\nimport re\nimport sys\nimport typing as t\nfrom typing import Optional\n\nimport pytest\n\nimport libtmux\nfrom libtmux._compat import LooseVersion\nfrom libtmux.common import (\n    TMUX_MAX_VERSION,\n    TMUX_MIN_VERSION,\n    get_libtmux_version,\n    get_version,\n    has_gt_version,\n    has_gte_version,\n    has_lt_version,\n    has_lte_version,\n    has_minimum_version,\n    has_version,\n    session_check_name,\n    tmux_cmd,\n)\nfrom libtmux.exc import 
BadSessionName, LibTmuxException, TmuxCommandNotFound\nfrom libtmux.session import Session\n\nversion_regex = re.compile(r\"([0-9]\\.[0-9])|(master)\")\n\n\ndef test_allows_master_version(monkeypatch: pytest.MonkeyPatch) -> None:\n class Hi:\n stdout: t.ClassVar = [\"tmux master\"]\n stderr = None\n\n def mock_tmux_cmd(*args: t.Any, **kwargs: t.Any) -> Hi:\n return Hi()\n\n monkeypatch.setattr(libtmux.common, \"tmux_cmd\", mock_tmux_cmd)\n\n assert has_minimum_version()\n assert has_gte_version(TMUX_MIN_VERSION)\n assert has_gt_version(TMUX_MAX_VERSION), \"Greater than the max-supported version\"\n assert (\n \"%s-master\" % TMUX_MAX_VERSION == get_version()\n ), \"Is the latest supported version with -master appended\"\n\n\ndef test_allows_next_version(monkeypatch: pytest.MonkeyPatch) -> None:\n TMUX_NEXT_VERSION = str(float(TMUX_MAX_VERSION) + 0.1)\n\n class Hi:\n stdout: t.ClassVar = [f\"tmux next-{TMUX_NEXT_VERSION}\"]\n stderr = None\n\n def mock_tmux_cmd(*args: t.Any, **kwargs: t.Any) -> Hi:\n return Hi()\n\n monkeypatch.setattr(libtmux.common, \"tmux_cmd\", mock_tmux_cmd)\n\n assert has_minimum_version()\n assert has_gte_version(TMUX_MIN_VERSION)\n assert has_gt_version(TMUX_MAX_VERSION), \"Greater than the max-supported version\"\n assert get_version() == TMUX_NEXT_VERSION\n\n\ndef test_get_version_openbsd(monkeypatch: pytest.MonkeyPatch) -> None:\n class Hi:\n stderr: t.ClassVar = [\"tmux: unknown option -- V\"]\n\n def mock_tmux_cmd(*args: t.Any, **kwargs: t.Any) -> Hi:\n return Hi()\n\n monkeypatch.setattr(libtmux.common, \"tmux_cmd\", mock_tmux_cmd)\n monkeypatch.setattr(sys, \"platform\", \"openbsd 5.2\")\n assert has_minimum_version()\n assert has_gte_version(TMUX_MIN_VERSION)\n assert has_gt_version(TMUX_MAX_VERSION), \"Greater than the max-supported version\"\n assert (\n \"%s-openbsd\" % TMUX_MAX_VERSION == get_version()\n ), \"Is the latest supported version with -openbsd appended\"\n\n\ndef test_get_version_too_low(monkeypatch: pytest.MonkeyPatch) -> None:\n class Hi:\n stderr: t.ClassVar = [\"tmux: unknown option -- V\"]\n\n def mock_tmux_cmd(*args: t.Any, **kwargs: t.Any) -> Hi:\n return Hi()\n\n monkeypatch.setattr(libtmux.common, \"tmux_cmd\", mock_tmux_cmd)\n with pytest.raises(LibTmuxException) as exc_info:\n get_version()\n exc_info.match(\"is running tmux 1.3 or earlier\")\n\n\ndef test_ignores_letter_versions(monkeypatch: pytest.MonkeyPatch) -> None:\n \"\"\"Ignore letters such as 1.8b.\n\n See ticket https://github.com/tmux-python/tmuxp/issues/55.\n\n In version 0.1.7 this is adjusted to use LooseVersion, in order to\n allow letters.\n\n \"\"\"\n monkeypatch.setattr(libtmux.common, \"TMUX_MIN_VERSION\", \"1.9a\")\n result = has_minimum_version()\n assert result\n\n monkeypatch.setattr(libtmux.common, \"TMUX_MIN_VERSION\", \"1.8a\")\n result = has_minimum_version()\n assert result\n\n # Should not throw\n assert isinstance(has_version(\"1.8\"), bool)\n assert isinstance(has_version(\"1.8a\"), bool)\n assert isinstance(has_version(\"1.9a\"), bool)\n\n\ndef test_error_version_less_1_7(monkeypatch: pytest.MonkeyPatch) -> None:\n def mock_get_version() -> LooseVersion:\n return LooseVersion(\"1.7\")\n\n monkeypatch.setattr(libtmux.common, \"get_version\", mock_get_version)\n with pytest.raises(LibTmuxException) as excinfo:\n has_minimum_version()\n excinfo.match(r\"libtmux only supports\")\n\n with pytest.raises(LibTmuxException) as excinfo:\n has_minimum_version()\n\n excinfo.match(r\"libtmux only supports\")\n\n\ndef test_has_version() -> None:\n assert 
has_version(str(get_version()))\n\n\ndef test_has_gt_version() -> None:\n assert has_gt_version(\"1.6\")\n assert has_gt_version(\"1.6b\")\n\n assert not has_gt_version(\"4.0\")\n assert not has_gt_version(\"4.0b\")\n\n\ndef test_has_gte_version() -> None:\n assert has_gte_version(\"1.6\")\n assert has_gte_version(\"1.6b\")\n assert has_gte_version(str(get_version()))\n\n assert not has_gte_version(\"4.0\")\n assert not has_gte_version(\"4.0b\")\n\n\ndef test_has_lt_version() -> None:\n assert has_lt_version(\"4.0a\")\n assert has_lt_version(\"4.0\")\n\n assert not has_lt_version(\"1.7\")\n assert not has_lt_version(str(get_version()))\n\n\ndef test_has_lte_version() -> None:\n assert has_lte_version(\"4.0a\")\n assert has_lte_version(\"4.0\")\n assert has_lte_version(str(get_version()))\n\n assert not has_lte_version(\"1.7\")\n assert not has_lte_version(\"1.7b\")\n\n\ndef test_tmux_cmd_raises_on_not_found(monkeypatch: pytest.MonkeyPatch) -> None:\n monkeypatch.setenv(\"PATH\", \"\")\n with pytest.raises(TmuxCommandNotFound):\n tmux_cmd(\"-V\")\n\n\ndef test_tmux_cmd_unicode(session: Session) -> None:\n session.cmd(\"new-window\", \"-t\", 3, \"-n\", \"юникод\", \"-F\", \"Ελληνικά\")\n\n\n@pytest.mark.parametrize(\n \"session_name,raises,exc_msg_regex\",\n [\n (\"\", True, \"empty\"),\n (None, True, \"empty\"),\n (\"my great session.\", True, \"contains periods\"),\n (\"name: great session\", True, \"contains colons\"),\n (\"new great session\", False, None),\n (\"ajf8a3fa83fads,,,a\", False, None),\n ],\n)\ndef test_session_check_name(\n session_name: Optional[str], raises: bool, exc_msg_regex: Optional[str]\n) -> None:\n if raises:\n with pytest.raises(BadSessionName) as exc_info:\n session_check_name(session_name)\n if exc_msg_regex is not None:\n assert exc_info.match(exc_msg_regex)\n else:\n session_check_name(session_name)\n\n\ndef test_get_libtmux_version() -> None:\n from libtmux.__about__ import __version__\n\n version = get_libtmux_version()\n assert isinstance(version, LooseVersion)\n assert LooseVersion(__version__) == version\n","sub_path":"tests/legacy_api/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":6263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"155481937","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n#Author:xp\n#blog_url: http://blog.csdn.net/wuxingpu5/article/details/71209731\n\nfrom socket import *\nfrom threading import Thread\n\n# s=socket(AF_INET,SOCK_STREAM)\n# s.bind(('127.0.0.1',8080))\n# s.listen(5)\n\n# while True:\n# conn,addr=s.accept()\n#\n# while True:\n# res=conn.recv(1024)\n# print('client %s%s msg %s'%(addr[0],addr[1],res))\n# conn.send(res.upper())\n\ndef server(ip,port):\n\n s=socket(AF_INET,SOCK_STREAM)\n s.bind((ip,port))\n s.listen(5)\n\n\n while True:\n conn, addr = s.accept()\n print('client: ',conn,addr)\n t=Thread(target=talk,args=(conn,addr))\n t.start()\n\ndef talk(conn,addr):\n try:\n while True:\n res = conn.recv(1024)\n if not res:break\n print('client %s%s msg %s' % (addr[0], addr[1], res))\n conn.send(res.upper())\n except Exception:\n pass\n finally:\n conn.close()\n\nif __name__ == '__main__':\n server('127.0.0.1',8080)","sub_path":"day10/homework/bin/main_Thread并发socket.py","file_name":"main_Thread并发socket.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"216640808","text":"import collections\nimport ftrack_api\nfrom 
pype.modules.ftrack import BaseEvent\n\n\nclass PushFrameValuesToTaskEvent(BaseEvent):\n # Ignore event handler by default\n ignore_me = True\n\n cust_attrs_query = (\n \"select id, key, object_type_id, is_hierarchical, default\"\n \" from CustomAttributeConfiguration\"\n \" where key in ({}) and object_type_id in ({})\"\n )\n\n interest_entity_types = {\"Shot\"}\n interest_attributes = {\"frameStart\", \"frameEnd\"}\n interest_attr_mapping = {\n \"frameStart\": \"fstart\",\n \"frameEnd\": \"fend\"\n }\n _cached_task_object_id = None\n _cached_interest_object_ids = None\n\n @staticmethod\n def join_keys(keys):\n return \",\".join([\"\\\"{}\\\"\".format(key) for key in keys])\n\n @classmethod\n def task_object_id(cls, session):\n if cls._cached_task_object_id is None:\n task_object_type = session.query(\n \"ObjectType where name is \\\"Task\\\"\"\n ).one()\n cls._cached_task_object_id = task_object_type[\"id\"]\n return cls._cached_task_object_id\n\n @classmethod\n def interest_object_ids(cls, session):\n if cls._cached_interest_object_ids is None:\n object_types = session.query(\n \"ObjectType where name in ({})\".format(\n cls.join_keys(cls.interest_entity_types)\n )\n ).all()\n cls._cached_interest_object_ids = tuple(\n object_type[\"id\"]\n for object_type in object_types\n )\n return cls._cached_interest_object_ids\n\n def launch(self, session, event):\n interesting_data = self.extract_interesting_data(session, event)\n if not interesting_data:\n return\n\n entities = self.get_entities(session, interesting_data)\n if not entities:\n return\n\n entities_by_id = {\n entity[\"id\"]: entity\n for entity in entities\n }\n for entity_id in tuple(interesting_data.keys()):\n if entity_id not in entities_by_id:\n interesting_data.pop(entity_id)\n\n task_entities = self.get_task_entities(session, interesting_data)\n\n attrs_by_obj_id = self.attrs_configurations(session)\n if not attrs_by_obj_id:\n self.log.warning((\n \"There is not created Custom Attributes {}\"\n \" for \\\"Task\\\" entity type.\"\n ).format(self.join_keys(self.interest_attributes)))\n return\n\n task_entities_by_parent_id = collections.defaultdict(list)\n for task_entity in task_entities:\n task_entities_by_parent_id[task_entity[\"parent_id\"]].append(\n task_entity\n )\n\n missing_keys_by_object_name = collections.defaultdict(set)\n for parent_id, values in interesting_data.items():\n entities = task_entities_by_parent_id.get(parent_id) or []\n entities.append(entities_by_id[parent_id])\n\n for hier_key, value in values.items():\n changed_ids = []\n for entity in entities:\n key = self.interest_attr_mapping[hier_key]\n entity_attrs_mapping = (\n attrs_by_obj_id.get(entity[\"object_type_id\"])\n )\n if not entity_attrs_mapping:\n missing_keys_by_object_name[entity.entity_type].add(\n key\n )\n continue\n\n configuration_id = entity_attrs_mapping.get(key)\n if not configuration_id:\n missing_keys_by_object_name[entity.entity_type].add(\n key\n )\n continue\n\n changed_ids.append(entity[\"id\"])\n entity_key = collections.OrderedDict({\n \"configuration_id\": configuration_id,\n \"entity_id\": entity[\"id\"]\n })\n if value is None:\n op = ftrack_api.operation.DeleteEntityOperation(\n \"CustomAttributeValue\",\n entity_key\n )\n else:\n op = ftrack_api.operation.UpdateEntityOperation(\n \"ContextCustomAttributeValue\",\n entity_key,\n \"value\",\n ftrack_api.symbol.NOT_SET,\n value\n )\n\n session.recorded_operations.push(op)\n self.log.info((\n \"Changing Custom Attribute \\\"{}\\\" to value\"\n \" \\\"{}\\\" on entities: 
{}\"\n ).format(key, value, self.join_keys(changed_ids)))\n try:\n session.commit()\n except Exception:\n session.rollback()\n self.log.warning(\n \"Changing of values failed.\",\n exc_info=True\n )\n if not missing_keys_by_object_name:\n return\n\n msg_items = []\n for object_name, missing_keys in missing_keys_by_object_name.items():\n msg_items.append(\n \"{}: ({})\".format(object_name, self.join_keys(missing_keys))\n )\n\n self.log.warning((\n \"Missing Custom Attribute configuration\"\n \" per specific object types: {}\"\n ).format(\", \".join(msg_items)))\n\n def extract_interesting_data(self, session, event):\n # Filter if event contain relevant data\n entities_info = event[\"data\"].get(\"entities\")\n if not entities_info:\n return\n\n interesting_data = {}\n for entity_info in entities_info:\n # Care only about tasks\n if entity_info.get(\"entityType\") != \"task\":\n continue\n\n # Care only about changes of status\n changes = entity_info.get(\"changes\") or {}\n if not changes:\n continue\n\n # Care only about changes if specific keys\n entity_changes = {}\n for key in self.interest_attributes:\n if key in changes:\n entity_changes[key] = changes[key][\"new\"]\n\n if not entity_changes:\n continue\n\n # Do not care about \"Task\" entity_type\n task_object_id = self.task_object_id(session)\n if entity_info.get(\"objectTypeId\") == task_object_id:\n continue\n\n interesting_data[entity_info[\"entityId\"]] = entity_changes\n return interesting_data\n\n def get_entities(self, session, interesting_data):\n entities = session.query(\n \"TypedContext where id in ({})\".format(\n self.join_keys(interesting_data.keys())\n )\n ).all()\n\n output = []\n interest_object_ids = self.interest_object_ids(session)\n for entity in entities:\n if entity[\"object_type_id\"] in interest_object_ids:\n output.append(entity)\n return output\n\n def get_task_entities(self, session, interesting_data):\n return session.query(\n \"Task where parent_id in ({})\".format(\n self.join_keys(interesting_data.keys())\n )\n ).all()\n\n def attrs_configurations(self, session):\n object_ids = list(self.interest_object_ids(session))\n object_ids.append(self.task_object_id(session))\n\n attrs = session.query(self.cust_attrs_query.format(\n self.join_keys(self.interest_attr_mapping.values()),\n self.join_keys(object_ids)\n )).all()\n\n output = {}\n for attr in attrs:\n obj_id = attr[\"object_type_id\"]\n if obj_id not in output:\n output[obj_id] = {}\n output[obj_id][attr[\"key\"]] = attr[\"id\"]\n return output\n\n\ndef register(session, plugins_presets):\n PushFrameValuesToTaskEvent(session, plugins_presets).register()\n","sub_path":"pype/modules/ftrack/events/event_push_frame_values_to_task.py","file_name":"event_push_frame_values_to_task.py","file_ext":"py","file_size_in_byte":8183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"558171448","text":"import numpy as np\nfrom rest_framework.decorators import api_view\nfrom api.models import Movie, Rate, Profile, Matrix\nfrom django.contrib.auth.models import User\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nmovies = Movie.objects.all()\nnone = [x for x in range(1, movies[Movie.objects.count()-1].id+1)]\n\nclass MatrixFactorization():\n def __init__(self, R, k, learning_rate, reg_param, epochs, verbose=False):\n \"\"\"\n :param R: rating matrix\n :param k: latent parameter\n :param learning_rate: alpha on weight update\n :param reg_param: beta on weight update\n :param 
epochs: training epochs\n        :param verbose: print status\n        \"\"\"\n\n        self._R = R\n        self._num_users, self._num_items = R.shape\n        self._k = k\n        self._learning_rate = learning_rate\n        self._reg_param = reg_param\n        self._epochs = epochs\n        self._verbose = verbose\n\n\n    def fit(self):\n        \"\"\"\n        training Matrix Factorization : Update matrix latent weight and bias\n\n        참고: self._b에 대한 설명\n        - global bias: input R에서 평가가 매겨진 rating의 평균값을 global bias로 사용\n        - 정규화 기능. 최종 rating에 음수가 들어가는 것 대신 latent feature에 음수가 포함되도록 해줌.\n\n        :return: training_process\n        \"\"\"\n\n        # init latent features\n        self._P = np.random.normal(size=(self._num_users, self._k))\n        self._Q = np.random.normal(size=(self._num_items, self._k))\n\n        # init biases\n        self._b_P = np.zeros(self._num_users)\n        self._b_Q = np.zeros(self._num_items)\n        self._b = np.mean(self._R[np.where(self._R != 0)])\n\n        # train while epochs\n        self._training_process = []\n        for epoch in range(self._epochs):\n\n            # rating이 존재하는 index를 기준으로 training\n            for i in range(self._num_users):\n                for j in range(self._num_items):\n                    if self._R[i, j] > 0:\n                        self.gradient_descent(i, j, self._R[i, j])\n            cost = self.cost()\n            self._training_process.append((epoch, cost))\n\n            # print status\n            if self._verbose == True and ((epoch + 1) % 10 == 0):\n                print(\"Iteration: %d ; cost = %.4f\" % (epoch + 1, cost))\n\n\n    def cost(self):\n        \"\"\"\n        compute root mean square error\n        :return: rmse cost\n        \"\"\"\n\n        # xi, yi: R[xi, yi]는 nonzero인 value를 의미한다.\n        # 참고: http://codepractice.tistory.com/90\n        xi, yi = self._R.nonzero()\n        predicted = self.get_complete_matrix()\n        cost = 0\n        for x, y in zip(xi, yi):\n            cost += pow(self._R[x, y] - predicted[x, y], 2)\n        return np.sqrt(cost / len(xi))  # RMSE: average the squared errors before taking the root\n\n\n    def gradient(self, error, i, j):\n        \"\"\"\n        gradient of latent feature for GD\n\n        :param error: rating - prediction error\n        :param i: user index\n        :param j: item index\n        :return: gradient of latent feature tuple\n        \"\"\"\n\n        dp = (error * self._Q[j, :]) - (self._reg_param * self._P[i, :])\n        dq = (error * self._P[i, :]) - (self._reg_param * self._Q[j, :])\n        return dp, dq\n\n\n    def gradient_descent(self, i, j, rating):\n        \"\"\"\n        gradient descent function\n\n        :param i: user index of matrix\n        :param j: item index of matrix\n        :param rating: rating of (i,j)\n        \"\"\"\n\n        # get error\n        prediction = self.get_prediction(i, j)\n        error = rating - prediction\n\n        # update biases\n        self._b_P[i] += self._learning_rate * (error - self._reg_param * self._b_P[i])\n        self._b_Q[j] += self._learning_rate * (error - self._reg_param * self._b_Q[j])\n\n        # update latent feature\n        dp, dq = self.gradient(error, i, j)\n        self._P[i, :] += self._learning_rate * dp\n        self._Q[j, :] += self._learning_rate * dq\n\n\n    def get_prediction(self, i, j):\n        \"\"\"\n        get predicted rating: user_i, item_j\n        :return: prediction of r_ij\n        \"\"\"\n        return self._b + self._b_P[i] + self._b_Q[j] + self._P[i, :].dot(self._Q[j, :].T)\n\n\n    def get_complete_matrix(self):\n        \"\"\"\n        compute complete matrix PXQ + P.bias + Q.bias + global bias\n\n        - PXQ 행렬에 b_P[:, np.newaxis]를 더하는 것은 각 열마다 bias를 더해주는 것\n        - b_Q[np.newaxis, :]를 더하는 것은 각 행마다 bias를 더해주는 것\n        - b를 더하는 것은 각 element마다 bias를 더해주는 것\n\n        - newaxis: 차원을 추가해줌. 
1차원인 Latent들로 2차원의 R에 행/열 단위 연산을 해주기위해 차원을 추가하는 것.\n\n        :return: complete matrix R^\n        \"\"\"\n        # 소수점을 없애기 위해 반올림한 평점으로 구성한다.\n        return self._b + self._b_P[:, np.newaxis] + self._b_Q[np.newaxis, :] + self._P.dot(self._Q.T)\n\n\n    def print_results(self):\n        \"\"\"\n        print fit results\n        \"\"\"\n        \"\"\"\n        print(\"User Latent P:\")\n        print(self._P)\n        print(\"Item Latent Q:\")\n        print(self._Q.T)\n        print(\"P x Q:\")\n        print(self._P.dot(self._Q.T))\n        print(\"bias:\")\n        print(self._b)\n        print(\"User Latent bias:\")\n        print(self._b_P)\n        print(\"Item Latent bias:\")\n        print(self._b_Q)\n        print(\"Final RMSE:\")\n        print(self._training_process[self._epochs-1][1])\n        \"\"\"\n        # 결과 매트릭스를 DB에 저장하기\n        # print(\"Final R matrix:\")\n        # print(self.get_complete_matrix())\n        result = self.get_complete_matrix().tolist()\n        tmp = self._R.tolist()\n        for i in range(1, len(result[0])):\n            user = i\n            movie_list = []\n            for j in range(1, len(result)):\n                if (j in none) or (tmp[j][i] != 0):\n                    pass\n                else:\n                    movie_list.append((result[j][i], j))\n            movie_list.sort(reverse=True)  # descending, so the ten highest predicted ratings are picked below\n            movie1 = Movie.objects.get(id=movie_list[0][1])\n            movie2 = Movie.objects.get(id=movie_list[1][1])\n            movie3 = Movie.objects.get(id=movie_list[2][1])\n            movie4 = Movie.objects.get(id=movie_list[3][1])\n            movie5 = Movie.objects.get(id=movie_list[4][1])\n            movie6 = Movie.objects.get(id=movie_list[5][1])\n            movie7 = Movie.objects.get(id=movie_list[6][1])\n            movie8 = Movie.objects.get(id=movie_list[7][1])\n            movie9 = Movie.objects.get(id=movie_list[8][1])\n            movie10 = Movie.objects.get(id=movie_list[9][1])\n            Matrix(UserID=Profile.objects.get(user=User.objects.get(pk=user)), Movie1=movie1, Movie2=movie2, Movie3=movie3, Movie4=movie4, Movie5=movie5, Movie6=movie6, Movie7=movie7, Movie8=movie8, Movie9=movie9, Movie10=movie10).save()\n        \n        \n        \n        \n\n\n@api_view(['GET'])\ndef matrix_factorization(request):\n    \n    ratings = [[0 for _ in range(Profile.objects.count()+1)] for _ in range(movies[Movie.objects.count()-1].id+1)]\n    print(none)\n    for movie in movies:\n        tmp = movie.profile_movie.all()\n        idx = movie.id\n        none.remove(idx)\n        if tmp:\n            for t in tmp:\n                ratings[idx][t.UserID.pk] = t.rating\n    \n    factorizer = MatrixFactorization(np.array(ratings), k=3, learning_rate=0.01, reg_param=0.01, epochs=300, verbose=True)\n    factorizer.fit()\n    factorizer.print_results()\n    return Response(status=status.HTTP_200_OK)","sub_path":"Backend_Django/api/views/matrix_views.py","file_name":"matrix_views.py","file_ext":"py","file_size_in_byte":7552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"532115649","text":"# -*- coding: utf-8 -*-\n# UTF-8 encoding when using korean\n\n\"\"\"\n@description\n로봇 방문 순서에 따라 배열 m을 채워주는 함수\n\n@param m 로봇 방문 순서를 저장할 r행 c열의 배열, m[i][j] := (i행 j열)칸의 로봇의 방문 순서 번호\n@param r 행의 수\n@param c 열의 수\n\"\"\"\n\n\ndef is_out_of_range(exp_nowR, exp_nowC, max_r, max_c):\n    if 0 <= exp_nowR < max_r and \\\n            0 <= exp_nowC < max_c:\n        return False\n    else:\n        return True\n\n\ndef simulate(m, y, x):\n    # begin of function\n    nowX = 0\n    nowY = 0\n    length = y * x\n\n    direction = [\n        (1, 0),\n        (0, 1),\n        (-1, 0),\n        (0, -1)\n    ]\n\n    curr_direction_idx = 0\n    iteration = 1\n\n    x_diff, y_diff = direction[curr_direction_idx]\n    m[nowY][nowX] = iteration\n\n    while iteration < length:\n        exp_nowX = nowX + x_diff\n        exp_nowY = nowY + y_diff\n\n        if is_out_of_range(exp_nowX, exp_nowY, x, y) or \\\n                m[exp_nowY][exp_nowX] > 0:\n            curr_direction_idx = (curr_direction_idx + 1) % 4\n            x_diff, y_diff = direction[curr_direction_idx]\n            continue\n\n        nowX = exp_nowX\n        nowY = 
exp_nowY\n        iteration += 1\n        m[nowY][nowX] = iteration\n\n\n# end of function\n\n\ndef main():\n    # 테스트케이스의 수를 입력받는다\n    case_num = int(input())\n\n    # 각 테스트케이스에 대해 순서대로 데이터를 입력받고 정답을 출력한다\n    for case_index in range(1, case_num + 1):\n\n        # 행과 열의 수를 입력받는다\n        r, c = [int(e) for e in input().split()]\n\n        # 0으로 초기화 된 r행 c열의 리스트를 생성한다\n        m = [[0] * c for row_index in range(r)]\n\n        # 주어진 함수를 실행하여 각 칸을 로봇 청소기가 방문하는 순서를 리스트에 저장한다\n        simulate(m, r, c)\n\n        # 케이스 번호를 출력한다\n        print('Case #%d' % case_index)\n\n        # 각 칸의 방문 순서를 출력 형식에 맞게 출력한다\n        for i in range(r):\n            print(*m[i], sep=' ')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"probs/prob17_tracepath/tracepath.py","file_name":"tracepath.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"213420270","text":"#!/usr/bin/python\n\n\"\"\"\nejercicio 5\nDeterminar cuánto debe pagar el cliente de un estacionamiento, el precio se determina\npor las horas que ocupo el estacionamiento.\n\"\"\"\n\nprecio_estacionamiento_hora = int(input(\"precio del estacionamiento por hora \"))\ntiempo_estacionado = float(input(\"colocar horas que el cliente estuvo estacionado \"))\n\nprint(\"el cliente debe pagar: \", precio_estacionamiento_hora * tiempo_estacionado, \"pesos\")\n","sub_path":"Trabajo Práctico 1/TP1_E05.py","file_name":"TP1_E05.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"384608587","text":"from base_model.resnet import *\nfrom base_model.wrn import *\n\nIMAGENET_MODEL_MAP = {\n    'resnet34':create_ResNet34,\n    'resnet50':create_ResNet50,\n    'resnet101':create_ResNet101,\n    'resnet152':create_ResNet152\n}\n\n\nCIFAR10_MODEL_MAP = {\n    'rc56':create_RC56,\n    'rc110':create_RC110,\n    'rc164':create_RC164,\n\n    'wrnc16plain':create_wrnc16plain,\n    'wrnc16drop':create_wrnc16drop,\n    'wrnc28plain':create_wrnc28plain,\n    'wrnc28drop':create_wrnc28drop,\n    'wrnc40plain':create_wrnc40plain,\n    'wrnc40drop':create_wrnc40drop,\n\n}\n\nCH_MODEL_MAP = {\n    'wrnh16plain':create_wrnh16plain,\n    'wrnh16drop':create_wrnh16drop,\n    'wrnh28plain':create_wrnh28plain,\n    'wrnh28drop':create_wrnh28drop,\n    'wrnh40plain':create_wrnh40plain,\n    'wrnh40drop':create_wrnh40drop,\n}\n\nSVHN_MODEL_MAP = {\n\n}\n\nDATASET_TO_MODEL_MAP = {\n    'imagenet': IMAGENET_MODEL_MAP,\n    'cifar10': CIFAR10_MODEL_MAP,\n    'ch': CH_MODEL_MAP, #ch for cifar-100\n    'svhn': SVHN_MODEL_MAP\n}\n\n\n# return the model creation function\ndef get_model_fn(dataset_name, model_name):\n    return DATASET_TO_MODEL_MAP[dataset_name][model_name]\n","sub_path":"base_model/model_map.py","file_name":"model_map.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"268373700","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n# Python3 Standard or Private library\r\nimport pprint\r\nimport json\r\n\r\n# External library\r\nimport requests\r\n# Refer https://requests.readthedocs.io/en/master/_modules/requests/exceptions/\r\n# from requests.exceptions import *\r\nfrom requests.exceptions import ConnectionError, Timeout, HTTPError\r\n\r\n\r\nclass WebRequest:\r\n    def __init__(self):\r\n        self.__class_name = self.__class__.__name__\r\n        self.__error = False\r\n        self.__error_type = None\r\n        self.__debug = False\r\n        # request parameter/response\r\n        self.res = None\r\n        self.__auth = (None, None)\r\n        self.__payload = None\r\n        self.__proxies = None\r\n        
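# Editor's note (illustrative, not in the original): if a proxy were needed,\r\n        # self.__proxies would take the standard requests mapping, e.g.\r\n        # {\"http\": \"http://proxy.example:8080\", \"https\": \"http://proxy.example:8080\"}\r\n        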
self.__headers = {\"Content-Type\": \"application/json\"} # For POST json\r\n self.__timeout = None # connection/read timeout Ex:(3.0, 3.0)\r\n self.__ssl_verify = False # Disable SSL certification check\r\n\r\n # ===== Method =====\r\n def set_error(self, error_type) -> None:\r\n self.__error = True\r\n self.__error_type = error_type\r\n\r\n def is_error(self) -> bool:\r\n return self.__error\r\n\r\n def get_request(self, uri, payload:dict):\r\n try:\r\n self.__payload = payload\r\n self.res = requests.get(url=uri,\r\n auth=self.__auth, # (user,pwd)\r\n params=payload,\r\n headers=None, # No header required\r\n proxies=self.__proxies,\r\n timeout=self.__timeout,\r\n verify=self.__ssl_verify,\r\n )\r\n # ステータスが成功以外(200番台以外)の場合、例外が発生。Redirectの場合、redirect先の結果で対応。\r\n self.res.raise_for_status()\r\n return self.res\r\n\r\n except (ConnectionError, Timeout, HTTPError) as e:\r\n self.set_error(type(e))\r\n return self.res\r\n\r\n def post_json(self, uri, payload:dict):\r\n try:\r\n self.__payload = payload\r\n self.res = requests.post(url=uri,\r\n auth=self.__auth, # (user,pwd)\r\n json=json.dumps(payload),\r\n headers=self.__headers,\r\n proxies=self.__proxies,\r\n timeout=self.__timeout,\r\n verify=self.__ssl_verify,\r\n )\r\n # ステータスが成功以外(200番台以外)の場合、例外が発生。Redirectの場合、redirect先の結果で対応。\r\n self.res.raise_for_status()\r\n return self.res\r\n\r\n except (ConnectionError, Timeout, HTTPError) as e:\r\n self.set_error(type(e))\r\n return self.res\r\n\r\n def post_data(self, uri, payload:dict=None, auth=None):\r\n try:\r\n self.__payload = payload\r\n self.res = requests.post(url=uri,\r\n auth=auth, # (user,pwd)\r\n data=payload,\r\n headers=None,\r\n proxies=self.__proxies,\r\n timeout=self.__timeout,\r\n verify=self.__ssl_verify,\r\n )\r\n # ステータスが成功以外(200番台以外)の場合、例外が発生。Redirectの場合、redirect先の結果で対応。\r\n self.res.raise_for_status()\r\n return self.res\r\n\r\n except (ConnectionError, Timeout, HTTPError) as e:\r\n self.set_error(type(e))\r\n return self.res\r\n\r\n # ===== Separator: Debug method =====\r\n def set_debug(self, mode: bool = True) -> None:\r\n self.__debug = mode\r\n\r\n def is_debug(self) -> bool:\r\n return self.__debug\r\n\r\n def show_me(self) -> None:\r\n print(F\"========== Class:{self.__class_name} ==============\")\r\n print(F'\\tis_error()= {self.is_error()}\\t{type(self.is_error())}')\r\n print(F'\\tself.__error_type={self.__error_type}\\t{type(self.__error_type)}')\r\n print(F'\\tis_debug()= {self.is_debug()}\\t{type(self.is_debug())}')\r\n print(F\"\\t===============================\")\r\n print(F'\\tself.__payload= {self.__payload}\\t{type(self.__payload)}')\r\n print(F'\\tself.res= {self.res}\\t{type(self.res)}')\r\n if self.res is not None:\r\n print(F'\\tself.res.status_code= {self.res.status_code}\\t{type(self.res.status_code)}')\r\n\r\n def show_response(self) -> None:\r\n print(\"== Response Status ==\")\r\n res = self.res\r\n print(F\" res={res}\\ttype={type(res)}\")\r\n if res is None:\r\n return\r\n print(F\"\\tres.status_code={res.status_code}\")\r\n print(F\"\\tres.url={res.url}\")\r\n print(F\"\\tres.encoding={res.encoding}\")\r\n\r\n print(\"== Response Header ==\")\r\n print(F\"\\tres.headers={res.headers}\\ttype={type(res.headers)}\")\r\n contents_type = res.headers['Content-Type']\r\n print(F\"\\tres.headers['Content-Type']={contents_type}\")\r\n print(F\"\\tres.headers['Content-Length']={res.headers['Content-Length']}\")\r\n\r\n print(F\"== Response Body:{contents_type} ==\")\r\n if \"json\" in contents_type:\r\n pprint.pprint(res.json(), indent=2)\r\n 
elif \"text\" in contents_type:\r\n text_list = res.text.splitlines()\r\n for i, text in enumerate(text_list):\r\n if i < 10:\r\n print(F\"\\t{i}:{text}\")\r\n else:\r\n print(F\"\\t== Body: type(res.content):{type(res.content)}\")\r\n return\r\n","sub_path":"web_request.py","file_name":"web_request.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"241899002","text":"#**************************************************************************\n#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *\n#* *\n#* Author: The ALICE Off-line Project. *\n#* Contributors are mentioned in the code where appropriate. *\n#* *\n#* Permission to use, copy, modify and distribute this software and its *\n#* documentation strictly for non-commercial purposes is hereby granted *\n#* without fee, provided that the above copyright notice appears in all *\n#* copies and that both the copyright notice and this permission notice *\n#* appear in the supporting documentation. The authors make no claims *\n#* about the suitability of this software for any purpose. It is *\n#* provided \"as is\" without express or implied warranty. *\n#**************************************************************************\nfrom PWGJE.EMCALJetTasks.Tracks.analysis.base.SpectrumFitter import MinBiasFitter, TriggeredSpectrumFitter\nfrom scipy.optimize import fsolve\n\nclass PtReachCalculator(object):\n \"\"\"\n classdocs\n \"\"\"\n\n def __init__(self, name, data, isMinBias, limit):\n '''\n Constructor\n '''\n self.__fitter = None\n if isMinBias:\n self.__fitter = MinBiasFitter(name, data)\n else:\n self.__fitter = TriggeredSpectrumFitter(name, data)\n self.__limit = limit\n \n def GetPtReach(self, numberOfEvents):\n \"\"\"\n Get the Pt reach for a given number of events\n \"\"\"\n model = lambda p : numberOfEvents * self.__fitter.GetParameterisedValueAt(p) - self.__limit\n initialGuess = 10.\n result = fsolve(model, initialGuess)\n return result\n \n def GetPtReachForIntegral(self, numberOfEvents):\n \"\"\"\n Get the Pt reach for a given number of events using integrated yield above \n \"\"\"\n model = lambda p : numberOfEvents * self.__fitter.GetNormalisedIntegralAbove(p) - self.__limit\n initialGuess = 10.\n result = fsolve(model, initialGuess)\n return result\n","sub_path":"PWGJE/EMCALJetTasks/Tracks/analysis/util/PtReachCalculation.py","file_name":"PtReachCalculation.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"221532608","text":"#!/usr/bin/env python\n\n'''\nclient.py: simple clients for google storage. This first go uses\ndatastore for metadata, and Google Storage for images (objects) so\na client means a connection to both, with functions to interact with \nboth. 
Both will look for the environment variable GOOGLE_APPLICATION_CREDENTIALS\n\nCopyright (c) 2017 Vanessa Sochat\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\n'''\n\nimport datetime\n\nfrom google.cloud import datastore\nfrom som.api.google.storage.utils import (\n get_google_service, \n get_bucket,\n upload_file\n)\n\nfrom som.api.google.storage.models import BatchManager\nfrom som.api import ApiConnection\nfrom som.logger import bot\n\n\nclass ClientBase(ApiConnection):\n\n def __init__(self,**kwargs):\n super(ApiConnection, self).__init__(**kwargs)\n self.datastore = datastore.Client(self.project_name)\n self.batch = BatchManager(client=self.datastore)\n self.storage = get_google_service('storage', 'v1')\n if self.bucket_name is not None:\n self.get_bucket()\n \n def get_bucket(self):\n self.bucket = get_bucket(self.storage,self.bucket_name)\n\n def make_key(self,key,ancestor=None):\n if ancestor is not None:\n ancestor = list(ancestor.key._flat_path)\n key = ancestor + key\n return self.datastore.key(*key)\n\n\n def put_object(self,bucket_folder,file_path,verbose=True,permission=None, mimetype=None):\n '''upload_object will upload a file to path bucket_path in storage\n '''\n return upload_file(storage_service=self.storage,\n bucket=self.bucket,\n mimetype=mimetype,\n bucket_path=bucket_folder,\n file_path=file_path,\n permission=permission,\n verbose=verbose)\n","sub_path":"som/api/google/storage/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"458313043","text":"import pandas as pd\nimport math\nimport os\nimport time\nfrom core import utils\n\nAOI_STRING_TO_AOI_NUMBER = {\n 'QUERY' : 0,\n 'RESULT1' : 1,\n 'RESULT2' : 2,\n 'RESULT3' : 3,\n 'RESULT4' : 4,\n 'RESULT5' : 5,\n 'RESULT6' : 6,\n 'NOT_AOI' : 7,\n}\n\nAOI_NUMBER_TO_AOI_STRING = {\n 0 : 'QUERY',\n 1 : 'RESULT1',\n 2 : 'RESULT2',\n 3 : 'RESULT3',\n 4 : 'RESULT4',\n 5 : 'RESULT5',\n 6 : 'RESULT6',\n 7 : 'NOT_AOI',\n}\n\nNUMBER_OF_AOI_TYPES = 8\nNUMBER_OF_PAGES = 31\n\ndef calculate(input_file):\n \"\"\"\n Calculate Refixation features per AOI.\n\n :param input_file: Fixation CSV file\n \"\"\"\n print(\"Calculating Refixation Features per AOI...\")\n pd_dataframe = pd.read_csv(input_file, sep=\",\", index_col=False)\n\n overwrite = \"y\"\n # Export to csv file\n # If file exists and user does not want to overwrite, do nothing\n # if (os.path.exists(input_file)):\n # overwrite = input(\"File \\\"{}\\\" exists. 
Would you like to overwrite? (Y/N): \".format(input_file).replace(\"\\\\\", \"/\"))\n\n if overwrite.lower() == \"y\":\n\n fixation_list = [[[] for i in range(NUMBER_OF_PAGES)] for j in range(NUMBER_OF_AOI_TYPES)]\n refixation_count = [[0 for i in range(NUMBER_OF_PAGES)] for j in range(NUMBER_OF_AOI_TYPES)]\n\n is_refixation_list = []\n\n df = pd.read_csv(input_file)\n\n num_rows = len(pd_dataframe.index)\n count = 0\n # print(\"Iterating over every row:\")\n starttime = time.time()\n\n for index, row in df.iterrows(): \n page = row['Page'] \n aoi_type = row['AOI_TYPE']\n numbered_aoi = AOI_STRING_TO_AOI_NUMBER[aoi_type]\n\n x_coord = row['X_Coordinate']\n y_coord = row['Y_Coordinate']\n\n if page < 31 and page > 0 and numbered_aoi < 7:\n if check_if_refixation(fixation_list, x_coord, y_coord, numbered_aoi, page):\n refixation_count[numbered_aoi][page] += 1\n is_refixation_list.append(1)\n else:\n is_refixation_list.append(0) \n fixation_list[numbered_aoi][page].append([x_coord, y_coord])\n else:\n is_refixation_list.append(0) \n \n curtime = time.time()\n elapsed_time = curtime - starttime\n count += 1\n progress = utils.progress_bar(count, num_rows, elapsed_time)\n print(progress, end=\"\\r\")\n\n print(\"\")\n\n #print(refixation_count)\n df['Is_Refixation'] = is_refixation_list\n df.to_csv(input_file, index=False)\n print(\"Finished exporting to {}\".\n format(input_file).replace(\"\\\\\", \"/\"))\n else:\n print(\"Exiting...\")\n\ndef check_if_refixation(fixation_list, x_coord, y_coord, numbered_aoi, page):\n existing_fixations = fixation_list[numbered_aoi][page]\n for coordinate in existing_fixations:\n x = coordinate[0]\n y = coordinate[1]\n\n x_square = pow(x - x_coord, 2)\n y_square = pow(y - y_coord, 2)\n\n distance = math.sqrt(x_square + y_square)\n\n if distance <= 10:\n return 1\n\n return 0","sub_path":"core/refixation.py","file_name":"refixation.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"64586663","text":"import melon_utils as mu\nimport re\nimport datetime\nimport codecs\nimport csv\n\nnow = datetime.datetime.now()\nn = now.strftime('%Y%m%d')\n\nurl = \"https://www.melon.com/chart/\"\nmethod = 'get'\nhtml = mu.get_html(url, method)\n\nselector = 'tbody tr'\nhtml_tags = html.select(selector)\npattern = re.compile(\"'(.*)'\")\n\nsaveFile = './results/melon_top_100_list({}).csv'.format(n)\nprint(saveFile)\nwith codecs.open(saveFile, 'w', 'utf-8') as file:\n writer = csv.writer(file, delimiter=',', quotechar='\"')\n for html_tag in html_tags:\n rank = html_tag.select_one(\"span.rank\").text\n songId = html_tag.get('data-song-no')\n title = html_tag.select_one('div.ellipsis.rank01').text.strip()\n artist = \",\".join([artist_name.text for artist_name in html_tag.select('div.ellipsis.rank02 span a')])\n albumId = re.findall(pattern, html_tag.select_one('div.wrap a').get('href'))[0]\n albumTitle = html_tag.select_one('div.wrap a').get('title')\n\n result = [rank, songId, title, artist, albumId, albumTitle]\n print(rank, songId, title, artist, albumId, albumTitle)\n print(\"===================\")\n writer.writerow(result)\nprint(\"+++++++++++++++++++++++++++\", saveFile, \" saved +++++++++++++++++++++++++++\")","sub_path":"scraping/project_melon/get_top100_list.py","file_name":"get_top100_list.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"91553583","text":"# -*- coding: 
utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 24 23:26:10 2019\r\n\r\n@author: DCMC\r\n\"\"\"\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom eval import eval_net\r\nfrom resunet import ResidualUNet\r\nfrom utils.dataset import BasicDataset\r\nfrom torch.utils.data import DataLoader\r\nfrom train import train_net\r\n\r\ndef make_dataset(img_scale = 0.2):\r\n train_dataset, val_dataset = [], []\r\n for i in range(1, 4):\r\n dir_img = 'data/f0' + str(i) + '/imgs/'\r\n dir_mask = 'data/f0' + str(i) + '/masks/'\r\n dir_img_val = 'data/f0' + str(i) + '/imgs_val/'\r\n dir_mask_val = 'data/f0' + str(i) + '/masks_val/'\r\n train_dataset.append(BasicDataset(dir_img, dir_mask, img_scale) )\r\n val_dataset.append(BasicDataset(dir_img_val, dir_mask_val, img_scale) )\r\n \r\n return train_dataset, val_dataset\r\n\r\ndef cross_validation():\r\n train_dataset, val_dataset = make_dataset()\r\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \r\n \r\n val_score = [] \r\n for i in range(3):\r\n net = ResidualUNet(n_channels=1, n_classes=1) # input R=G=B = gray scale\r\n net.to(device=device)\r\n \r\n # faster convolutions, but more memory\r\n torch.backends.cudnn.benchmark = True \r\n \r\n try:\r\n train_net(net,\r\n train_dataset[i],\r\n device,\r\n epochs=256,\r\n batch_size=4,\r\n lr=0.001,\r\n val_percent=0,\r\n save_cp=False,\r\n img_scale=0.2, \r\n data_augment=False)\r\n \r\n except KeyboardInterrupt:\r\n torch.save(net.state_dict(), './model/INTERRUPTED.pth')\r\n try:\r\n sys.exit(0)\r\n except SystemExit:\r\n os._exit(0) \r\n \r\n val_loader = DataLoader(val_dataset[i], batch_size=4, shuffle=False, num_workers=0, pin_memory=True)\r\n current_score = eval_net(net, val_loader, device, n_val = 20)\r\n val_score.append(current_score)\r\n \r\n print(\"\")\r\n print(val_score)\r\n return np.sum(val_score) / 3\r\n\r\nif __name__ == '__main__':\r\n # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n # net = ResidualUNet(n_channels=1, n_classes=1) # input R=G=B = gray scale\r\n # net.to(device=device)\r\n # from torchsummary import summary\r\n # summary(net, input_size=(1, int(0.2 * 500), int(0.2 * 1200))) \r\n \r\n \r\n print('Average score = ' + str(cross_validation()))\r\n ","sub_path":"DIP/cross_validation.py","file_name":"cross_validation.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"646191917","text":"# Copyright (c) 2019-2020 CRS4\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nFully connected network for tissue detection in histopathology images\n\"\"\"\n\nimport argparse\nimport sys\nimport numpy as np\n\nimport pyeddl.eddl as eddl\nimport pyecvl.ecvl as ecvl\nfrom pyeddl.tensor import Tensor\n\nimport models\n\ndef read_slide(slide_fn, level=4):\n levels = ecvl.OpenSlideGetLevels(slide_fn)\n dims = [0, 0] + levels[level]\n img = ecvl.OpenSlideRead(slide_fn, level, dims)\n t = ecvl.ImageToTensor(img)\n t_np = t.getdata()\n s = t_np.shape\n t_np = t_np.transpose((1,2,0)).reshape((s[1]*s[2], 3)) # Channel last and reshape\n t_eval = Tensor.fromarray(t_np) \n print (t_eval.getShape())\n return t_eval, s\n\n\ndef get_mask(prob_T_l, s, th):\n mask_np_l = []\n for prob_T in prob_T_l:\n output_np = prob_T.getdata()\n pred_np = np.zeros(output_np.shape[0])\n pred_np[output_np[:, 1]>th] = 1\n mask_values = pred_np\n mask_np_l.append(mask_values)\n\n mask_values = np.vstack(mask_np_l)\n mask = mask_values.reshape((s[1], s[2]))\n\n return mask\n\n\ndef main(args):\n slide_fn = args.slide_fn\n level = args.level\n\n ## Load model\n net = models.tissue_detector_DNN()\n eddl.build(\n net,\n eddl.rmsprop(0.00001),\n [\"soft_cross_entropy\"],\n [\"categorical_accuracy\"],\n eddl.CS_GPU() if args.gpu else eddl.CS_CPU()\n )\n\n eddl.load(net, args.weights_fn, \"bin\")\n eddl.summary(net)\n \n ## Load Slide\n slide_T, s = read_slide(slide_fn, level)\n\n ## Compute tissue mask\n #len_T = slide_T.getShape()[0]\n #bs = args.batch_size\n #nb = int(np.ceil((len_T / bs)))\n #print (\"n. 
batches: %d\" % nb)\n #output_l = []\n #for b in range(nb):\n # start = bs*b\n # stop = bs*(b+1) if bs*(b+1) < len_T else len_T\n # b_T = slide_T.select([\"%d:%d\" % (start, stop)])\n \n output_l = eddl.predict(net, [slide_T])\n \n print (output_l)\n ## Binarize the output\n mask = get_mask(output_l, s, args.threshold)\n np.save(\"mask\", mask)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\"slide_fn\", metavar=\"INPUT_DATASET\")\n parser.add_argument(\"--weights_fn\", type=str, metavar=\"MODEL FILENAME\", default=30)\n parser.add_argument(\"--level\", type=int, metavar=\"INT\", default=4)\n parser.add_argument(\"--batch-size\", type=int, metavar=\"INT\", default=2**24)\n parser.add_argument(\"--threshold\", type=float, metavar=\"THRESHOLD TO CONVERT PROB TO PREDICTIONS\", default=0.5)\n parser.add_argument(\"--gpu\", action=\"store_true\")\n main(parser.parse_args(sys.argv[1:]))\n","sub_path":"python/tissue_detector_inference.py","file_name":"tissue_detector_inference.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"511506066","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport bs4\nimport re\nimport urllib\n\nimport utilities\nfrom base import Base, MalformedPageError, InvalidBaseError, loadable\n\nclass MalformedUserPageError(MalformedPageError):\n \"\"\"Indicates that a user-related page on MAL has irreparably broken markup in some way.\n \"\"\"\n pass\n\nclass InvalidUserError(InvalidBaseError):\n \"\"\"Indicates that the user requested does not exist on MAL.\n \"\"\"\n pass\n\nclass User(Base):\n \"\"\"Primary interface to user resources on MAL.\n \"\"\"\n _id_attribute = \"username\"\n\n @staticmethod\n def find_username_from_user_id(session, user_id):\n \"\"\"Look up a MAL username's user ID.\n\n :type session: :class:`myanimelist.session.Session`\n :param session: A valid MAL session.\n\n :type user_id: int\n :param user_id: The user ID for which we want to look up a username.\n\n :raises: :class:`.InvalidUserError`\n\n :rtype: str\n :return: The given user's username.\n \"\"\"\n comments_page = session.session.get(u'http://myanimelist.net/comments.php?' 
+ urllib.urlencode({'id': int(user_id)})).text\n comments_page = bs4.BeautifulSoup(comments_page)\n username_elt = comments_page.find('h1')\n if \"'s Comments\" not in username_elt.text:\n raise InvalidUserError(user_id, message=\"Invalid user ID given when looking up username\")\n return username_elt.text.replace(\"'s Comments\", \"\")\n\n def __init__(self, session, username):\n \"\"\"Creates a new instance of User.\n\n :type session: :class:`myanimelist.session.Session`\n :param session: A valid MAL session\n :type username: str\n :param username: The desired user's username on MAL\n\n :raises: :class:`.InvalidUserError`\n\n \"\"\"\n super(User, self).__init__(session)\n self.username = username\n if not isinstance(self.username, unicode) or len(self.username) < 1:\n raise InvalidUserError(self.username)\n self._picture = None\n self._favorite_anime = None\n self._favorite_manga = None\n self._favorite_characters = None\n self._favorite_people = None\n self._last_online = None\n self._gender = None\n self._birthday = None\n self._location = None\n self._website = None\n self._join_date = None\n self._num_comments = None\n self._num_forum_posts = None\n self._num_reviews = None\n self._num_recommendations = None\n self._num_blog_posts = None\n self._num_clubs = None\n self._last_list_updates = None\n self._about = None\n self._anime_stats = None\n self._manga_stats = None\n self._reviews = None\n self._recommendations = None\n self._clubs = None\n self._friends = None\n\n def parse_sidebar(self, user_page):\n \"\"\"Parses the DOM and returns user attributes in the sidebar.\n\n :type user_page: :class:`bs4.BeautifulSoup`\n :param user_page: MAL user page's DOM\n\n :rtype: dict\n :return: User attributes\n\n :raises: :class:`.InvalidUserError`, :class:`.MalformedUserPageError`\n \"\"\"\n user_info = {}\n\n # if MAL says the user doesn't exist, raise an InvalidUserError.\n error_tag = user_page.find(u'div', {u'class': u'error404'})\n if error_tag:\n raise InvalidUserError(self.username)\n\n info_panel_first = user_page.find(u'div', {u'class': u'user-profile'})\n\n try:\n picture_tag = info_panel_first.find(u'img')\n user_info[u'picture'] = picture_tag.get(u'src').decode('utf-8') if picture_tag else None\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n # the user ID is always present in the blogfeed link.\n blog_feed_link = info_panel_first.find(u'a', text=u'Blog Feed')\n user_info[u'id'] = int(blog_feed_link.get(u'href').split(u'&id=')[1])\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n status_elts = info_panel_first.find_all(u'ul', {u'class': u'user-status'}, recursive=False)\n\n fields = [\n (u'last_online', u'Last Online', utilities.parse_profile_date),\n (u'gender', u'Gender', lambda x: x),\n (u'birthday', u'Birthday', utilities.parse_profile_date),\n (u'location', u'Location', lambda x: x),\n (u'join_date', u'Joined', utilities.parse_profile_date),\n ]\n general_elt = status_elts[0];\n for field_name, field_finder, parser in fields:\n field_elt = general_elt.find(u'span', text=field_finder)\n user_info[field_name] = None\n if field_elt:\n try:\n user_info[field_name] = parser(field_elt.nextSibling.text)\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n if not user_info[u'gender']:\n user_info[u'gender'] = 'Not specified'\n\n fields = [\n (u'num_forum_posts', u'Forum Posts'),\n (u'num_reviews', u'Reviews'),\n (u'num_recommendations', u'Recommendations'),\n (u'num_blog_posts', u'Blog Posts'),\n (u'num_clubs', 
u'Clubs'),\n ]\n stats_elt = status_elts[2];\n for field_name, field_finder in fields:\n field_elt = stats_elt.find(u'span', text=field_finder)\n try:\n user_info[field_name] = int(field_elt.nextSibling.text.replace(',', ''))\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n website_tag = info_panel_first.find(text='Also Available at')\n if website_tag:\n user_info[u'website'] = website_tag.parent.findNext(u'a').text\n\n return user_info\n\n def parse(self, user_page):\n \"\"\"Parses the DOM and returns user attributes in the main-content area.\n\n :type user_page: :class:`bs4.BeautifulSoup`\n :param user_page: MAL user page's DOM\n\n :rtype: dict\n :return: User attributes.\n\n \"\"\"\n user_info = self.parse_sidebar(user_page)\n\n section_headings = user_page.find_all(u'div', {u'class': u'normal_header'})\n\n # parse general details.\n try:\n num_comments_tag = user_page.find(u'a', text=re.compile(u'All Comments'))\n num_comments = re.search(u'\\((\\d+)\\)', num_comments_tag.text).group(1)\n user_info[u'num_comments'] = int(num_comments)\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n # parse favorites\n favorites_tag = user_page.find(u'div', {u'class': u'user-favorites'})\n if favorites_tag:\n favorites_section = favorites_tag.find_all(u'div', recursive=False)\n\n try:\n favorite_anime_header = favorites_section[0]\n user_info[u'favorite_anime'] = []\n for elt in favorite_anime_header.find_all(u'li'):\n link_tag = elt.find_all(u'a')[1]\n link_parts = link_tag.get(u'href').split(u'.net')[1].split(u'/')\n # of the form /anime/467/Ghost_in_the_Shell:_Stand_Alone_Complex\n user_info[u'favorite_anime'].append(self.session.anime(int(link_parts[2])).set({u'title': link_tag.text}))\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n favorite_manga_header = favorites_section[1]\n user_info[u'favorite_manga'] = []\n for elt in favorite_manga_header.find_all(u'li'):\n link_tag = elt.find_all(u'a')[1]\n link_parts = link_tag.get(u'href').split(u'.net')[1].split(u'/')\n # of the form /manga/467/Ghost_in_the_Shell:_Stand_Alone_Complex\n user_info[u'favorite_manga'].append(self.session.manga(int(link_parts[2])).set({u'title': link_tag.text}))\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n favorite_character_header = favorites_section[2]\n user_info[u'favorite_characters'] = {}\n for elt in favorite_character_header.find_all(u'li'):\n link_tag = elt.find_all(u'a')[1]\n link_parts = link_tag.get(u'href').split(u'.net')[1].split(u'/')\n # of the form /character/467/Ghost_in_the_Shell:_Stand_Alone_Complex\n char = self.session.character(int(link_parts[2])).set({u'title': link_tag.text})\n media_link_tag = link_tag.nextSibling.find(u'a')\n media_link_parts = media_link_tag.get(u'href').split(u'/')\n # of the form /anime|manga/467/Ghost_in_the_Shell:_Stand_Alone_Complex\n anime = getattr(self.session, media_link_parts[1])(int(media_link_parts[2])).set({u'title': media_link_tag.text})\n user_info[u'favorite_characters'][char] = anime\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n favorite_people_header = favorites_section[3]\n user_info[u'favorite_people'] = []\n for elt in favorite_people_header.find_all(u'li'):\n link_tag = elt.find_all(u'a')[1]\n link_parts = link_tag.get(u'href').split(u'.net')[1].split(u'/')\n # of the form /people/467/Ghost_in_the_Shell:_Stand_Alone_Complex\n user_info[u'favorite_people'].append(self.session.person(int(link_parts[2])).set({u'title': 
link_tag.text}))\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n stats_tag = user_page.find(id='statistics')\n\n try:\n # last list updates.\n list_updates_header = filter(lambda x: u'Last List Updates' in x.text, section_headings)\n if list_updates_header:\n list_updates_header = list_updates_header[0]\n list_updates_table = list_updates_header.findNext(u'table')\n if list_updates_table:\n user_info[u'last_list_updates'] = {}\n for row in list_updates_table.find_all(u'tr'):\n cols = row.find_all(u'td')\n info_col = cols[1]\n media_link = info_col.find(u'a')\n link_parts = media_link.get(u'href').split(u'/')\n # of the form /(anime|manga)/10087/Fate/Zero\n if link_parts[1] == u'anime':\n media = self.session.anime(int(link_parts[2])).set({u'title': media_link.text})\n else:\n media = self.session.manga(int(link_parts[2])).set({u'title': media_link.text})\n list_update = {}\n progress_div = info_col.find(u'div', {u'class': u'spaceit_pad'})\n if progress_div:\n progress_match = re.match(r'(?P[A-Za-z]+)( at (?P[0-9]+) of (?P[0-9]+))?', progress_div.text).groupdict()\n list_update[u'status'] = progress_match[u'status']\n if progress_match[u'episodes'] is None:\n list_update[u'episodes'] = None\n else:\n list_update[u'episodes'] = int(progress_match[u'episodes'])\n if progress_match[u'total_episodes'] is None:\n list_update[u'total_episodes'] = None\n else:\n list_update[u'total_episodes'] = int(progress_match[u'total_episodes'])\n time_div = info_col.find(u'div', {u'class': u'lightLink'})\n if time_div:\n list_update[u'time'] = utilities.parse_profile_date(time_div.text)\n user_info[u'last_list_updates'][media] = list_update\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n # anime stats.\n try:\n anime_stats_header = stats_tag.find(u'div', {u'class': u'stats anime'})\n stats = user_info['anime_stats'] = {}\n stats['Days'] = float(anime_stats_header.find(text=re.compile('Days')).parent.nextSibling)\n stats['Mean Score'] = float(anime_stats_header.find(text=re.compile('Mean Score')).parent.nextSibling)\n stats_tables = anime_stats_header.find_all(u'ul')\n # watching, completed, etc\n for metric in stats_tables[0].find_all(u'li'):\n stats[metric.find(u'a').text] = int(metric.find(u'span').text.replace(',',''))\n # total entries, rewatched, etc\n for metric in stats_tables[1].find_all(u'li'):\n parts = metric.find_all(u'span')\n stats[parts[0].text] = int(parts[1].text.replace(',',''))\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n # manga stats.\n try:\n manga_stats_header = stats_tag.find(u'div', {u'class': u'stats manga'})\n stats = user_info['manga_stats'] = {}\n stats['Days'] = float(manga_stats_header.find(text=re.compile('Days')).parent.nextSibling)\n stats['Mean Score'] = float(manga_stats_header.find(text=re.compile('Mean Score')).parent.nextSibling)\n stats_tables = manga_stats_header.find_all(u'ul')\n # reading, completed, etc\n for metric in stats_tables[0].find_all(u'li'):\n stats[metric.find(u'a').text] = int(metric.find(u'span').text.replace(',',''))\n # total entries, reread, etc\n for metric in stats_tables[1].find_all(u'li'):\n parts = metric.find_all(u'span')\n stats[parts[0].text] = int(parts[1].text.replace(',',''))\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n about_header = user_page.find(u'div', {u'class': u'profile-about-user'})\n if not about_header:\n user_info[u'about'] = u''\n else:\n user_info[u'about'] = about_header.find(u'div').text.strip()\n except:\n if not 
self.session.suppress_parse_exceptions:\n raise\n\n return user_info\n\n def parse_reviews(self, reviews_page):\n \"\"\"Parses the DOM and returns user reviews attributes.\n\n :type reviews_page: :class:`bs4.BeautifulSoup`\n :param reviews_page: MAL user reviews page's DOM\n\n :rtype: dict\n :return: User reviews attributes.\n\n \"\"\"\n user_info = self.parse_sidebar(reviews_page)\n second_col = reviews_page.find(u'div', {u'id': u'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]\n\n try:\n user_info[u'reviews'] = {}\n reviews = second_col.find_all(u'div', {u'class': u'borderDark'}, recursive=False)\n if reviews:\n for row in reviews:\n review_info = {}\n try:\n (meta_elt, review_elt) = row.find_all(u'div', recursive=False)[0:2]\n except ValueError:\n raise\n meta_rows = meta_elt.find_all(u'div', recursive=False)\n review_info[u'date'] = utilities.parse_profile_date(meta_rows[0].find(u'div').text)\n media_link = meta_rows[0].find(u'a')\n link_parts = media_link.get(u'href').split(u'/')\n # of the form /(anime|manga)/9760/Hoshi_wo_Ou_Kodomo\n media = getattr(self.session, link_parts[1])(int(link_parts[2])).set({u'title': media_link.text})\n\n helpfuls = meta_rows[1].find(u'span', recursive=False)\n helpful_match = re.match(r'(?P[0-9]+) of (?P[0-9]+)', helpfuls.text).groupdict()\n review_info[u'people_helped'] = int(helpful_match[u'people_helped'])\n review_info[u'people_total'] = int(helpful_match[u'people_total'])\n\n consumption_match = re.match(r'(?P[0-9]+) of (?P[0-9?]+)', meta_rows[2].text).groupdict()\n review_info[u'media_consumed'] = int(consumption_match[u'media_consumed'])\n if consumption_match[u'media_total'] == u'?':\n review_info[u'media_total'] = None\n else:\n review_info[u'media_total'] = int(consumption_match[u'media_total'])\n\n review_info[u'rating'] = int(meta_rows[3].find(u'div').text.replace(u'Overall Rating: ', ''))\n\n for x in review_elt.find_all([u'div', 'a']):\n x.extract()\n review_info[u'text'] = review_elt.text.strip()\n user_info[u'reviews'][media] = review_info\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n return user_info\n\n def parse_recommendations(self, recommendations_page):\n \"\"\"Parses the DOM and returns user recommendations attributes.\n\n :type recommendations_page: :class:`bs4.BeautifulSoup`\n :param recommendations_page: MAL user recommendations page's DOM\n\n :rtype: dict\n :return: User recommendations attributes.\n\n \"\"\"\n user_info = self.parse_sidebar(recommendations_page)\n second_col = recommendations_page.find(u'div', {u'id': u'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]\n\n try:\n recommendations = second_col.find_all(u\"div\", {u\"class\": u\"spaceit borderClass\"})\n if recommendations:\n user_info[u'recommendations'] = {}\n for row in recommendations[1:]:\n anime_table = row.find(u'table')\n animes = anime_table.find_all(u'td')\n liked_media_link = animes[0].find(u'a', recursive=False)\n link_parts = liked_media_link.get(u'href').split(u'/')\n # of the form /anime|manga/64/Rozen_Maiden\n liked_media = getattr(self.session, link_parts[1])(int(link_parts[2])).set({u'title': liked_media_link.text})\n\n recommended_media_link = animes[1].find(u'a', recursive=False)\n link_parts = recommended_media_link.get(u'href').split(u'/')\n # of the form /anime|manga/64/Rozen_Maiden\n recommended_media = getattr(self.session, link_parts[1])(int(link_parts[2])).set({u'title': recommended_media_link.text})\n\n recommendation_text = row.find(u'p').text\n\n 
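# Editor's note: the menu text below has the form '<label> - <date>',\n                    # so splitting on u' - ' isolates the date portion for parsing.\n                    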
recommendation_menu = row.find(u'div', recursive=False)\n utilities.extract_tags(recommendation_menu)\n recommendation_date = utilities.parse_profile_date(recommendation_menu.text.split(u' - ')[1])\n\n user_info[u'recommendations'][liked_media] = {link_parts[1]: recommended_media, 'text': recommendation_text, 'date': recommendation_date}\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n return user_info\n\n def parse_clubs(self, clubs_page):\n \"\"\"Parses the DOM and returns user clubs attributes.\n\n :type clubs_page: :class:`bs4.BeautifulSoup`\n :param clubs_page: MAL user clubs page's DOM\n\n :rtype: dict\n :return: User clubs attributes.\n\n \"\"\"\n user_info = self.parse_sidebar(clubs_page)\n second_col = clubs_page.find(u'div', {u'id': u'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]\n\n try:\n user_info[u'clubs'] = []\n\n club_list = second_col.find(u'ol')\n if club_list:\n clubs = club_list.find_all(u'li')\n for row in clubs:\n club_link = row.find(u'a')\n link_parts = club_link.get(u'href').split(u'?cid=')\n # of the form /clubs.php?cid=10178\n user_info[u'clubs'].append(self.session.club(int(link_parts[1])).set({u'name': club_link.text}))\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n return user_info\n\n def parse_friends(self, friends_page):\n \"\"\"Parses the DOM and returns user friends attributes.\n\n :type friends_page: :class:`bs4.BeautifulSoup`\n :param friends_page: MAL user friends page's DOM\n\n :rtype: dict\n :return: User friends attributes.\n\n \"\"\"\n user_info = self.parse_sidebar(friends_page)\n second_col = friends_page.find(u'div', {u'id': u'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]\n\n try:\n user_info[u'friends'] = {}\n\n friends = second_col.find_all(u'div', {u'class': u'friendHolder'})\n if friends:\n for row in friends:\n block = row.find(u'div', {u'class': u'friendBlock'})\n cols = block.find_all(u'div')\n\n friend_link = cols[1].find(u'a')\n friend = self.session.user(friend_link.text)\n\n friend_info = {}\n if len(cols) > 2 and cols[2].text != u'':\n friend_info[u'last_active'] = utilities.parse_profile_date(cols[2].text.strip())\n\n if len(cols) > 3 and cols[3].text != u'':\n friend_info[u'since'] = utilities.parse_profile_date(cols[3].text.replace(u'Friends since', '').strip())\n user_info[u'friends'][friend] = friend_info\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n return user_info\n\n def load(self):\n \"\"\"Fetches the MAL user page and sets the current user's attributes.\n\n :rtype: :class:`.User`\n :return: Current user object.\n\n \"\"\"\n user_profile = self.session.session.get(u'http://myanimelist.net/profile/' + utilities.urlencode(self.username)).text\n self.set(self.parse(utilities.get_clean_dom(user_profile)))\n return self\n\n def load_reviews(self):\n \"\"\"Fetches the MAL user reviews page and sets the current user's reviews attributes.\n\n :rtype: :class:`.User`\n :return: Current user object.\n\n \"\"\"\n page = 0\n # collect all reviews over all pages.\n review_collection = []\n while True:\n user_reviews = self.session.session.get(u'http://myanimelist.net/profile/' + utilities.urlencode(self.username) + u'/reviews&' + urllib.urlencode({u'p': page})).text\n parse_result = self.parse_reviews(utilities.get_clean_dom(user_reviews))\n if page == 0:\n # only set attributes once the first time around.\n self.set(parse_result)\n if len(parse_result[u'reviews']) == 0:\n break\n 
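# collect this page's reviews; they are merged into a single dict\n            # once pagination is exhausted (see the set() call below).\n            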
review_collection.append(parse_result[u'reviews'])\n page += 1\n\n # merge the review collections into one review dict, and set it.\n self.set({\n 'reviews': {k: v for d in review_collection for k,v in d.iteritems()}\n })\n return self\n\n def load_recommendations(self):\n \"\"\"Fetches the MAL user recommendations page and sets the current user's recommendations attributes.\n\n :rtype: :class:`.User`\n :return: Current user object.\n\n \"\"\"\n user_recommendations = self.session.session.get(u'http://myanimelist.net/profile/' + utilities.urlencode(self.username) + u'/recommendations').text\n self.set(self.parse_recommendations(utilities.get_clean_dom(user_recommendations)))\n return self\n\n def load_clubs(self):\n \"\"\"Fetches the MAL user clubs page and sets the current user's clubs attributes.\n\n :rtype: :class:`.User`\n :return: Current user object.\n\n \"\"\"\n user_clubs = self.session.session.get(u'http://myanimelist.net/profile/' + utilities.urlencode(self.username) + u'/clubs').text\n self.set(self.parse_clubs(utilities.get_clean_dom(user_clubs)))\n return self\n\n def load_friends(self):\n \"\"\"Fetches the MAL user friends page and sets the current user's friends attributes.\n\n :rtype: :class:`.User`\n :return: Current user object.\n\n \"\"\"\n user_friends = self.session.session.get(u'http://myanimelist.net/profile/' + utilities.urlencode(self.username) + u'/friends').text\n self.set(self.parse_friends(utilities.get_clean_dom(user_friends)))\n return self\n\n @property\n @loadable(u'load')\n def id(self):\n \"\"\"User ID.\n \"\"\"\n return self._id\n\n @property\n @loadable(u'load')\n def picture(self):\n \"\"\"User's picture.\n \"\"\"\n return self._picture\n\n @property\n @loadable(u'load')\n def favorite_anime(self):\n \"\"\"A list of :class:`myanimelist.anime.Anime` objects containing this user's favorite anime.\n \"\"\"\n return self._favorite_anime\n\n @property\n @loadable(u'load')\n def favorite_manga(self):\n \"\"\"A list of :class:`myanimelist.manga.Manga` objects containing this user's favorite manga.\n \"\"\"\n return self._favorite_manga\n\n @property\n @loadable(u'load')\n def favorite_characters(self):\n \"\"\"A dict with :class:`myanimelist.character.Character` objects as keys and :class:`myanimelist.media.Media` as values.\n \"\"\"\n return self._favorite_characters\n\n @property\n @loadable(u'load')\n def favorite_people(self):\n \"\"\"A list of :class:`myanimelist.person.Person` objects containing this user's favorite people.\n \"\"\"\n return self._favorite_people\n\n @property\n @loadable(u'load')\n def last_online(self):\n \"\"\"A :class:`datetime.datetime` object marking when this user was active on MAL.\n \"\"\" \n return self._last_online\n\n @property\n @loadable(u'load')\n def gender(self):\n \"\"\"This user's gender.\n \"\"\"\n return self._gender\n\n @property\n @loadable(u'load')\n def birthday(self):\n \"\"\"A :class:`datetime.datetime` object marking this user's birthday.\n \"\"\" \n return self._birthday\n\n @property\n @loadable(u'load')\n def location(self):\n \"\"\"This user's location.\n \"\"\"\n return self._location\n\n @property\n @loadable(u'load')\n def website(self):\n \"\"\"This user's website.\n \"\"\"\n return self._website\n\n @property\n @loadable(u'load')\n def join_date(self):\n \"\"\"A :class:`datetime.datetime` object marking when this user joined MAL.\n \"\"\" \n return self._join_date\n\n @property\n @loadable(u'load')\n def num_comments(self):\n \"\"\"The number of comments this user has made.\n \"\"\"\n return 
self._num_comments\n\n @property\n @loadable(u'load')\n def num_forum_posts(self):\n \"\"\"The number of forum posts this user has made.\n \"\"\"\n return self._num_forum_posts\n\n @property\n @loadable(u'load')\n def num_reviews(self):\n \"\"\"The number of reviews this user has made.\n \"\"\"\n return self._num_reviews\n\n @property\n @loadable(u'load')\n def num_recommendations(self):\n \"\"\"The number of recommendations this user has made.\n \"\"\"\n return self._num_recommendations\n\n @property\n @loadable(u'load')\n def num_blog_posts(self):\n \"\"\"The number of blog posts this user has made.\n \"\"\"\n return self._num_blog_posts\n\n @property\n @loadable(u'load')\n def num_clubs(self):\n \"\"\"The number of clubs this user has joined.\n \"\"\"\n return self._num_clubs\n\n @property\n @loadable(u'load')\n def last_list_updates(self):\n \"\"\"A dict of this user's last list updates, with keys as :class:`myanimelist.media.Media` objects, and values as dicts of attributes, e.g. {'status': str, 'episodes': int, 'total_episodes': int, 'time': :class:`datetime.datetime`}\n \"\"\"\n return self._last_list_updates\n\n @property\n @loadable(u'load')\n def about(self):\n \"\"\"This user's self-bio.\n \"\"\"\n return self._about\n\n @property\n @loadable(u'load')\n def anime_stats(self):\n \"\"\"A dict of this user's anime stats, with keys as strings, and values as numerics.\n \"\"\"\n return self._anime_stats\n\n @property\n @loadable(u'load')\n def manga_stats(self):\n \"\"\"A dict of this user's manga stats, with keys as strings, and values as numerics.\n \"\"\"\n return self._manga_stats\n\n @property\n @loadable(u'load_reviews')\n def reviews(self):\n \"\"\"A dict of this user's reviews, with keys as :class:`myanimelist.media.Media` objects, and values as dicts of attributes, e.g. \n\n {\n\n 'people_helped': int, \n\n 'people_total': int, \n\n 'media_consumed': int, \n\n 'media_total': int, \n\n 'rating': int, \n\n 'text': str, \n\n 'date': :class:`datetime.datetime`\n\n }\n\n \"\"\"\n return self._reviews\n\n @property\n @loadable(u'load_recommendations')\n def recommendations(self):\n \"\"\"A dict of this user's recommendations, with keys as :class:`myanimelist.media.Media` objects, and values as dicts of attributes, e.g.\n\n {\n\n 'anime|media': :class:`myanimelist.media.Media`, \n\n 'text': str, \n\n 'date': :class:`datetime.datetime`\n\n }\n \"\"\"\n return self._recommendations\n\n @property\n @loadable(u'load_clubs')\n def clubs(self):\n \"\"\"A list of :class:`myanimelist.club.Club` objects containing this user's club memberships.\n \"\"\"\n return self._clubs\n\n @property\n @loadable(u'load_friends')\n def friends(self):\n \"\"\"A dict of this user's friends, with keys as :class:`myanimelist.user.User` objects, and values as dicts of attributes, e.g. 
\n\n {\n\n 'last_active': :class:`datetime.datetime`, \n\n 'since': :class:`datetime.datetime`\n\n }\n \"\"\"\n return self._friends\n\n def anime_list(self):\n \"\"\"This user's anime list.\n\n :rtype: :class:`myanimelist.anime_list.AnimeList`\n :return: The desired anime list.\n \"\"\"\n return self.session.anime_list(self.username)\n\n def manga_list(self):\n \"\"\"This user's manga list.\n\n :rtype: :class:`myanimelist.manga_list.MangaList`\n :return: The desired manga list.\n \"\"\"\n return self.session.manga_list(self.username)\n","sub_path":"myanimelist/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":27648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"394252239","text":"\"\"\"Module to expose resources through a command-line interface.\"\"\"\n\nimport io\nimport json\nimport re\nimport readline\nimport roax.context\nimport roax.schema\nimport shlex\nimport shutil\nimport sys\nimport traceback\n\n\n_re = re.compile(\"(_*)(.*)(_*)\")\n\n\ndef _p2a(name):\n m = _re.match(name)\n return m.group(1) + m.group(2).replace(\"_\", \"-\") + m.group(3)\n\n\ndef _a2p(name):\n m = _re.match(name)\n return m.group(1) + m.group(2).replace(\"-\", \"_\") + m.group(3)\n\n\ndef _parse_redirects(args, body, returns):\n result = {}\n n = 0\n while n < len(args):\n redir = None\n if args[n] in (\"<\", \">\", \">>\"): # redirection as its own argument\n redir = args.pop(n)\n elif args[n].startswith(\">>\"):\n redir = \">>\"\n args[n] = args[n][2:]\n elif args[n].startswith(\"<\") or args[n].startswith(\">\"):\n redir = args[n][0]\n args[n] = args[n][1:]\n if not redir:\n n += 1\n continue\n try:\n filename = args.pop(n)\n except IndexError:\n raise ValueError(\"no redirection file name specified\")\n if \"<\" in filename or \">\" in filename:\n raise ValueError(\"invalid redirection file name\")\n if redir:\n result[redir] = filename\n return result\n\n\ndef _read(inp, schema):\n if isinstance(inp, io.TextIOWrapper):\n inp = inp.buffer\n return schema.bin_decode(inp.read())\n\n\ndef _write(out, schema, value):\n if isinstance(out, io.TextIOWrapper):\n out = out.buffer\n out.write(schema.bin_encode(value))\n out.flush()\n\n\ndef _summary(function):\n \"\"\"Return a summary line of text from usage docstring.\"\"\"\n return function.__doc__.splitlines()[1].lstrip().rstrip()\n\n\nclass _open_redirects:\n def __init__(self, inp, out, args, body, returns):\n self.redirs = _parse_redirects(args, body, returns)\n self.in_out = [inp, out]\n self.body_returns = [body, returns]\n\n def __enter__(self):\n modes = {\"<\": \"rb\", \">\": \"wb\", \">>\": \"ab\"}\n offset = {\"<\": 0, \">\": 1, \">>\": 1}\n for r in list(self.redirs):\n file = open(self.redirs[r], modes[r])\n self.in_out[offset[r]] = file\n self.redirs[r] = file\n return tuple(self.in_out)\n\n def __exit__(self, *args):\n for file in self.redirs.values():\n try:\n file.close()\n except:\n pass\n\n\nclass _Exit(Exception):\n pass\n\n\nclass CLI:\n \"\"\"\n Command line interface that exposes registered resources.\n\n Parameters and instance variables:\n • name: Name of the application.\n • debug: Print details for any raised exceptions.\n • err: Output stream for writing prompts and errors.\n • prefix: Prefix for parameters.\n • log: Log function to write log information.\n \"\"\"\n\n def __init__(\n self, name=None, *, debug=False, err=sys.stderr, prefix=\"--\", log=None\n ):\n super().__init__()\n self.name = name\n self.debug = debug\n self.err = err\n self.prefix = prefix\n 
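 # Parameter names are mapped to flag names by the module-level _p2a/_a2p
 # helpers above, which swap underscores and hyphens inside the name, e.g. a
 # "page_size" parameter is exposed as "--page-size" under the default "--"
 # prefix. A tiny hypothetical round trip:
 #
 # assert _p2a("page_size") == "page-size"
 # assert _a2p("page-size") == "page_size"
 #
 # Note: _help_command further down calls textwrap.dedent(), but "import
 # textwrap" does not appear in this module's import block; assuming nothing
 # re-exports it, that import needs to be added for help output to work.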
self.log = log\n self.resources = {}\n self.commands = {}\n self.hidden = set()\n self._register_commands()\n\n def _check_not_registered(self, name):\n if name in self.resources:\n raise ValueError(f\"{name} is already a registered resource\")\n if name in self.commands:\n raise ValueError(f\"{name} is already a registered command\")\n\n def register_resource(self, name, resource, hidden=False):\n \"\"\"\n Register a resource with the command line interface.\n\n Parameters:\n • name: Name to expose for the resource via command line.\n • resource: Resource to be registered.\n • hidden: Hide the resource in help listings.\n \"\"\"\n self._check_not_registered(name)\n self.resources[name] = resource\n if hidden:\n self.hidden.add(name)\n\n def register_command(self, name, function, hidden=False):\n \"\"\"\n Register a command with the command line interface.\n\n Parameters:\n • name: Name to expose for the command via command line.\n • function: Function to call when command is invoked.\n • hidden: Hide the command in help listings.\n\n The command's docstring is required to have its usage on the first\n line, the summary description on the second line, and any further help\n documentation on subsequent lines.\n \n The command function requires an \"args\" parameter to accept arguments\n passed to it from the command line.\n \"\"\"\n self._check_not_registered(name)\n self.commands[name] = function\n if hidden:\n self.hidden.add(name)\n\n def loop(self, prompt=None):\n \"\"\"\n Repeatedly issue a command prompt and process input.\n\n Parameter:\n • prompt: Prompt to display for each command.\n \n The prompt can be a string or a callable to return a string containing the\n prompt to display.\n \"\"\"\n prompt = prompt or f\"{self.name or ''}> \"\n while True:\n try:\n self.process(input(prompt() if callable(prompt) else prompt))\n except (EOFError, KeyboardInterrupt):\n self._print()\n break\n except _Exit:\n break\n\n def process(self, line, inp=sys.stdin, out=sys.stdout):\n \"\"\"\n Process a single command line.\n \n Parameters:\n • line: Command line string to process.\n\n Returns:\n True if command line was processed successfully.\n \"\"\"\n try:\n if self.log:\n self.log(\"%s\", (line,))\n args = shlex.split(line)\n if not args:\n return True\n with roax.context.push(context=\"roax.cli\", command=line):\n name = args.pop(0)\n if name in self.resources:\n return self._process_resource(name, args, inp, out)\n elif name in self.commands:\n return self.commands[name](args)\n else:\n self._print(f\"Invalid command or resource: {name}.\")\n return False\n except _Exit:\n raise\n except Exception as e:\n if self.log:\n self.log(\"%s\", (e,), exc_info=self.debug)\n self._print(f\"ERROR: {e}\")\n if self.debug:\n traceback.print_exc()\n return False\n\n def _register_commands(self):\n self.register_command(\"help\", self._help)\n self.register_command(\"exit\", self._exit)\n self.register_command(\"quit\", self._exit, hidden=True)\n self.register_command(\"q\", self._exit, hidden=True)\n self.register_command(\"debug\", self._debug, hidden=True)\n\n def _help(self, args):\n \"\"\"\\\n Usage: help [resource [operation] | command]\n Provide help with commands and resources.\\\n \"\"\"\n name = args.pop(0) if args else None\n if not name:\n return self._help_list()\n elif name in self.resources:\n return self._help_resource(name, args)\n elif name in self.commands:\n return self._help_command(name)\n self._print(f\"Unrecognized resource or command: {name}.\")\n return False\n\n def _exit(self, 
args):\n \"\"\"\\\n Usage: exit\n Exit the command line interface.\\\n \"\"\"\n raise _Exit\n\n def _debug(self, args):\n \"\"\"\\\n Usage: debug [on|off]\n Enable, disable or print debugging status.\\\n \"\"\"\n if args and args[0] == \"on\":\n self.debug = True\n elif args and args[0] == \"off\":\n self.debug = False\n elif args:\n self._help_command(\"debug\")\n return False\n print(f'Debugging status: {\"on\" if self.debug else \"off\"}.')\n return True\n\n def _print(self, *args, **varargs):\n if self.err:\n print(*args, file=self.err, **varargs)\n\n def _print_listing(self, listing, indent=\"\", space=4, max_column=24):\n \"\"\"Sort a dictionary by key and print as a listing.\"\"\"\n names = sorted(listing.keys())\n ljust = 0\n for name in names:\n if len(name) <= max_column and len(name) > ljust:\n ljust = len(name)\n for name in names:\n self._print(f'{indent}{name.ljust(ljust)}{\" \" * space}{listing[name]}')\n\n def _help_command(self, name):\n \"\"\"Print the function docstring of a command as help text.\"\"\"\n self._print(textwrap.dedent(self.commands[name].__doc__))\n return False\n\n def _parse_arguments(self, params, args):\n \"\"\"Parse arguments for supported operation parameters.\"\"\"\n result = {}\n args = list(args)\n name = None\n while args:\n arg = args.pop(0)\n if name is None:\n if not arg.startswith(self.prefix):\n raise ValueError\n arg = arg[len(self.prefix) :]\n name, value = arg.split(\"=\", 1) if \"=\" in arg else (arg, None)\n name = _a2p(name)\n if name == \"_body\" or name not in params.props:\n raise ValueError\n if value:\n result[name] = value\n name = None\n else:\n result[name] = arg\n name = None\n if name: # parameter name supplied without value\n raise ValueError\n return result\n\n def _process_resource(self, resource_name, args, inp, out):\n \"\"\"Process a command for a resource.\"\"\"\n resource = self.resources[resource_name]\n operation_name = _a2p(args.pop(0)) if args else None\n operation = resource.operations.get(operation_name)\n if not operation:\n return self._help_resource(resource_name)\n params = operation.params\n returns = operation.returns\n body = params.props.get(\"_body\")\n with _open_redirects(inp, out, args, body, returns) as (inp, out):\n try:\n parsed = self._parse_arguments(params, args)\n except ValueError:\n return self._help_operation(resource_name, operation)\n try:\n for name in parsed:\n parsed[name] = params.props[name].str_decode(parsed[name])\n for name in params.props:\n if (\n name != \"_body\"\n and name in params.required\n and name not in parsed\n ):\n raise roax.schema.SchemaError(\"missing required parameter\")\n if body:\n name = \"{body}\"\n description = (body.description or f\"{name}.\").lower()\n if inp == sys.stdin:\n self._print(f\"Enter {description}\")\n self._print(\n \"When complete, input EOF (*nix: Ctrl-D, Windows: Ctrl-Z+Return):\"\n )\n else:\n self._print(\n f'Reading body from {getattr(inp, \"name\", \"stream\")}...'\n )\n if isinstance(body, roax.schema.reader):\n parsed[\"_body\"] = inp\n else:\n parsed[\"_body\"] = _read(inp, body)\n name = None\n result = operation.call(**parsed)\n except roax.schema.SchemaError as se:\n if name:\n se.push(_p2a(name))\n self._help_operation(resource_name, operation)\n raise\n self._print(\"SUCCESS.\")\n if returns:\n description = (returns.description or \"response.\").lower()\n if out is not sys.stdout:\n self._print(\n f'Writing response to {getattr(out, \"name\", \"stream\")}...'\n )\n if isinstance(returns, roax.schema.reader):\n 
shutil.copyfileobj(result, out)\n result.close()\n else:\n _write(out, returns, result)\n if out is sys.stdout:\n self._print()\n return True\n\n def _help_list(self):\n \"\"\"List all available resources and commands.\"\"\"\n self._print(\"Available resources:\")\n resources = {\n k: self.resources[k].description\n for k in self.resources\n if k not in self.hidden\n }\n self._print_listing(resources, indent=\" \")\n self._print(\"Available commands:\")\n commands = {\n k: _summary(self.commands[k]) for k in self.commands if k not in self.hidden\n }\n self._print_listing(commands, indent=\" \")\n return False\n\n def _help_resource(self, resource_name, args=None):\n \"\"\"Provide operations that are available for a specific resource.\"\"\"\n operation_name = _a2p(args.pop(0)) if args else None\n operation = self.resources[resource_name].operations.get(operation_name)\n if operation:\n return self._help_operation(resource_name, operation)\n self._print(f\"Usage: {resource_name} operation [ARGS] [OUTFILE]\")\n self._print(f\" {self.resources[resource_name].description}\")\n self._print(\"Operations:\")\n ops = self.resources[resource_name].operations.values()\n operations = {_p2a(o.name): o.summary for o in ops}\n self._print_listing(operations, indent=\" \")\n return False\n\n def _help_operation(self, resource_name, operation):\n \"\"\"Provide detailed help message for specific operation.\"\"\"\n params = operation.params\n usage = []\n listing = {}\n for name in (n for n in params if n != \"_body\"):\n param = params[name]\n munged = _p2a(name)\n arg = f\"{self.prefix}{munged}={param.python_type.__name__.upper()}\"\n item = param.description or \"\"\n if param.enum:\n item += (\n \" {\"\n + \",\".join((param.str_encode(e) for e in sorted(param.enum)))\n + \"}\"\n )\n if param.default is not None:\n item += f\" (default: {param.str_encode(param.default)})\"\n listing[f\"{self.prefix}{munged}\"] = item\n if name not in params.required:\n arg = f\"[{arg}]\"\n usage.append(arg)\n self._print(f'Usage: {resource_name} {_p2a(operation.name)} {\" \".join(usage)}')\n self._print(f\" {operation.summary}\")\n if listing:\n self._print(\"Arguments:\")\n self._print_listing(listing, indent=\" \")\n if \"_body\" in params:\n description = params[\"_body\"].description\n if description:\n self._print(f\"Body: {description}\")\n if operation.returns:\n description = operation.returns.description\n if description:\n self._print(f\"Response: {description}\")\n return False\n","sub_path":"roax/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":15312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"337993542","text":"import asyncio\nimport json\n\nimport re\nimport uuid\n\nimport aioamqp\nfrom asynclib.amqp import logger\nfrom asynclib.http.error import BaseError\n\n\nclass AMQPClient(object):\n def __init__(self, name='', prefix='am', host='localhost', user='guest', password='guest',\n vhost='/', exchange_type='topic', dumper=json, prefetch_count=1):\n if not prefix.endswith('_'):\n prefix += '_'\n if len(name) > 0 and not name.endswith('_'):\n name += '_'\n self.credentials = {\n 'host': host,\n 'login': user,\n 'password': password,\n 'virtual_host': vhost\n }\n self.dumper = dumper\n self.prefix = prefix\n self.prefetch_count = prefetch_count\n self.name = name\n self.exchange_name = self.prefix + 'exchange_' + exchange_type\n self.queue_name = self.prefix + name + 'queue_' + exchange_type\n self.transport = None\n self.protocol = 
None\n self.channel = None\n self.exchange_type = exchange_type\n self.waiter = asyncio.Event()\n self._reply_queue = None\n\n async def connect(self):\n self.transport, self.protocol = await aioamqp.connect(**self.credentials)\n self.channel = await self.protocol.channel()\n\n async def __aenter__(self):\n await self.connect()\n return self # return the client so \"async with\" binds a usable object\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self.close()\n\n def __await__(self):\n return self.__aenter__().__await__()\n\n async def close(self):\n await self.protocol.close()\n self.transport.close()\n\n async def get_reply_queue(self):\n if self._reply_queue is None:\n self._reply_queue = (await self.channel.queue_declare(exclusive=True))['queue']\n return self._reply_queue\n\n async def on_reply(self, channel, body, envelope, properties):\n if properties.correlation_id == self.correlation_id:\n self.response = self.dumper.loads(body)\n self.waiter.set()\n\n async def call(self, key, **kwargs):\n self.correlation_id = str(uuid.uuid4())\n self.waiter.clear() # reset the event so a repeated call cannot return a stale response\n reply = await self.get_reply_queue()\n routing_key = (self.prefix + self.name).replace('_', '.') + key\n body = self.dumper.dumps({'parameters': kwargs})\n await self.channel.basic_consume(self.on_reply, queue_name=reply)\n await self.channel.basic_publish(body,\n self.exchange_name, routing_key=routing_key, properties={\n 'correlation_id': self.correlation_id,\n 'reply_to': reply\n }\n )\n await self.waiter.wait()\n if self.response.get('error') is not None:\n raise BaseError.init_with(self.response['error'])\n return self.response['result']\n\n async def publish(self, _name, *args, **kwargs):\n routing_key = (self.prefix + self.name).replace('_', '.') + _name\n body = self.dumper.dumps({'args': args, 'kwargs': kwargs})\n await self.channel.basic_publish(body, self.exchange_name, routing_key=routing_key) # basic_publish is a coroutine in aioamqp; without await the message is never sent\n logger.debug('Message %s with routing key %s published', body, routing_key)\n\n","sub_path":"asynclib/amqp/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"199042918","text":"from pytorch_lightning.profiler import Profiler, AdvancedProfiler\nimport time\nimport numpy as np\n\n\ndef test_simple_profiler():\n p = Profiler()\n\n with p.profile(\"a\"):\n time.sleep(3)\n\n with p.profile(\"a\"):\n time.sleep(1)\n\n with p.profile(\"b\"):\n time.sleep(2)\n\n with p.profile(\"c\"):\n time.sleep(1)\n\n # different environments have different precision when it comes to time.sleep()\n np.testing.assert_almost_equal(p.recorded_durations[\"a\"], [3, 1], decimal=1)\n np.testing.assert_almost_equal(p.recorded_durations[\"b\"], [2], decimal=1)\n np.testing.assert_almost_equal(p.recorded_durations[\"c\"], [1], decimal=1)\n\n\ndef test_advanced_profiler():\n def get_duration(profile):\n return sum([x.totaltime for x in profile.getstats()])\n\n p = AdvancedProfiler()\n\n with p.profile(\"a\"):\n time.sleep(3)\n\n with p.profile(\"a\"):\n time.sleep(1)\n\n with p.profile(\"b\"):\n time.sleep(2)\n\n with p.profile(\"c\"):\n time.sleep(1)\n\n a_duration = get_duration(p.profiled_actions[\"a\"])\n np.testing.assert_almost_equal(a_duration, [4], decimal=1)\n b_duration = get_duration(p.profiled_actions[\"b\"])\n np.testing.assert_almost_equal(b_duration, [2], decimal=1)\n c_duration = get_duration(p.profiled_actions[\"c\"])\n np.testing.assert_almost_equal(c_duration, [1],
decimal=1)\n","sub_path":"tests/test_profiler.py","file_name":"test_profiler.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"596226484","text":"#!/usr/bin/python3\n\nimport math\n\ndef recipe_batches(recipe, ingredients):\n # initialize batch amount = 0 and get list of keys for ingredients of recipe\n batches = 0\n ing_keys = list(recipe.keys())\n\n\n # iterate through recipe and ingredients\n for i in range(len(ing_keys)):\n rec_quantity = recipe.get(ing_keys[i])\n ing_quantity = ingredients.get(ing_keys[i])\n\n # if no ingredient, set it 0\n if ing_quantity == None:\n ing_quantity = 0\n\n # Check if there are more of an ingredient than what the recipe requires; if so, divide the ingredient quantity by recipe quantity to find possible amount of batches for given ingredient\n if rec_quantity <= ing_quantity:\n amount = ing_quantity // rec_quantity\n # Set amount of batches for first\n if i == 0:\n batches = amount\n # If the amount is smaller than previous, set possible batches to lowest\n elif amount < batches:\n batches = amount\n else:\n batches = 0\n\n return batches\n\nif __name__ == '__main__':\n # Change the entries of these dictionaries to test \n # your implementation with different inputs\n recipe = { 'milk': 100, 'butter': 50, 'flour': 5 }\n ingredients = { 'milk': 132, 'butter': 50, 'flour': 51 }\n print(\"{batches} batches can be made from the available ingredients: {ingredients}.\".format(batches=recipe_batches(recipe, ingredients), ingredients=ingredients))","sub_path":"recipe_batches/recipe_batches.py","file_name":"recipe_batches.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"479273596","text":"from celery import task\nfrom text_style_transfer.models import WEIGHTS_PATH, TrainModelRequest, \\\n StyleTransferRequest\nfrom text_style_transfer.neural_network import TextStyleTransfer\n\n\nDEFAULT_TRAINING_DATA = 'https://s3.amazonaws.com/text-datasets/nietzsche.txt'\n\n@task\ndef train_model(train_model_request_id, train_data_url=DEFAULT_TRAINING_DATA):\n train_model_request = TrainModelRequest.objects.filter(id=train_model_request_id).first()\n try:\n train_model_request.status = TrainModelRequest.IN_PROGRESS\n train_model_request.log = \"Training in progress\\n\"\n train_model_request.save()\n tst = TextStyleTransfer(train_data_url, WEIGHTS_PATH)\n tst.train(20)\n train_model_request.log += \"Completed\"\n train_model_request.status = TrainModelRequest.COMPLETED\n except Exception as err:\n import traceback\n err_message = traceback.format_exc()\n train_model_request.log += \"Exception: {}.\\n{}\\n\".format(err, err_message)\n train_model_request.log += \"Failed\"\n train_model_request.status = TrainModelRequest.FAILED\n finally:\n train_model_request.save()\n\n\n@task\ndef transfer_style(style_transfer_request_id, text):\n style_transfer_request = StyleTransferRequest.objects.filter(id=style_transfer_request_id).first()\n try:\n text = text.replace('\\r', '').replace('\\n', ' ')\n train_data_url = TrainModelRequest.objects.filter(\n status=TrainModelRequest.COMPLETED\n ).order_by(\n \"-record_created\"\n ).values_list(\n 'train_data_url', flat=True\n ).first() or DEFAULT_TRAINING_DATA\n\n style_transfer_request.status = TrainModelRequest.IN_PROGRESS\n style_transfer_request.log = \"Training in progress\\n\"\n style_transfer_request.save()\n tst = 
TextStyleTransfer(train_data_url, WEIGHTS_PATH)\n blanks = tst.fill_blanks(text)\n res = []\n blanks_i = 0\n for word in text.split(\" \"):\n if word == \"_\":\n res.append(blanks[blanks_i])\n blanks_i += 1\n else:\n res.append(word)\n style_transfer_request.result_text = \" \".join(res)\n style_transfer_request.log += \"Completed\"\n style_transfer_request.status = TrainModelRequest.COMPLETED\n except Exception as err:\n import traceback\n err_message = traceback.format_exc()\n style_transfer_request.log += \"Exception: {}.\\n{}\\n\".format(err, err_message)\n style_transfer_request.log += \"Failed\"\n style_transfer_request.status = TrainModelRequest.FAILED\n finally:\n style_transfer_request.save()\n","sub_path":"text_style_transfer/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"136362224","text":"import unittest\n\nfrom backend.corpora.common.corpora_orm import DbDatasetProcessingStatus, UploadStatus\nfrom backend.corpora.common.entities.dataset import Entity\n\n\nclass TestEntity(unittest.TestCase):\n def test__create_sub_object(self):\n test_params = {\"row\": {\"upload_status\": UploadStatus.WAITING}, \"db_table\": DbDatasetProcessingStatus}\n\n with self.subTest(test_params):\n result = Entity._create_sub_object(**test_params)\n self.assertIsInstance(result, DbDatasetProcessingStatus)\n self.assertEqual(UploadStatus.WAITING, result.upload_status)\n\n test_params.update(add_columns=dict(dataset_id=\"test_dataset_id\"))\n with self.subTest(test_params):\n result = Entity._create_sub_object(**test_params)\n self.assertIsInstance(result, DbDatasetProcessingStatus)\n self.assertEqual(\"test_dataset_id\", result.dataset_id)\n self.assertEqual(UploadStatus.WAITING, result.upload_status)\n\n test_params = {\"row\": {\"fake_row\": UploadStatus.WAITING}, \"db_table\": DbDatasetProcessingStatus}\n with self.subTest(test_params):\n with self.assertRaises(TypeError):\n Entity._create_sub_object(**test_params)\n","sub_path":"tests/unit/backend/corpora/common/entities/test_entity.py","file_name":"test_entity.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"450778518","text":"from .base import *\n\nDEBUG = False\n\nSECRET_KEY = '{{ vault_django_secret }}'\n\nADMINS = (\n ('WCIVF Developers', 'developers@democracyclub.org.uk'),\n)\n\nMANAGERS = ADMINS\n\nALLOWED_HOSTS = [\n \"*\"\n]\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': '{{ project_name }}',\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n 'CONN_MAX_AGE': 300,\n },\n 'logger': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': 'wcivf_logger',\n 'USER': 'wcivf',\n 'PASSWORD': '{{ vault_logger_db_password }}',\n 'HOST': '{{ vault_logger_db_host }}',\n 'PORT': '',\n }\n\n}\n\nDATABASE_ROUTERS = [\n 'core.db_routers.LoggerRouter',\n]\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\n\nWDIV_API_KEY = \"{{ vault_wdiv_api_key }}\"\nSLACK_FEEDBACK_WEBHOOK_URL = \"{{ vault_slack_feedback_webhook_url }}\" # noqa\n\n\nGOCARDLESS_APP_ID=\"{{ vault_gocardless_app_id }}\"\nGOCARDLESS_APP_SECRET=\"{{ vault_gocardless_app_secret }}\"\nGOCARDLESS_ACCESS_TOKEN=\"{{ vault_gocardless_access_token }}\"\nGOCARDLESS_MERCHANT_ID=\"{{ vault_gocardless_merchant_id }}\"\nGOCARDLESS_ACCESS_TOKEN = \"{{ vault_gocardless_access_token 
}}\"\n\nCHECK_HOST_DIRTY = True\nDIRTY_FILE_PATH = \"~/server_dirty\"\nEE_BASE = \"http://localhost:8000\"\n\n\nEMAIL_SIGNUP_ENDPOINT = 'https://democracyclub.org.uk/mailing_list/api_signup/v1/'\nEMAIL_SIGNUP_API_KEY = '{{ vault_email_signup_api_key }}'\n\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nsentry_sdk.init(\n dsn=\"{{ vault_sentry_dsn }}\",\n integrations=[DjangoIntegration()]\n)\n","sub_path":"webapp_settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"405126076","text":"# my code\n# text = input(\"Hey how's it going? \")\n# while text != \"stop copying me\":\n# text = input(text + \"\\n\")\n# print(\"UGH FINE YOU WIN\")\n\n# colt's code\nmsg = input(\"Say Something: \")\nwhile msg != \"stop copying me\":\n msg = input(f\"{msg}\\n\")\nprint(\"UGH FINE YOU WIL, BROTHER\")","sub_path":"081-092.looping_in_python/stop_copying_me.py","file_name":"stop_copying_me.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"210860252","text":"#3. Faça um programa para desenhar um retângulo no ecrã. Esse quadrado deverá ser desenhado por uma \n#função para a qual são passados três parâmetros: caracter a utilizar, número de linhas e número de \n#colunas. \n\ndef retangulo (carater, linhas , colunas):\n \n if colunas <=1 :\n quit\n elif linhas <=1 :\n quit\n else:\n print(carater * colunas)\n nada=str(\" \")\n \n linha=str(carater + nada*(colunas-2) + carater)\n nlinhas=linhas-1\n for _ in range (1, nlinhas):\n print(linha)\n print(carater * colunas)\n \n\n\ncarater1=str(input(\"Insira um carater:\"))\nlinhas1=int(input(\"Insira o nr de linhas:\"))\ncolunas1=int(input(\"Insira o nr de colunas:\"))\n\nretangulo(carater1 , linhas1 , colunas1)\n\n","sub_path":"Aval/fav4/exe3.py","file_name":"exe3.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"483628658","text":"import logging\nimport os\nfrom multiprocessing.dummy import Queue\n\nimport RedditClient as rc\nfrom Huntsman.ControlProcess import InsertControlProcess\nfrom Huntsman.Report import Report\nfrom env_config import PROGRAM_CONFIG, IS_PRODUCTION\nfrom servus.Node import Node\n\nlogger = logging.getLogger(__name__)\n\nrc.__reddit__ = rc.create_agent()\n\nnode = Node(PROGRAM_CONFIG)\nNUM_THREADS = int(os.getenv(\"NUM_THREADS\")) if os.getenv(\"NUM_THREADS\") else 20 \nMAX_CONNS = os.getenv(\"MAX_DB_CONNS\") if os.getenv(\"MAX_DB_CONNS\") else 20\nmetrics_queue = Queue()\nreport = Report(metrics_queue)\nnode.get_resources(\"subreddits\", 6)\n# enable reporting for the node\n\nsubreddits = ['btc', 'BlockChain', 'NEO', 'altcoin', 'CryptoMarkets', 'ethtrader']\n\nlogger.info(\"Huntsman started in \" + (\"production\" if IS_PRODUCTION else \"development\") + \" environment.\")\n\n\ndef main():\n rc.__reddit__ = rc.create_agent()\n status_queue = Queue()\n for sub_name in node.jobs[-1]['tasks']:\n\n control_process = InsertControlProcess(sub_name,\n status_queue,\n metrics_queue,\n NUM_THREADS\n )\n control_process.start()\n status = status_queue.get()\n\n if status != 'all clear':\n node.report_error(status)\n control_process.terminate()\n logger.info(\"main routine got an error; restarting.\")\n main() # restart\n else:\n html = report.make_html()\n 
node._report_metrics(html)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Huntsman/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"384985014","text":"import numpy as np\n#https://gamescapad.es/building-bots-in-starcraft-2-for-psychologists/#installation\nfrom pysc2.agents import base_agent\nfrom pysc2.lib import actions\nfrom pysc2.lib import features\nimport random\nimport time\nimport dill\n\n_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index\n_PLAYER_FRIENDLY = 1\n_PLAYER_NEUTRAL = 3 # beacon/minerals\n_PLAYER_HOSTILE = 4\n_NO_OP = actions.FUNCTIONS.no_op.id\n_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id\n_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id\n_SELECT_ARMY = actions.FUNCTIONS.select_army.id\n_NOT_QUEUED = [0]\n_SELECT_ALL = [0]\n\n_SELECT_POINT = actions.FUNCTIONS.select_point.id\n\n_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index\n_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index\n_PLAYER_ID = features.SCREEN_FEATURES.player_id.index\n\n_TERRAN_COMMANDCENTER = 18\n_TERRAN_SCV = 45\n_TERRAN_SUPPLY_DEPOT = 19\n_TERRAN_BARRACKS = 21\n_TERRAN_MARINE = 48\n\nclass CollecteMinerals(base_agent.BaseAgent):\n \"\"\"An agent specifically for solving the MoveToBeacon map.\"\"\"\n\n def __init__(self):\n super(CollecteMinerals, self).__init__()\n self.Select = True\n self.selected =[0,0]\n\n def step(self, obs):\n super(CollecteMinerals, self).step(obs)\n\n with open('obs.pkl','wb') as f:\n dill.dump(obs,f)\n\n #print(features)\n #print(features.SCREEN_FEATURES)\n\n time.sleep(0.1)\n\n if self.Select:\n self.Select = False\n unit_type = obs.observation['screen'][_UNIT_TYPE]\n unit_y, unit_x = (unit_type == _TERRAN_MARINE).nonzero()\n if unit_y.any():\n print(len(unit_y))\n i = random.randint(0, len(unit_y) - 1)\n target = [unit_x[i], unit_y[i]]\n self.selected = target\n\n return actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])\n else:\n self.Select = True\n if _MOVE_SCREEN in obs.observation[\"available_actions\"]:\n player_relative = obs.observation[\"screen\"][_PLAYER_RELATIVE]\n neutral_y, neutral_x = (player_relative == _PLAYER_NEUTRAL).nonzero()\n if not neutral_y.any():\n return actions.FunctionCall(_NO_OP, [])\n\n closest, min_dist = None, None\n for p in zip(neutral_x, neutral_y):\n dist = np.linalg.norm(np.array(self.selected) - np.array(p))\n if not min_dist or dist < min_dist:\n closest, min_dist = p, dist\n\n return actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, closest])\n\n '''if _MOVE_SCREEN in obs.observation[\"available_actions\"]:\n player_relative = obs.observation[\"screen\"][_PLAYER_RELATIVE]\n neutral_y, neutral_x = (player_relative == _PLAYER_NEUTRAL).nonzero()\n if not neutral_y.any():\n return actions.FunctionCall(_NO_OP, [])\n target = [int(neutral_x.mean()), int(neutral_y.mean())]\n return actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, target])\n else:\n return actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])'''","sub_path":"pr_collectMineralsGame_Agent_Scripted.py","file_name":"pr_collectMineralsGame_Agent_Scripted.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"649122540","text":"\nimport RPi.GPIO as GPIO\nimport time\nimport sys\nimport math\nfrom hx711 import HX711\n\ndef cleanAndExit():\n print (\"Cleaning...\")\n GPIO.cleanup()\n print (\"Bye!\")\n 
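# The scripted pysc2 agent above alternates between selecting a random marine
# and issuing a move order toward the nearest neutral (mineral) pixel, using
# an L2 norm over screen coordinates. A standalone sketch of that
# nearest-target step (the function and names are hypothetical):
#
# import numpy as np
# def nearest_target(selected_xy, xs, ys):
#     pts = np.stack([xs, ys], axis=1)              # (n, 2) candidate pixels
#     dists = np.linalg.norm(pts - np.array(selected_xy), axis=1)
#     return tuple(pts[dists.argmin()])             # closest (x, y) target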
sys.exit()\n#GPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nhx = HX711(19,13,'A')\n\n# I've found out that, for some reason, the order of the bytes is not always the same between versions of python, numpy and the hx711 itself.\n# Still need to figure out why it changes.\n# If you're experiencing super random values, change these values to MSB or LSB until you get more stable values.\n# There is some code below to debug and log the order of the bits and the bytes.\n# The first parameter is the order in which the bytes are used to build the \"long\" value.\n# The second parameter is the order of the bits inside each byte.\n# According to the HX711 Datasheet, the second parameter is MSB so you shouldn't need to modify it.\nhx.set_reading_format(\"LSB\",\"MSB\")\n\n# HOW TO CALCULATE THE REFERENCE UNIT\n# To calibrate the reference unit, put 1kg on your sensor, or anything you have and know exactly how much it weighs.\n# In this case, 92 is 1 gram because, with 1 as a reference unit I got numbers near 0 without any weight\n# and I got numbers around 184000 when I added 2kg. So, according to the rule of three:\n# If 2000 grams is 184000 then 1000 grams is 184000 / 2000 = 92.\n#hx.set_reference_unit(61546.1209)\nhx.set_reference_unit(1)\n\nhx.reset()\nhx.tare()\nval_ant = 0\noffset = 0\noffset_ant = 0\ntry:\n\twhile True:\n\t\tval = hx.get_weight(1)\n\t\tprint(val)\n\t\toffset_ant = offset\n\t\tval_ant = val\n\n # hx.power_down()\n # hx.power_up()\n # time.sleep(0.1)\nexcept (KeyboardInterrupt, SystemExit):\n cleanAndExit()\nfinally:\n GPIO.cleanup()\n \n","sub_path":"Resultados/PFG/Datos/PFG/Main3/plantilla.py","file_name":"plantilla.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"218887710","text":"#!/usr/bin/python3.6\n#coding: utf-8\n\nfrom enum import Enum\n\nfrom pysnmp import hlapi\n\n\nclass SnmpVersions:\n\n V2C = 'v2c'\n V3 = 'v3'\n\n\nclass SnmpV3AuthProtos(Enum):\n\n NO_AUTH = hlapi.usmNoAuthProtocol\n HMAC_MD5 = hlapi.usmHMACMD5AuthProtocol\n HMAC_SHA = hlapi.usmHMACSHAAuthProtocol\n HMAC128_SHA224 = hlapi.usmHMAC128SHA224AuthProtocol\n HMAC192_SHA256 = hlapi.usmHMAC192SHA256AuthProtocol\n HMAC256_SHA384 = hlapi.usmHMAC256SHA384AuthProtocol\n HMAC384_SHA512 = hlapi.usmHMAC384SHA512AuthProtocol\n\n\nclass SnmpV3CryptoProtos(Enum):\n\n NO_ENCRYPTION = hlapi.usmNoPrivProtocol\n DES = hlapi.usmDESPrivProtocol\n DES_EDE = hlapi.usm3DESEDEPrivProtocol\n AES_128_CFB = hlapi.usmAesCfb128Protocol\n AES_192_CFB = hlapi.usmAesCfb192Protocol\n AES_256_CFB = hlapi.usmAesCfb256Protocol\n\n\nclass SnmpClient():\n\n ''' SNMP Client\n\n Supports SNMP v2c and v3.\n Users can provide the \"community\" argument to use SNMP v2c;\n otherwise, v3 is used.\n '''\n\n def __init__(\n self, host, port=161, community=None,\n v3_user=None, v3_auth_proto=None, v3_auth_key=None,\n v3_crypto_proto=None, v3_crypto_key=None\n ):\n ''' Constructor\n\n :param host: target host, domain name or IP address, str\n :param port: target port, int\n :param community: SNMP v2c community, str\n :param v3_user: SNMP v3 username, str\n :param v3_auth_proto: SNMP v3 authorization protocol,\n should be chosen from SnmpV3AuthProtos\n :param v3_auth_key: SNMP v3 authorization key, str\n :param v3_crypto_proto: SNMP v3 cryptography protocol,\n should be chosen from SnmpV3CryptoProtos\n :param v3_crypto_key: SNMP v3 cryptography key, str\n '''\n\n self.target = (host, port)\n self.community = community\n self.v3_user = v3_user\n
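 # Version selection below hinges entirely on "community": passing a community
 # string selects v2c; otherwise the v3 credentials are validated and used.
 # A usage sketch (host and credential values are hypothetical):
 #
 # client = SnmpClient('192.0.2.1', community='public') # v2c
 # client = SnmpClient('192.0.2.1', v3_user='monitor',
 #                     v3_auth_proto=SnmpV3AuthProtos.HMAC_SHA,
 #                     v3_auth_key='auth-secret',
 #                     v3_crypto_proto=SnmpV3CryptoProtos.AES_128_CFB,
 #                     v3_crypto_key='priv-secret') # v3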
self.v3_auth_proto = v3_auth_proto\n self.v3_auth_key = v3_auth_key\n self.v3_crypto_proto = v3_crypto_proto\n self.v3_crypto_key = v3_crypto_key\n\n if community is not None:\n self.version = SnmpVersions.V2C\n self.auth_data = hlapi.CommunityData(self.community)\n else:\n self.version = SnmpVersions.V3\n\n if (\n self.v3_user is None or\n self.v3_auth_proto is None or\n self.v3_crypto_proto is None\n ):\n raise TypeError('Necessary arguments not provided')\n\n if self.v3_auth_proto not in SnmpV3AuthProtos:\n raise TypeError('Invalid SNMP v3 authorization protocol')\n\n if self.v3_crypto_proto not in SnmpV3CryptoProtos:\n raise TypeError('Invalid SNMP v3 cryptography protocol')\n\n self.auth_data = hlapi.UsmUserData(\n self.v3_user,\n self.v3_auth_key,\n self.v3_crypto_key,\n self.v3_auth_proto,\n self.v3_crypto_proto,\n )\n\n def get_by_var(self, mib_name, var_name, position=None):\n oid = (\n (mib_name, var_name) if position is None else\n (mib_name, var_name, position)\n )\n\n res = hlapi.getCmd(\n hlapi.SnmpEngine(),\n self.auth_data,\n hlapi.UdpTransportTarget(self.target),\n hlapi.ContextData(),\n hlapi.ObjectType(\n hlapi.ObjectIdentity(*oid)\n ),\n )\n res = next(res)\n err_indication, err_status, err_index, var_binds = res\n return var_binds[0]\n\n def get_by_oid(self, oid):\n res = hlapi.getCmd(\n hlapi.SnmpEngine(),\n self.auth_data,\n hlapi.UdpTransportTarget(self.target),\n hlapi.ContextData(),\n hlapi.ObjectType(\n hlapi.ObjectIdentity(oid)\n ),\n )\n res = next(res)\n err_indication, err_status, err_index, var_binds = res\n return var_binds[0]\n","sub_path":"snmp.py","file_name":"snmp.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"282322825","text":"# I want to be able to call capitalize_nested from main w/ various lists\n# and get returned a new nested list with all strings capitalized.\n# Ex. ['apple', ['bear'], 'cat']\n# Verify you've tested w/ various nestings.\n# In your final submission:\n# - Do not print anything extraneous!\n# - Do not put anything but pass in main()\n##############################################################\n#Body\ndef capitalize_nested(list_of_words):\n for each_list_item in list_of_words:\n if(str(type(each_list_item)) != \"<class 'str'>\"):\n if(str(type(each_list_item)) == \"<class 'list'>\"):\n capitalize_nested(each_list_item)\n else:\n list_of_words[list_of_words.index(each_list_item)] = each_list_item.capitalize()\n\n##############################################################\n\ndef main():\n list_1 = ['apple', ['bear'], 'cat', 'doggy', ['elbow', 'fin', 'garage']]\n list_2 = [[[['apple']], 'bear', 'cat', 'doggy',['elbow','fin','garage','house','indigo']], 'jump']\n list_3 = []\n list_4 = [\"doggy\"]\n list_4 = [[[[[[[\"this\"]]]]]]]\n capitalize_nested(list_1)\n capitalize_nested(list_2)\n capitalize_nested(list_3)\n capitalize_nested(list_4)\n print (list_1)\n print (list_2)\n print (list_3)\n print (list_4)\n\n pass\nif __name__ == '__main__':\n main()\n","sub_path":"HW07_ch10_ex02.py","file_name":"HW07_ch10_ex02.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"373314207","text":"import os \nimport pandas as pd \nimport numpy as np \nimport csv \n# ------------------------------------------\n## set path \n###### IMPORTANT: ARE WE CONSTRAINING MEANS ONLY? FALSE IF NO, TRUE IF YES! 
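# The flag below controls whether the Lagrange-multiplier vector covers both
# means and second moments or means only; when True, the script keeps just the
# first half of the full vector. With nc = 28 conditions and ncpc = 9
# constraints per condition (set further down), the full length is
# 28 * 9 = 252, so the means-only variant would hold 126 multipliers.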
\nconstraint_mu_only = False \n\n\non_mac = False\non_thinkpad = False \non_hpg = True\n\nif on_mac == True: \n data_path = \"/Volumes/hodaakl/\"\n# spec_folder_onServer = data_path + 'Max/'\nif on_thinkpad== True: \n data_path = \"//exasmb.rc.ufl.edu/blue/pdixit/hodaakl/\"\nif on_hpg == True: \n data_path = \"/blue/pdixit/hodaakl/\"\n\n# specify the project you are working on \nspec_folder_onServer = data_path + 'A5MCMC_IGF_FoxO/'\nfoldername = '0413_Constrained/'\npath = spec_folder_onServer + foldername \n\n# Arrays path \narrays_path = spec_folder_onServer + 'Arrays_for_max_ent/'\n# where are you saving \nsave_locally=False \nif save_locally == True: \n path = foldername\n arrays_path = '/Users/hodaakl/Documents/github/MaxEnt_FoxO/Arrays_for_max_ent/'\n# ------------------------------------------\nprint('path is ',path)\nif not os.path.exists(path): \n os.mkdir(path)\nelse: \n raise ValueError(\"folder already exists, can't initialize lambda\")\n# ------------------------------------------\n\n# read_dictionary = np.load(arrays_path + 'cons_dict_mu_lnx_fraclr_020322.npy',allow_pickle='TRUE').item()\n#boundsdict = np.load(arrays_path + 'pcon_dict_250222.npy', allow_pickle=True).item()\n\n# real_cons = read_dictionary['array']\n#ncpc = len(boundsdict) # number constraints per condition\nnc = 28\nncpc = 9\nnCons = int(nc*ncpc)\nprint('length of constraints array = ', nCons)\n# real_cons = np.load(spec_folder_onServer + 'Arrays_for_max_ent/Cons_Arr_Means_SecMoment_72Scaled.npy')\n\nFull_lambda_init = np.zeros(nCons)\n# Full_lambda_init[int(len(real_cons)/2):] = np.ones(int(len(real_cons)/2))*10**(-6)\n# Full_lambda_init[int(len(real_cons)/2):] = -2*np.ones(int(len(real_cons)/2))\nif constraint_mu_only == True: \n nCons = int(nCons/2)\n \n# else: \n# nCons = full_con_num\n\nLambda_init = Full_lambda_init[:nCons]\n# save that lambda init \n# ------------------------------------------\n\n\nfile_name_lambda =path+ 'Lambdas.csv'\nwith open(file_name_lambda, 'w') as new_file_lambda:\n csv_writer_lambda = csv.writer(new_file_lambda, delimiter = ',')\n csv_writer_lambda.writerow(Lambda_init)\n new_file_lambda.flush()\n\nprint(f'Saved initial lambda: of length {len(Lambda_init)}')\nprint(f'lambda = {Lambda_init}')\n","sub_path":"IGF_FOXO/scratch/Code/ArchiveAndExperiment/Init_lagrangeMultipliers.py","file_name":"Init_lagrangeMultipliers.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"194649035","text":"import os\nimport os.path\nfrom Jumpscale import j\nimport fnmatch\n\nJSBASE = j.baseclasses.object\n\n\nclass SystemFSWalker(j.baseclasses.object):\n\n __jslocation__ = \"j.sal.fswalker\"\n\n @staticmethod\n def _checkDepth(path, depths, root=\"\"):\n if depths == []:\n return True\n path = j.sal.fs.pathRemoveDirPart(path, root)\n for depth in depths:\n dname = os.path.dirname(path)\n split = dname.split(os.sep)\n split = [item for item in split if item != \"\"]\n # print split\n if depth == len(split):\n return True\n else:\n return False\n\n @staticmethod\n def _checkContent(path, contentRegexIncludes=[], contentRegexExcludes=[]):\n if not (contentRegexIncludes or contentRegexExcludes):\n return True\n\n if not j.sal.fs.isFile(path):\n # in this case the link doesn't link to a file so we can't read its content\n # add it in case of contentRegexExcludes is enabled\n return bool(contentRegexExcludes)\n\n content = j.sal.fs.readFile(path)\n if contentRegexIncludes and not 
j.data.regex.matchMultiple(patterns=contentRegexIncludes, text=content):\n return False\n\n if contentRegexExcludes and j.data.regex.matchMultiple(patterns=contentRegexExcludes, text=content):\n return False\n\n return True\n\n @staticmethod\n def _findhelper(arg, path):\n self._log_debug(path)\n arg.append(path)\n\n @staticmethod\n def find(\n root,\n recursive=True,\n includeFolders=False,\n pathRegexIncludes=[\".*\"],\n pathRegexExcludes=[\".git\"],\n contentRegexIncludes=[],\n contentRegexExcludes=[],\n depths=[],\n followlinks=True,\n ):\n listfiles = []\n SystemFSWalker.walk(\n root,\n SystemFSWalker._findhelper,\n listfiles,\n recursive,\n includeFolders,\n pathRegexIncludes,\n pathRegexExcludes,\n contentRegexIncludes,\n contentRegexExcludes,\n depths,\n followlinks=followlinks,\n )\n return listfiles\n\n @staticmethod\n def walk(\n root,\n callback,\n arg=\"\",\n recursive=True,\n includeFolders=False,\n pathRegexIncludes=[\".*\"],\n pathRegexExcludes=[],\n contentRegexIncludes=[],\n contentRegexExcludes=[],\n depths=[],\n followlinks=True,\n ):\n \"\"\"Walk through filesystem and execute a method per file\n\n Walk through all files and folders starting at C{root}, recursive by\n default, calling a given callback with a provided argument and file\n path for every file we could find.\n\n If C{includeFolders} is True, the callback will be called for every\n folder we found as well.\n\n Examples\n ========\n >>> def my_print(arg, path):\n ... print arg, path\n ...\n >>> SystemFSWalker.walk('/foo', my_print, 'test:')\n test: /foo/file1\n test: /foo/file2\n test: /foo/file3\n test: /foo/bar/file4\n\n return False if you want recursion to stop (means don't go deeper)\n\n >>> def dirlister(arg, path):\n ... print 'Found', path\n ... arg.append(path)\n ...\n >>> paths = list()\n >>> SystemFSWalker.walk('/foo', dirlister, paths, recursive=False, includeFolders=True)\n /foo/file1\n /foo/file2\n /foo/file3\n /foo/bar\n >>> print paths\n ['/foo/file1', '/foo/file2', '/foo/file3', '/foo/bar']\n\n @param root: Filesystem root to crawl (string)\n @param callback: Callable to call for every file found, func(arg, path) (callable)\n @param arg: First argument to pass to callback\n @param recursive: Walk recursive or not (bool)\n @param includeFolders: Whether to call C{callable} for folders as well (bool)\n @param pathRegexIncludes / Excludes match paths to array of regex expressions (array(strings))\n @param contentRegexIncludes / Excludes match content of files to array of regex expressions (array(strings))\n @param depths array of depth values e.g. 
only return depth 0 & 1 (would mean first dir depth and then 1 more deep) (array(int))\n\n \"\"\"\n if not j.sal.fs.isDir(root):\n raise j.exceptions.Value(\"Root path for walk should be a folder\")\n if recursive is False:\n depths = [0]\n # We want to work with full paths, even if a non-absolute path is\n # provided\n root = os.path.abspath(root)\n\n # print \"ROOT OF WALKER:%s\"%root\n # print \"followlinks:%s\"%followlinks\n j.sal.fswalker._walk(\n root,\n callback,\n arg,\n includeFolders,\n pathRegexIncludes,\n pathRegexExcludes,\n contentRegexIncludes,\n contentRegexExcludes,\n depths,\n followlinks=followlinks,\n )\n\n # #if recursive:\n # for dirpath, dirnames, filenames in os.walk(root,followlinks=followlinks):\n # #Folders first\n # if includeFolders:\n # for dirname in dirnames:\n # path = os.path.join(dirpath, dirname)\n # if j.data.regex.matchMultiple(patterns=pathRegexIncludes,text=path) and \\\n # not j.data.regex.matchMultiple(patterns=pathRegexExcludes,text=path):\n # if SystemFSWalker._checkDepth(path,depths,root) and \\\n # SystemFSWalker._checkContent(path,contentRegexIncludes, contentRegexExcludes):\n # result=callback(arg, path)\n # for filename in filenames:\n # path = os.path.join(dirpath, filename)\n # if j.data.regex.matchMultiple(patterns=pathRegexIncludes,text=path) and not j.data.regex.matchMultiple(patterns=pathRegexExcludes,text=path):\n # if SystemFSWalker._checkDepth(path,depths,root) and SystemFSWalker._checkContent(path,contentRegexIncludes, contentRegexExcludes):\n # callback(arg, path)\n\n @staticmethod\n def _walk(\n path,\n callback,\n arg=\"\",\n includeFolders=False,\n pathRegexIncludes=[\".*\"],\n pathRegexExcludes=[],\n contentRegexIncludes=[],\n contentRegexExcludes=[],\n depths=[],\n followlinks=True,\n ):\n\n for path2 in j.sal.fs.listFilesAndDirsInDir(path, followSymlinks=followlinks, listSymlinks=True):\n\n if j.sal.fs.isDir(path2, followlinks):\n if includeFolders:\n result = True\n if j.data.regex.matchMultiple(\n patterns=pathRegexIncludes, text=path2\n ) and not j.data.regex.matchMultiple(patterns=pathRegexExcludes, text=path2):\n if SystemFSWalker._checkDepth(path2, depths, path) and SystemFSWalker._checkContent(\n path2, contentRegexIncludes, contentRegexExcludes\n ):\n result = callback(arg, path2)\n if result is False:\n continue # do not recurse go to next dir\n # recurse\n j.sal.fswalker._walk(\n path2,\n callback,\n arg,\n includeFolders,\n pathRegexIncludes,\n pathRegexExcludes,\n contentRegexIncludes,\n contentRegexExcludes,\n depths,\n followlinks,\n )\n\n elif j.sal.fs.isFile(path2, followlinks):\n if j.data.regex.matchMultiple(\n patterns=pathRegexIncludes, text=path2\n ) and not j.data.regex.matchMultiple(patterns=pathRegexExcludes, text=path2):\n if SystemFSWalker._checkDepth(path2, depths, path) and SystemFSWalker._checkContent(\n path2, contentRegexIncludes, contentRegexExcludes\n ):\n callback(arg, path2)\n\n @staticmethod\n def walkFunctional(\n root,\n callbackFunctionFile=None,\n callbackFunctionDir=None,\n arg=\"\",\n callbackForMatchDir=None,\n callbackForMatchFile=None,\n findDirectorySymlinks=True,\n ):\n \"\"\"Walk through filesystem and execute a method per file and dirname\n\n Walk through all files and folders starting at C{root}, recursive by\n default, calling a given callback with a provided argument and file\n path for every file & dir we could find.\n\n To match the function use the callbackForMatch function which are separate for dir or file\n when it returns True the path will be further processed\n 
when None (function not given match will not be done)\n\n Examples\n ========\n >>> def my_print(path,arg):\n ... print arg, path\n ...\n #if return False for callbackFunctionDir then recurse will not happen for that dir\n\n >>> def matchDirOrFile(path,arg):\n ... return True #means will match all\n ...\n\n >>> SystemFSWalker.walkFunctional('/foo', my_print,my_print, 'test:',matchDirOrFile,matchDirOrFile)\n test: /foo/file1\n test: /foo/file2\n test: /foo/file3\n test: /foo/bar/file4\n\n @param root: Filesystem root to crawl (string)\n #TODO: complete\n\n \"\"\"\n # We want to work with full paths, even if a non-absolute path is\n # provided\n root = os.path.abspath(root)\n\n if not j.sal.fs.isDir(root):\n raise j.exceptions.Value(\"Root path for walk should be a folder, {}\".format(root))\n\n # print \"ROOT OF WALKER:%s\"%root\n SystemFSWalker._walkFunctional(\n root,\n callbackFunctionFile,\n callbackFunctionDir,\n arg,\n callbackForMatchDir,\n callbackForMatchFile,\n findDirectorySymlinks=findDirectorySymlinks,\n )\n\n @staticmethod\n def _walkFunctional(\n path,\n callbackFunctionFile=None,\n callbackFunctionDir=None,\n arg=\"\",\n callbackForMatchDir=None,\n callbackForMatchFile=None,\n findDirectorySymlinks=True,\n ):\n\n paths = sorted(j.sal.fs.listFilesInDir(path, listSymlinks=True))\n for path2 in paths:\n if callbackForMatchFile is False:\n continue\n if callbackForMatchFile is None or callbackForMatchFile(path2, arg):\n # execute\n callbackFunctionFile(path2, arg)\n\n paths = sorted(j.sal.fs.listDirsInDir(path, findDirectorySymlinks=findDirectorySymlinks))\n for path2 in paths:\n # print \"walker dirpath:%s\"% path2\n if callbackForMatchDir is None or callbackForMatchDir(path2, arg):\n # recurse\n # print \"walker matchdir:%s\"% path2\n if callbackFunctionDir is None:\n j.sal.fswalker._walkFunctional(\n path2, callbackFunctionFile, callbackFunctionDir, arg, callbackForMatchDir, callbackForMatchFile\n )\n else:\n result = callbackFunctionDir(path2, arg)\n if result:\n # print \"walker recurse:%s\"% path2\n j.sal.fswalker._walkFunctional(\n path2,\n callbackFunctionFile,\n callbackFunctionDir,\n arg,\n callbackForMatchDir,\n callbackForMatchFile,\n findDirectorySymlinks=findDirectorySymlinks,\n )\n\n @staticmethod\n def walkExtended(root, recurse=0, dirPattern=\"*\", filePattern=\"*\", followSoftLinks=True, dirs=True, files=True):\n \"\"\"\n Extended Walk version: seperate dir and file pattern\n @param root : start directory to start the search.\n @type root : string\n @param recurse : search also in subdirectories.\n @type recurse : number\n @param dirPattern : search pattern to match directory names. Wildcards can be included.\n @type dirPattern : string\n @param filePattern : search pattern to match file names. Wildcards can be included.\n @type filePattern : string\n @param followSoftLinks : determine if links must be followed.\n @type followSoftLinks : boolean\n @param dirs : determine to return dir results.\n @type dirs : boolean\n @param files : determine to return file results.\n @type files : boolean\n @return : List of files and / or directories that match the search patterns.\n @rtype : list of strings\n General guidelines in the usage of the method be means of some examples come next. 
For the example in /tmp there is\n * a file test.rtt\n * and ./folder1/subfolder/subsubfolder/small_test/test.rtt\n To find the first test you can use\n j.sal.fswalker.walkExtended('/tmp/', dirPattern=\"*tmp*\", filePattern=\"*.rtt\")\n To find only the second one you could use\n j.sal.fswalker.walkExtended('tmp', recurse=0, dirPattern=\"*small_test*\", filePattern=\"*.rtt\", dirs=False)\n \"\"\"\n result = []\n try:\n names = os.listdir(root)\n except os.error:\n return result # TODO: P2 is this correct?\n\n dirPattern = dirPattern or \"*\"\n dirPatList = dirPattern.split(\";\")\n filePattern = filePattern or \"*\"\n filePatList = filePattern.split(\";\")\n\n for name in names:\n fullname = os.path.normpath(os.path.join(root, name))\n if j.sal.fs.isFile(fullname, followSoftLinks):\n fileOK = False\n dirOK = False\n for fPat in filePatList:\n if fnmatch.fnmatch(name, fPat):\n fileOK = True\n for dPat in dirPatList:\n if fnmatch.fnmatch(os.path.dirname(fullname), dPat):\n dirOK = True\n if fileOK and dirOK and files:\n result.append(fullname)\n if j.sal.fs.isDir(fullname, followSoftLinks):\n for dPat in dirPatList:\n if fnmatch.fnmatch(name, dPat) and dirs:\n result.append(fullname)\n if recurse:\n result = result + j.sal.fswalker.walkExtended(\n root=fullname,\n recurse=recurse,\n dirPattern=dirPattern,\n filePattern=filePattern,\n followSoftLinks=followSoftLinks,\n dirs=dirs,\n files=files,\n )\n\n return result\n","sub_path":"JumpscaleCore/sal/fs/SystemFSWalker.py","file_name":"SystemFSWalker.py","file_ext":"py","file_size_in_byte":15292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"162074154","text":"# -*- coding: utf-8 -*-\n\"\"\"迁移bt索引\"\"\"\nimport time\nimport traceback\nfrom elasticsearch import Elasticsearch\n\nes_urls = ['https://search-xles03-xw27kvqlra4onvljuqbbemmb4q.ap-south-1.es.amazonaws.com']\nes_client = Elasticsearch(es_urls)\n\n\ndef update_movie():\n offset, limit = 0, 1000\n while True:\n print(offset, limit)\n query = {\n \"from\": offset,\n \"size\": limit,\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"term\": {\"type\": \"mv\"}\n },\n {\n \"term\": {\"genre\": \"indiantimes\"}\n }\n ]\n }\n }\n }\n\n try:\n query_result = es_client.search(\"resources\", \"doc\", query)\n except Exception:\n traceback.print_exc()\n time.sleep(5)\n continue\n else:\n hits = query_result['hits']['hits']\n if not hits:\n break\n\n for item in hits:\n id_ = item['_id']\n source_ = item['_source']\n language = source_.get('language')\n if not language:\n continue\n print(id_, language)\n if isinstance(language, list):\n continue\n\n source_['language'] = [language]\n try:\n es_client.index('resources', 'doc', source_, id=id_)\n except Exception as e:\n print(id_)\n traceback.print_exc()\n time.sleep(5)\n\n offset += limit\n\n\nif __name__ == '__main__':\n update_movie()\n","sub_path":"scripts/update_movie.py","file_name":"update_movie.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"240831481","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 4 16:03:20 2018\r\n\r\n@author: Lalit\r\n\"\"\"\r\n\r\nnum = int(input(\"Enter a number: \"))\r\n\r\nif num > 1:\r\n \r\n for i in range(2,num):\r\n if(num%i) ==0:\r\n print(num, \"is not a prime number\")\r\n \r\n break\r\n \r\n else:\r\n \r\n print(num, \"is a prime 
number\")","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"301564683","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom copy import deepcopy\nimport urllib\n\n\n# 使用spider爬取亚马逊网站的图书内容\nclass SpiderAmazonSpider(scrapy.Spider):\n name = 'spider_amazon'\n allowed_domains = ['amazon.cn']\n start_urls = ['https://www.amazon.cn/%E5%9B%BE%E4%B9%A6/b?ie=UTF8&node=658390051']\n\n def parse(self, response):\n # 获取左侧分类的一级列表\n l_list = response.xpath(\"//ul[@class='a-unordered-list a-nostyle a-vertical s-ref-indent-one']/div/li\")\n for li in l_list:\n # 提取一级分类的名称和href\n item = {}\n item[\"l_cate\"] = li.xpath(\"./span/a/span/text()\").get()\n item[\"l_href\"] = li.xpath(\"./span/a/@href\").get()\n\n yield scrapy.Request(\n item[\"l_href\"],\n callback=self.parse_m_cate,\n meta={\"item\":deepcopy(item)}\n )\n\n def parse_m_cate(self, response):\n \"\"\"用于处理二级分类的名称和href\"\"\"\n item = response.meta[\"item\"]\n # 获取左侧分类的二级列表\n m_list = response.xpath(\"//ul[@class='a-unordered-list a-nostyle a-vertical s-ref-indent-two']/div/li\")\n for li in m_list:\n # 提取二级分类的名称和href\n item[\"m_cate\"] = li.xpath(\"./span/a/span/text()\").get()\n item[\"m_href\"] = li.xpath(\"./span/a/@href\").get()\n\n # 此处对应的链接,即三级分类页面,可以用于获取四级分类页面,也会在右侧直接展示图书列表。\n # 部分页面可能没有再细分第四级页面,此处直接通过三级分类获取图书信息,避免因为获取不到四级分类而报错\n yield scrapy.Request(\n item[\"m_href\"],\n callback=self.parse_book_list,\n meta={\"item\":deepcopy(item)}\n )\n\n def parse_book_list(self, response):\n \"\"\"获取图书的标题,价格以及详情信息\"\"\"\n item = response.meta[\"item\"]\n # 获取图书列表\n # 此时的图书列表展示页与源代码展示页面不同,需要修改获取的ul对象\n book_list = response.xpath(\"//ul[@class='s-result-list s-col-1 s-col-ws-1 s-result-list-hgrid s-height-equalized s-list-view s-text-condensed']/li\")\n for li in book_list:\n item[\"title\"] = li.xpath(\".//h2/text()\").get()\n item[\"price\"] = li.xpath(\".//span[@class='a-size-base a-color-price s-price a-text-bold']/text()\").get()\n item[\"href\"] = li.xpath(\".//a[@class='a-link-normal s-access-detail-page s-color-twister-title-link a-text-normal']/@href\").get()\n print(\"item: \", item)\n\n # 获取下一页的图书信息\n next_url = response.xpath(\".//span[@class='pagnRA']/a/@href\").get()\n if next_url is not None:\n # 拼接完整的url地址\n next_url = urllib.parse.urljoin(response.url, next_url)\n yield scrapy.Request(\n next_url,\n callback=self.parse_book_list,\n meta={\"item\":item}\n )\n\n\n # def parse_s_cate(self, response):\n # \"\"\"用于处理三级分类的名称和href\"\"\"\n # item = response.meta[\"item\"]\n # # 获取左侧分类的三级列表,下同二级分类\n # s_list = response.xpath(\"//ul[@class='a-unordered-list a-nostyle a-vertical s-ref-indent-two']/div/li\")\n # for li in s_list:\n # # 提取三级分类的名称和href\n # item[\"s_cate\"] = li.xpath(\"./span/a/span/text()\").get()\n # item[\"s_href\"] = li.xpath(\"./span/a/@href\").get()\n\n # # 此处对应的链接,可以用于获取三级列表对应的图书详情页\n # yield scrapy.Request(\n # item[\"m_href\"],\n # callback=self.parse_s_cate,\n # meta={\"item\":deepcopy(item)}\n # )\n","sub_path":"005-Scrapy_Redis/003-amazon/amazon/spiders/spider_amazon.py","file_name":"spider_amazon.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"4616750","text":"import requests\nimport json\nimport jsonpath\n\n# API Url\nurl = \"https://reqres.in/api/users/2\"\n\n# Open and read json file in read mode\nfile = open(\"D:\\\\APIAutomation\\\\API Requests\\\\create_user.json\", 
\"r\")\njson_input = file.read()\n\n# Convert the string to JSON\nrequest_json = json.loads(json_input)\n\n# Make PUT request with JSON input body\nresponse = requests.put(url, request_json)\n\n# Validate response code\nassert response.status_code == 200\n\n# Parse response content\nresponse_json = json.loads(response.text)\nupdated_li = jsonpath.jsonpath(response_json, 'updatedAt')\nprint(updated_li[0])\n\n","sub_path":"API Requests/update_user.py","file_name":"update_user.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"453220570","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclass Vaccine(Sprite):\n    \"\"\"Manage vaccines the doctor fires at the virus.\"\"\"\n\n    def __init__(self, ai_game):\n        \"\"\"Create a vaccine object at the doctor's current position.\"\"\"\n        super().__init__()\n        self.screen = ai_game.screen\n        self.settings = ai_game.settings\n        # self.color = self.settings.vaccine_color\n        self.screen_rect = ai_game.screen.get_rect()\n\n        self.image = pygame.image.load('images/pill.png')\n        self.rect = self.image.get_rect()\n\n        self.rect.midright = ai_game.doctor.rect.midright\n\n        # Store the vaccine's position as a decimal value.\n        self.y = float(self.rect.y)\n\n    def update(self):\n        \"\"\"Move the vaccine up the screen.\"\"\"\n        # Update the decimal position of the vaccine.\n        self.y -= self.settings.vaccine_speed\n        # Update the rect position.\n        self.rect.y = self.y\n\n    def draw_vaccine(self):\n        \"\"\"Draw the vaccine to the screen.\"\"\"\n        self.screen.blit(self.image, self.rect)\n","sub_path":"vaccine.py","file_name":"vaccine.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"324474035","text":"import functools\nimport typing\nimport string\nimport random\nimport pytest\n\n## Solution Part 1.\ndef divisors(n: int) -> list:\n    div_list = []\n    if n < 0:\n        print(\"positivity please!\")\n    else:\n        for d in range(1, n+1):  # start at 1: d == 0 would raise ZeroDivisionError\n            if n % d == 0:\n                div_list += [d]\n    return div_list\n\nprint(divisors(5))\n######################################################################\n## Solution Part 2. 
(Tests)\ndef divisors_test():\n assert divisors(-1) == print(\"positivity please!\")\n assert divisors(0) == []\n assert divisors(5) == [1,5]\n assert divisors(5) == [2]\n######################################################################\n","sub_path":"StudentProblem/10.21.11.11/7/1569574207.py","file_name":"1569574207.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"263425985","text":"from PyQt5 import QtWidgets, uic, QtCore, QtGui\n\nfrom ui.utils.popups import *\nimport sys\n\n\nclass initUI(QtWidgets.QMainWindow):\n\n features = ['first_order', 'glcm', 'gldm', 'glrlm', 'glszm', 'ngtdm', 'shape', 'shape_2D']\n\n def __init__(self):\n super(initUI, self).__init__() # Call the inherited classes __init__ method\n uic.loadUi('radiomics-feature-extractor.ui', self) # Load the .ui file\n\n # Initialize main window\n self._init_main_window()\n\n # Initialize tabs\n self._init_input_tab()\n self._init_settings_tab()\n\n self.show() # Show the GUI\n\n def _init_main_window(self):\n # Init tab widget\n self.tab_widget = self.findChild(QtWidgets.QTabWidget, 'tabWidget')\n self.tab_widget.setTabVisible(0, True)\n self.tab_widget.setTabVisible(1, False)\n self.tab_widget.setTabVisible(2, False)\n\n self.tab_widget.tabBar().installEventFilter(self)\n\n # Init push buttons\n self.next_btn = self.findChild(QtWidgets.QPushButton, 'next_btn')\n self.back_btn = self.findChild(QtWidgets.QPushButton, 'back_btn')\n self.reset_btn = self.findChild(QtWidgets.QPushButton, 'reset_btn')\n\n self.next_btn.clicked.connect(self._next_button_clicked)\n self.back_btn.clicked.connect(self._back_button_clicked)\n self.reset_btn.clicked.connect(self._reset_button_clicked)\n\n # Init debug mode\n self.debug_mode_checkbox = self.findChild(QtWidgets.QCheckBox, 'debug_mode_checkbox')\n self.log_text_edit = self.findChild(QtWidgets.QTextEdit, 'log_text_edit')\n self.log_label = self.findChild(QtWidgets.QLabel, 'log_label')\n\n self.log_text_edit.setProperty('visible', False)\n self.log_label.setProperty('visible', False)\n\n self.debug_mode_checkbox.toggled.connect(self._debug_mode_checkbox_toggled)\n\n def _init_input_tab(self):\n self.input_tab = self.findChild(QtWidgets.QWidget, 'input_tab')\n\n # Initialize variables\n self.image_file_path = None\n self.ROI_file_path = None\n self.csv_file_path = None\n\n # Init radio buttons\n self.single_image_radio = self.findChild(QtWidgets.QRadioButton, 'single_image_radio')\n self.single_image_radio.toggled.connect(lambda l: {\n self.upload_image_btn.setProperty('enabled', True),\n self.upload_ROI_btn.setProperty('enabled', True),\n self.upload_csv_btn.setProperty('enabled', False),\n self._clear_input()\n })\n\n self.batch_images_radio = self.findChild(QtWidgets.QRadioButton, 'batch_images_radio')\n self.batch_images_radio.toggled.connect(lambda l: {\n self.upload_image_btn.setProperty('enabled', False),\n self.upload_ROI_btn.setProperty('enabled', False),\n self.upload_csv_btn.setProperty('enabled', True),\n self._clear_input()\n })\n\n # Init push buttons\n self.upload_image_btn = self.findChild(QtWidgets.QPushButton, 'upload_image_btn')\n self.upload_ROI_btn = self.findChild(QtWidgets.QPushButton, 'upload_ROI_btn')\n self.upload_csv_btn = self.findChild(QtWidgets.QPushButton, 'upload_csv_btn')\n\n self.upload_image_btn.clicked.connect(lambda l: open_dicom_image(self))\n self.upload_ROI_btn.clicked.connect(lambda l: open_dicom_ROI(self))\n 
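# NOTE: QPushButton.clicked emits a checked flag; each lambda's 'l' parameter absorbs it so\n        # the open_* helpers receive only the window instance (functools.partial(open_csv_file, self)\n        # would be an equivalent spelling, assuming functools were imported).\n        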
self.upload_csv_btn.clicked.connect(lambda l: open_csv_file(self))\n\n # Init path labels\n self.label_image_path = self.findChild(QtWidgets.QLabel, 'label_image_path')\n self.label_ROI_path = self.findChild(QtWidgets.QLabel, 'label_ROI_path')\n self.label_csv_path = self.findChild(QtWidgets.QLabel, 'label_csv_path')\n\n def _init_settings_tab(self):\n self.settings_tab = self.findChild(QtWidgets.QWidget, 'settings_tab')\n\n # Initialize variables\n self.is_any_feature_checkbox_selected = False\n\n # Initialize checkboxes\n for feature in self.features:\n self.__setattr__('checkbox_' + feature, self.findChild(QtWidgets.QCheckBox, 'checkbox_' + feature))\n self.__getattribute__('checkbox_' + feature).toggled.connect(self._feature_checkbox_toggled)\n\n # Initialize buttons\n self.select_all_none_btn = self.findChild(QtWidgets.QPushButton, 'select_all_none_btn')\n self.select_all_none_btn.clicked.connect(self._select_all_none_btn_clicked)\n\n def _feature_checkbox_toggled(self):\n if self._is_any_feature_selected():\n self.next_btn.setProperty('enabled', True)\n else:\n self.next_btn.setProperty('enabled', False)\n\n def _select_all_none_btn_clicked(self):\n if self.select_all_none_btn.text() == 'Select All':\n self.select_all_none_btn.setText('Select None')\n self.next_btn.setProperty('enabled', True)\n for feature in self.features:\n self.__getattribute__('checkbox_' + feature).setProperty('checked', True)\n else:\n self.select_all_none_btn.setText('Select All')\n self.next_btn.setProperty('enabled', False)\n for feature in self.features:\n self.__getattribute__('checkbox_' + feature).setProperty('checked', False)\n\n def _next_button_clicked(self):\n is_tab_ready = False # The requirements for each tabs are different\n\n # Input tab\n if self.tab_widget.currentIndex() == 0:\n if (self.image_file_path and self.ROI_file_path) or self.csv_file_path:\n is_tab_ready = True\n\n # Settings tab\n elif self.tab_widget.currentIndex() == 1:\n if self._is_any_feature_selected():\n is_tab_ready = True\n # Execute pyradiomics feature extraction\n exec('pyradiomics ' + self.image_file_path + ' ' + self.ROI_file_path)\n\n # Go to the next tab if it exists\n if is_tab_ready and (\n self.tab_widget.currentIndex() < self.tab_widget.count() - 1): # -1 because it is not zero based\n self.tab_widget.setTabVisible(self.tab_widget.currentIndex() + 1, True)\n self.tab_widget.setCurrentIndex(self.tab_widget.currentIndex() + 1)\n # Disable next button\n self.next_btn.setProperty('enabled', False)\n # Enable back button\n self.back_btn.setProperty('enabled', True)\n\n is_tab_ready = False\n\n def _back_button_clicked(self):\n # Go to the previous tab if it exists\n if self.tab_widget.currentIndex() > 0:\n self.tab_widget.setTabVisible(self.tab_widget.currentIndex(), False)\n self.tab_widget.setCurrentIndex(self.tab_widget.currentIndex() - 1)\n # Disable back button if you are on the first tab\n if self.tab_widget.currentIndex() == 0:\n self.back_btn.setProperty('enabled', False)\n # Enable next button\n self.next_btn.setProperty('enabled', True)\n\n def _debug_mode_checkbox_toggled(self):\n self.log_text_edit.setProperty('visible', not self.log_text_edit.property('visible'))\n self.log_label.setProperty('visible', not self.log_label.property('visible'))\n\n def _reset_button_clicked(self):\n self._clear_input()\n self._hide_tabs()\n self.log_text_edit.clear()\n\n def _hide_tabs(self):\n # Hide all tabs and move on the first one\n tabs_count = self.tab_widget.count() # because it is not zero based\n for index in 
range(tabs_count):\n self.tab_widget.setTabVisible(index + 1, False)\n self.tab_widget.setCurrentIndex(0)\n\n def _clear_input(self):\n self.image_file_path = None\n self.ROI_file_path = None\n self.csv_file_path = None\n self.next_btn.setProperty('enabled', False)\n self.label_image_path.setText('')\n self.label_ROI_path.setText('')\n self.label_csv_path.setText('')\n\n def _is_any_feature_selected(self):\n for feature in self.features:\n feature_checkbox = self.__getattribute__('checkbox_' + feature)\n if feature_checkbox.property('checked'):\n return True\n return False\n\n def eventFilter(self, obj, event):\n if event.type() == QtCore.QEvent.KeyPress and (event.key() == 16777217 or event.key() == 16777218):\n return True # eat alt+tab or alt+shift+tab key\n if event.type() in (QtCore.QEvent.MouseButtonPress, QtCore.QEvent.MouseButtonDblClick):\n return True # eat mouse click\n else:\n # standard event processing\n return super(initUI, self).eventFilter(obj, event)\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n view = initUI()\n app.exec_()\n","sub_path":"pyradiomics/ui/feature-extractor-gui.py","file_name":"feature-extractor-gui.py","file_ext":"py","file_size_in_byte":8738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"377590779","text":"#!/usr/bin/python3\n\n# GRAMMAR\n# S -> A A\n# A -> B | C\n# B -> b\n# C -> c\n\ndef source():\n return \"\"\n # return \"bb\"\n # return \"bc\"\n # return \"cb\"\n # return \"cc\"\n\nclass LexerException(Exception):\n def __init__(self, line, column, msg=\"Token error.\"):\n self.line = line\n self.column = column\n self.msg = msg\n\nclass ParserException(Exception):\n def __init__(self, line, column, msg=\"Syntax error.\"):\n self.line = line\n self.column = column\n self.msg = msg\n\n#------------------------------------------------------------------------------#\n\nclass Token:\n def __init__(self, char, line, column):\n self.line = line\n self.column = column\n if char == 'b':\n self.type = 'B'\n elif char == 'c':\n self.type = 'C'\n else:\n raise LexerException(line, column, \"bad token: {}\".format(char))\n\ndef tokenize(source):\n column = 0\n tokens = []\n for char in source:\n token = Token(char, 0, column)\n tokens.append(token)\n column += 1\n return tokens\n\n#------------------------------------------------------------------------------#\n\ndef parse(tokens):\n return _s(tokens)\n\ndef _s(tokens0):\n ast1, tokens1 = _a(tokens0)\n ast2, tokens2 = _a(tokens1)\n if [] != tokens2:\n raise ParserException(tokens2[0].line, tokens2[0].column, \"\")\n return ast1 + ast2\n\ndef _a(tokens):\n if tokens[0].type == 'B':\n return (_b(tokens), tokens[1:])\n elif tokens[0].type == 'C':\n return (_c(tokens), tokens[1:])\n else:\n raise ParserException(tokens[0].line, tokens[0].column)\n\ndef _b(tokens):\n if [] != tokens[1:]:\n raise ParserException(tokens[0].line, tokens[0].column, \"\")\n return ['B']\n\ndef _c(tokens):\n if [] != tokens[1:]:\n raise ParserException(tokens[0].line, tokens[0].column, \"\")\n return ['C']\n\n#------------------------------------------------------------------------------#\n\ndef main():\n print(parse(tokenize(source())))\n\ndef test():\n try:\n ['B', 'B'] == parse(tokenize(\"bb\"))\n ['B', 'C'] == parse(tokenize(\"bc\"))\n ['C', 'B'] == parse(tokenize(\"cb\"))\n ['C', 'C'] == parse(tokenize(\"cc\"))\n except Exception as e:\n print(\"Error: {}\".format(e))\n try:\n ['A', 'D'] == parse(tokenize(\"ad\"))\n raise Exception(\"Tokenized 
something that shouldn't have!\")\n except LexerException as e:\n pass\n try:\n ['B', 'B', 'B'] == parse(tokenize(\"bbb\"))\n raise Exception(\"Parsed something that shouldn't have!\")\n except ParserException as e:\n pass\n print(\"tests pass!\")\n\nif __name__ == \"__main__\":\n test()\n","sub_path":"python/scheduler/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"210314262","text":"# Implement a method to perform basic string compression using the counts of repeated characters.\n# For example, the string aabcccccaaa would become a2b1c5a3. If the \"compressed\" string would not\n# become smaller than the original string, your method should return the original string.\n# You can assume the string has only uppercase and lowercase letters (a - z).\n\n\ndef string_compression(s):\n if len(s) < 2:\n return s\n\n compressed = \"\"\n count = 1\n compressed += s[0]\n\n for i in range(len(s) - 1):\n if s[i] == s[i + 1]:\n count += 1\n else:\n if count >= 1:\n compressed += str(count)\n compressed += s[i + 1]\n count = 1\n if count >= 1:\n compressed += str(count)\n\n if len(compressed) >= len(s):\n return s\n return compressed\n\n\ndef main():\n print(string_compression(\"aabcccccaaa\"))\n print(string_compression(\"abc\"))\n print(string_compression(\"mississippi\"))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"CTCI_Questions/ArrayAndStrings/StringCompression/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"507260945","text":"'''\nCreated on Oct 9, 2014\n\n@author: anedospe\n'''\nimport pkgutil\nimport os\n\ndef get_module_name(mod_name):\n \n path = os.path.join(os.path.dirname(__file__))\n modules = pkgutil.iter_modules(path=[path])\n\n for loader, modname, ispkg in modules: \n if modname == mod_name:\n #import module at the runtime\n loaded_mod = __import__(\"modules.\" + modname)\n\n #where mod_name must have the same name for module and class\n loaded_class = getattr(getattr(loaded_mod, mod_name), mod_name)\n #create instance of a class\n instance = loaded_class() \n \n return instance\n\n\n \n \n ","sub_path":"modules/load_module.py","file_name":"load_module.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"204499018","text":"A = input()\nB = input()\n\ndp = [[0] * 1001 for _ in range(1001)]\n\nans = 0\nfor i in range(len(A)) :\n for j in range(len(B)) :\n if A[i] == B[j] :\n dp[i+1][j+1] = dp[i][j] + 1\n ans = max(ans,dp[i+1][j+1])\n else :\n dp[i+1][j+1] = max(dp[i][j+1],dp[i+1][j])\n\nret = \"\"\nla = len(A); lb = len(B)\n\nwhile dp[la][lb] != 0 :\n if dp[la][lb] == dp[la-1][lb] : la -= 1\n elif dp[la][lb] == dp[la][lb-1] : lb -= 1\n else :\n ret = A[la-1] + ret\n la-=1; lb-=1\n\nprint(ans)\nprint(ret)\n","sub_path":"BOJ/27_동적 계획법과 최단거리 역추적/9252_LCS2.py","file_name":"9252_LCS2.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"637088385","text":"\"\"\"\nThis model translates default strings into localized strings.\n\"\"\"\n\nfrom collections import deque\nimport time\nimport math\nfrom django.conf import settings\nfrom twisted.internet import reactor\nfrom twisted.internet import task\nfrom evennia.utils import logger\nfrom 
evennia import create_script\nfrom evennia.utils.search import search_object\nfrom muddery.server.dao.honours_mapper import HONOURS_MAPPER\nfrom muddery.server.utils.localized_strings_handler import _\nfrom muddery.server.utils.defines import CombatType\nfrom muddery.server.dao.honour_settings import HonourSettings\n\n\nclass MatchPVPHandler(object):\n \"\"\"\n This model translates default strings into localized strings.\n \"\"\"\n def __init__(self):\n \"\"\"\n Initialize handler\n \"\"\"\n self.max_honour_diff = 0\n self.preparing_time = 0\n self.match_interval = 10\n\n self.waiting_queue = deque()\n self.preparing = {}\n self.loop = None\n\n self.reset()\n \n def __del__(self):\n \"\"\"\n Clear all resources.\n \"\"\"\n if self.loop and self.loop.running:\n self.loop.stop()\n\n self.remove_all()\n\n def remove_all(self):\n \"\"\"\n # Remove all characters in the waiting queue.\n \"\"\"\n for char_id, info in self.preparing.values():\n call_id = info[\"call_id\"]\n call_id.cancel()\n character = search_object(\"#%s\" % char_id)\n if character:\n character.msg({\"match_rejected\": char_id})\n self.preparing.clear()\n\n for char_id in self.waiting_queue:\n character = search_object(\"#%s\" % char_id)\n if character:\n character.msg({\"left_combat_queue\": \"\"})\n self.waiting_queue.clear()\n\n def reset(self):\n \"\"\"\n Reset the waiting queue.\n \"\"\"\n if self.loop and self.loop.running:\n self.loop.stop()\n\n # Remove all characters in the waiting queue.\n self.remove_all()\n\n honour_settings = HonourSettings.get_first_data()\n self.max_honour_diff = honour_settings.max_honour_diff\n self.preparing_time = honour_settings.preparing_time\n self.match_interval = honour_settings.match_interval\n\n self.loop = task.LoopingCall(self.match)\n self.loop.start(self.match_interval)\n\n def add(self, character):\n \"\"\"\n Add a character to the queue.\n \"\"\"\n character_id = character.id\n\n if character_id in self.waiting_queue:\n return\n \n self.waiting_queue.append(character_id)\n character.msg({\"in_combat_queue\": \"\"})\n\n def remove_by_id(self, character_id):\n \"\"\"\n Remove a character from the queue.\n \"\"\"\n character = search_object(\"#%s\" % character_id)\n if character:\n self.remove(character[0])\n\n def remove(self, character):\n \"\"\"\n Remove a character from the queue.\n \"\"\"\n character_id = character.id\n\n if character_id in self.waiting_queue:\n self.waiting_queue.remove(character_id)\n\n if character_id in self.preparing:\n del self.preparing[character_id]\n\n character.msg({\"left_combat_queue\": \"\"})\n\n def match(self):\n \"\"\"\n Match opponents according to character's scores.\n The longer a character in the queue, the score is higher.\n The nearer of two character's rank, the score is higher.\n \"\"\"\n if len(self.waiting_queue) < 2:\n return\n\n # match characters by honour differences\n for i in range(len(self.waiting_queue) - 1):\n char_id_A = self.waiting_queue[i]\n if char_id_A in self.preparing:\n continue\n\n for j in range(i + 1, len(self.waiting_queue)):\n char_id_B = self.waiting_queue[j]\n if char_id_B in self.preparing:\n continue\n\n honour_A = HONOURS_MAPPER.get_honour_by_id(char_id_A, 0)\n honour_B = HONOURS_MAPPER.get_honour_by_id(char_id_B, 0)\n\n # max_honour_diff means no limits\n if self.max_honour_diff == 0 or math.fabs(honour_A - honour_B) <= self.max_honour_diff:\n # can match\n character_A = search_object(\"#%s\" % char_id_A)\n character_B = search_object(\"#%s\" % char_id_B)\n if character_A:\n 
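# search_object() returns a list of matching objects, hence the [0] when messaging below\n                        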
character_A[0].msg({\"prepare_match\": self.preparing_time})\n if character_B:\n character_B[0].msg({\"prepare_match\": self.preparing_time})\n\n call_id = reactor.callLater(self.preparing_time, self.fight, (char_id_A, char_id_B))\n self.preparing[char_id_A] = {\n \"time\": time.time(),\n \"opponent\": char_id_B,\n \"confirmed\": False,\n \"call_id\": call_id,\n }\n self.preparing[char_id_B] = {\n \"time\": time.time(),\n \"opponent\": char_id_A,\n \"confirmed\": False,\n \"call_id\": call_id,\n }\n\n def confirm(self, character):\n \"\"\"\n Confirm an honour combat.\n \"\"\"\n character_id = character.id\n if character_id not in self.preparing:\n return\n \n self.preparing[character_id][\"confirmed\"] = True\n\n def reject(self, character):\n \"\"\"\n Reject an honour combat.\n \"\"\"\n character_id = character.id\n if character_id not in self.preparing:\n return\n\n # stop the call\n call_id = self.preparing[character_id][\"call_id\"]\n call_id.cancel()\n \n # remove characters from the preparing queue\n opponent_id = self.preparing[character_id][\"opponent\"]\n\n character = search_object(\"#%s\" % character_id)\n if character:\n character[0].msg({\"match_rejected\": character_id})\n del self.preparing[character_id]\n\n opponent = search_object(\"#%s\" % opponent_id)\n if opponent:\n opponent[0].msg({\"match_rejected\": character_id})\n del self.preparing[opponent_id]\n\n self.remove_by_id(character_id)\n\n def fight(self, opponents):\n \"\"\"\n Create a combat.\n \"\"\"\n confirmed0 = opponents[0] in self.preparing and self.preparing[opponents[0]][\"confirmed\"]\n confirmed1 = opponents[1] in self.preparing and self.preparing[opponents[1]][\"confirmed\"]\n\n if not confirmed0 and not confirmed1:\n self.remove_by_id(opponents[0])\n self.remove_by_id(opponents[1])\n\n opponent0 = search_object(\"#%s\" % opponents[0])\n opponent0[0].msg({\"match_rejected\": opponents[0],\n \"left_combat_queue\": \"\"})\n opponent1 = search_object(\"#%s\" % opponents[1])\n opponent1[0].msg({\"match_rejected\": opponents[1],\n \"left_combat_queue\": \"\"})\n elif not confirmed0:\n # opponents 0 not confirmed\n self.remove_by_id(opponents[0])\n if opponents[1] in self.preparing:\n del self.preparing[opponents[1]]\n\n opponent0 = search_object(\"#%s\" % opponents[0])\n opponent0[0].msg({\"match_rejected\": opponents[0],\n \"left_combat_queue\": \"\"})\n\n opponent1 = search_object(\"#%s\" % opponents[1])\n opponent1[0].msg({\"match_rejected\": opponents[0]})\n elif not confirmed1:\n # opponents 1 not confirmed\n self.remove_by_id(opponents[1])\n if opponents[0] in self.preparing:\n del self.preparing[opponents[0]]\n\n opponent1 = search_object(\"#%s\" % opponents[1])\n opponent1[0].msg({\"match_rejected\": opponents[1],\n \"left_combat_queue\": \"\"})\n\n opponent0 = search_object(\"#%s\" % opponents[0])\n opponent0[0].msg({\"match_rejected\": opponents[1]})\n elif confirmed0 and confirmed1:\n # all confirmed\n opponent0 = search_object(\"#%s\" % opponents[0])\n opponent1 = search_object(\"#%s\" % opponents[1])\n # create a new combat handler\n chandler = create_script(settings.HONOUR_COMBAT_HANDLER)\n # set combat team and desc\n chandler.set_combat(\n combat_type=CombatType.HONOUR,\n teams={1:[opponent0[0]], 2:[opponent1[0]]},\n desc=_(\"Fight of Honour\"),\n timeout=0\n )\n\n self.remove_by_id(opponents[0])\n self.remove_by_id(opponents[1])\n\n\n# main handler\nMATCH_COMBAT_HANDLER = 
MatchPVPHandler()\n","sub_path":"muddery/server/combat/match_pvp_handler.py","file_name":"match_pvp_handler.py","file_ext":"py","file_size_in_byte":8871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"138518529","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torchvision import models, transforms  # models supplies the pretrained ResNet used below\n\nclass ResNet(nn.Module):\n    r\"\"\"A wrapper class to select the models to use for the visual encoding\n\n    Takes in observations and produces an embedding of the rgb and/or depth components\n\n    Args:\n        observation_space: The observation_space of the agent\n        output_size: The size of the embedding vector\n    \"\"\"\n    def __init__(self, observation_space):\n        super().__init__()\n        if \"rgb\" in observation_space.spaces:\n            self._n_input_rgb = observation_space.spaces[\"rgb\"].shape[2]\n        else:\n            self._n_input_rgb = 0\n\n        if \"depth\" in observation_space.spaces:\n            self._n_input_depth = observation_space.spaces[\"depth\"].shape[2]\n        else:\n            self._n_input_depth = 0\n\n        if self.is_blind:\n            self.cnn = nn.Sequential()\n        else:\n            self.cnn = models.resnet152(pretrained=True)\n\n        self.layer_init()\n\n    def layer_init(self):\n        # Drop the final fc layer and freeze the backbone\n        self.cnn = nn.Sequential(*list(self.cnn.children())[:-1])\n        for p in self.cnn.parameters():\n            p.requires_grad = False\n\n    @property\n    def is_blind(self):\n        return self._n_input_rgb + self._n_input_depth == 0\n\n    def forward(self, observations):\n        cnn_input = []\n\n        # Normalization of the images\n        # NOTE: transforms.ToTensor expects a PIL image or ndarray, so this pipeline\n        # needs adjusting if the observations already arrive as tensors.\n        preprocess = transforms.Compose([\n            transforms.Resize(256),\n            transforms.CenterCrop(224),\n            transforms.ToTensor(),\n            transforms.Normalize(\n                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n            ),\n        ])\n        if self._n_input_rgb > 0:\n            rgb_observations = observations[\"rgb\"]\n            # permute tensor to dimension [BATCH x CHANNEL x HEIGHT x WIDTH]\n            rgb_observations = rgb_observations.permute(0, 3, 1, 2)\n            rgb_observations = preprocess(rgb_observations)\n            cnn_input.append(rgb_observations)\n\n        if self._n_input_depth > 0:\n            depth_observations = observations[\"depth\"]\n            # permute tensor to dimension [BATCH x CHANNEL x HEIGHT x WIDTH]\n            depth_observations = depth_observations.permute(0, 3, 1, 2)\n            depth_observations = preprocess(depth_observations)\n            cnn_input.append(depth_observations)\n\n        cnn_input = torch.cat(cnn_input, dim=1)\n\n        with torch.no_grad():\n            return self.cnn(cnn_input)\n","sub_path":"habitat_baselines/vln/models/visual_encoder.py","file_name":"visual_encoder.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"405632501","text":"from talon import Context, Module, actions, grammar\n\n# user-defined words that the lexicon doesn't match\nsimple_vocabulary = [\n    \"Cisco\",\n    \"Citrix\",\n    \"DNS\",\n    \"VPN\",\n    \"admin\",\n    \"afl\",\n    \"alloc\",\n    \"asa\",\n    \"binja\",\n    \"blah\",\n    \"byte\",\n    \"bytes\",\n    \"cedric\",\n    \"cert\",\n    \"cfg\",\n    \"cve\",\n    \"daemon\",\n    \"debbi\",\n    \"dll\",\n    \"dlmalloc\",\n    \"docker\",\n    \"dotfiles\",\n    \"ecdsa\",\n    \"edg\",\n    \"errno\",\n    \"exim\",\n    \"fastbin\",\n    \"firefox\",\n    \"freebsd\",\n    \"fuzz\",\n    \"fuzzer\",\n    \"ghidra\",\n    \"github\",\n    \"hal\",\n    \"hexdump\",\n    \"ida\",\n    \"idarling\",\n    \"ios\",\n    \"kk\",\n    \"lambda\",\n    \"malloc\",\n    \"meta\",\n    \"metasploit\",\n    \"minecraft\",\n    \"mplayer\",\n    \"mscope\",\n    \"ncc group\",\n    \"neovim\",\n    \"netbsd\",\n    \"nmap\",\n    \"openbsd\",\n    \"patreon\",\n    \"pfsense\",\n    \"poc\",\n    \"ptmalloc\",\n    \"pwn\",\n    \"relro\",\n    \"rootkit\",\n    \"rop\",\n    
\"rrsp\",\n \"rsa\",\n \"shellcode\",\n \"ssh\",\n \"strmixalot\",\n \"tcache\",\n \"tfsa\",\n \"vim\",\n \"vimrc\",\n \"vmware\",\n \"vimium\",\n \"yammer\",\n \"sys\",\n \"argv\",\n \"parser\",\n \"gitlab\",\n \"wisp\",\n \"vimvixen\",\n \"admin\",\n \"debug\",\n \"debian\",\n \"aenea\",\n \"edit\",\n \"auto\",\n \"modules\",\n \"buf\",\n \"args\",\n \"parse\",\n \"var\",\n \"arena\",\n \"main\",\n \"scroll\",\n \"scrolling\",\n \"fastbin\",\n \"console\",\n \"integer\",\n \"pentest\",\n \"Aaron\",\n \"tmux\",\n \"keying\",\n \"tool\",\n \"exe\",\n \"unix\",\n \"buffer\",\n \"ncc\",\n \"nccgroup\",\n \"draft\",\n \"donut\",\n \"insert\",\n \"payload\",\n \"disk\",\n \"diskless\",\n \"loader\",\n \"ascii\",\n \"disk\",\n \"markdown\",\n \"BSD\",\n \"bool\",\n \"keying\",\n \"env\",\n \"tags\",\n \"PE\",\n \"raw\",\n \"page\",\n \"add\",\n \"octet\",\n \"dev\",\n \"calc\",\n \"close\",\n \"gandi\",\n \"memset\",\n \"polybar\",\n \"yay\",\n \"buku\",\n \"tech\",\n \"hover\",\n \"davmail\",\n \"break\",\n \"pico\",\n \"add\",\n \"giffed\",\n \"gif\",\n \"LUKS\",\n \"able\",\n \"metasploit\",\n \"mod\",\n \"most\",\n \"mouse\",\n \"timeout\",\n \"array\",\n \"arrays\",\n \"ping\",\n \"stellaris\",\n \"config\",\n \"make\",\n \"stub\",\n \"stubs\",\n]\n\nmapping_vocabulary = {\n \"and u s kernel\": \"ntoskrnl\",\n \"as break\": \"sbrk\",\n \"as p one\": \"sp1\",\n \"as p three\": \"sp3\",\n \"as p to\": \"sp2\",\n \"base sixty four\": \"base64\",\n \"colonel\": \"kernel\",\n \"damon\": \"daemon\",\n \"din dns\": \"dynDNS\",\n \"dot b s s\": \".bss\",\n \"dot data\": \".data\",\n \"dot text\": \".text\",\n \"drawio\": \"draw.io\",\n \"em protect\": \"mprotect\",\n \"ex ease\": \"exes\",\n \"ex ee\": \"exe\",\n \"fast bin\": \"fastbin\",\n \"foss\": \"fuzz\",\n \"frack\": \"phrack\",\n \"gee lib see\": \"glibc\",\n \"hack stump\": \"hexdump\",\n \"he low\": \"helo\",\n \"heck stump\": \"hexdump\",\n \"her go dogs\": \"ergodox\",\n \"hex raise\": \"hexrays\",\n \"higher key\": \"heirarchy\",\n \"i low\": \"ilo\",\n \"i three wm\": \"i3wm\",\n \"i three\": \"i3\",\n \"i\": \"I\",\n \"i'd\": \"I'd\",\n \"i'll\": \"I'll\",\n \"i'm\": \"I'm\",\n \"i've\": \"I've\",\n \"lib heap\": \"libheap\",\n \"lib see\": \"libc\",\n \"look aside\": \"lookaside\",\n \"ma map\": \"mmap\",\n \"no prob\": \"np\",\n \"of by one\": \"off by one\",\n \"parky\": \"poccy\",\n \"pound bag\": \"pwndbg\",\n \"rob\": \"rop\",\n \"shaw one\": \"sha1\",\n \"sixty for bit\": \"64-bit\",\n \"steer makes a lot\": \"strmixalot\",\n \"stir copy\": \"strcpy\",\n \"tay yo\": \"teo\",\n \"tea cash\": \"tcache\",\n \"thirty too bit\": \"32-bit\",\n \"two key eight\": \"2k8\",\n \"two key nineteen\": \"2k19\",\n \"two key sixteen\": \"2k16\",\n \"two key three\": \"2k3\",\n \"two key twelve\": \"2k12\",\n \"utt fight\": \"utf-8\",\n \"win thirty two k\": \"win32k\",\n \"win two key eight\": \"win2k8\",\n \"win two key nineteen\": \"win2k19\",\n \"win two key sixteen\": \"win2k16\",\n \"win two key three\": \"win2k3\",\n \"win two key twelve\": \"win2k12\",\n \"wind bag\": \"windbg\",\n \"ex eighty six\": \"x86\",\n \"ax eighty six\": \"x86\",\n \"a city six\": \"x86\",\n \"ex sixty four\": \"x64\",\n \"a sixty four\": \"x64\",\n \"ax sixty four\": \"x64\",\n \"key pass\": \"keepass\",\n \"eye three\": \"i3\",\n \"an am cli\": \"nmcli\",\n \"petty chunk\": \"ptchunk\",\n \"ped chunk\": \"ptchunk\",\n \"arg v\": \"argv\",\n \"arcpurse\": \"argparse\",\n \"arg purse\": \"argparse\",\n \"hedra\": \"ghidra\",\n \"heedra\": \"ghidra\",\n 
\"double you get\": \"wget\",\n \"pep eight\": \"pep8\",\n \"debbie an\": \"debian\",\n \"anne\": \"aenea\",\n \"all t snips\": \"ultisnips\",\n \"tcp dump\": \"tcpdump\",\n \"I notify\": \"inotify\",\n \"de bug\": \"debug\",\n \"buf her\": \"buffer\",\n \"head her\": \"header\",\n \"help her\": \"helper\",\n \"see seeing\": \"cc'ing\",\n \"ex ee\": \"exe\",\n \"xiii\": \"exe\",\n \"windows ten\": \"windows 10\",\n \"windows seven\": \"windows \",\n \"ncc group\": \"nccgroup\",\n \"ex or\": \"xor\",\n \"sea sharp\": \"c#\",\n \"sea file\": \"c file\",\n \"in cert\": \"insert\",\n \"sand box\": \"sandbox\",\n \"use her\": \"user\",\n \"pentest her\": \"pentester\",\n \"test her\": \"tester\",\n \"asked I\": \"ascii\",\n \"ask I\": \"ascii\",\n \"get ignore\": \".gitignore\",\n \"data tapes\": \"datatypes\",\n \"e numb\": \"enum\",\n \"king\": \"keying\",\n \"do main\": \"domain\",\n \"eye pee\": \"IP\",\n \"pee e\": \"PE\",\n \"arm sixty four\": \"ARM64\",\n \"arm thirty two\": \"ARM32\",\n \"dot ex e\": \".exe\",\n \"desk top\": \"desktop\",\n \"dot net\": \".NET\",\n \"etcetera\": \", etc.\",\n \"I all\": \"hi all\",\n \"windbag\": \"windbg\",\n \"bite\": \"byte\",\n \"bites\": \"bytes\",\n \"doughnut\": \"donut\",\n \"jiffed\": \"giffed\",\n \"jiff\": \"gif\",\n \"lux\": \"LUKS\",\n \"gooey\": \"gui\",\n \"vest where\": \"vmware\",\n \"lamby\": \"lambai\",\n \"four matters\": \"formatters\",\n \"meta exploit\": \"metasploit\",\n \"toby\": \"tobii\",\n \"you id\": \"UUID\",\n \"goo id\": \"GUID\",\n # weird common typo\n \"dolores\": \"stellaris\",\n \"sinology\": \"synology\",\n \"and or\": \"and/or\",\n}\n\n# Add single words here if Talon recognizes them, but they need to have their\n# capitalization adjusted.\ncapitalize = [\n \"I\",\n \"I'm\",\n \"I've\",\n \"I'll\",\n \"I'd\",\n \"Monday\",\n \"Mondays\",\n \"Tuesday\",\n \"Tuesdays\",\n \"Wednesday\",\n \"Wednesdays\",\n \"Thursday\",\n \"Thursdays\",\n \"Friday\",\n \"Fridays\",\n \"Saturday\",\n \"Saturdays\",\n \"Sunday\",\n \"Sundays\",\n \"January\",\n \"February\",\n # March omitted because it's a regular word too\n \"April\",\n # May omitted because it's a regular word too\n \"June\",\n \"July\",\n \"August\",\n \"September\",\n \"October\",\n \"November\",\n \"December\",\n]\n\n# Add single words here if Talon recognizes them, but they need to have their\n# spelling adjusted.\nword_map = {\n # For example:\n # \"color\": \"colour\",\n}\nword_map.update({x.lower(): x for x in capitalize})\n\n# Add words (or phrases you want treated as words) here if Talon doesn't\n# recognize them at all.\nsimple_vocabulary = [\"nmap\", \"admin\", \"Cisco\", \"Citrix\", \"VPN\", \"DNS\", \"minecraft\"]\n\n# Add vocabulary words (or phrases you want treated as words) here that aren't\n# recognized by Talon and are written differently than they're pronounced.\nmapping_vocabulary = {\n # For example:\n # \"enn map\": \"nmap\",\n # \"under documented\": \"under-documented\",\n}\nmapping_vocabulary.update(dict(zip(simple_vocabulary, simple_vocabulary)))\n\n\nmod = Module()\n\n\n@mod.capture(rule=\"{user.vocabulary}\")\ndef vocabulary(m) -> str:\n return m.vocabulary\n\n\n@mod.capture(rule=\"( | )\")\ndef word(m) -> str:\n try:\n return m.vocabulary\n except AttributeError:\n # TODO: if the word is both a regular word AND user.vocabulary, then in\n # principle it may parse as instead; we ought to pass it through\n # mapping_vocabulary to be sure. 
But we should be doing that in\n # user.text, below, too.\n words = actions.dictate.replace_words(actions.dictate.parse_words(m.word))\n assert len(words) == 1\n return words[0]\n\n\npunctuation = set(\".,-!?;:\")\n\n\n@mod.capture(rule=\"( | )+\")\ndef text(m) -> str:\n words = []\n for item in m:\n if isinstance(item, grammar.vm.Phrase):\n words.extend(\n actions.dictate.replace_words(actions.dictate.parse_words(item))\n )\n else:\n words.extend(item.split(\" \"))\n\n result = \"\"\n for i, word in enumerate(words):\n if i > 0 and word not in punctuation and words[i - 1][-1] not in (\"/-(\"):\n result += \" \"\n result += word\n return result\n\n\nmod.list(\"vocabulary\", desc=\"user vocabulary\")\n\nctx = Context()\n\n# dictate.word_map is used by actions.dictate.replace_words to rewrite words\n# Talon recognized. Entries in word_map don't change the priority with which\n# Talon recognizes some words over others.\nctx.settings[\"dictate.word_map\"] = word_map\n\n# user.vocabulary is used to explicitly add words/phrases that Talon doesn't\n# recognize. Words in user.vocabulary (or other lists and captures) are\n# \"command-like\" and their recognition is prioritized over ordinary words.\nctx.lists[\"user.vocabulary\"] = mapping_vocabulary\n","sub_path":"code/vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":9002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"133105886","text":"\n\nfrom xai.brain.wordbase.nouns._stamen import _STAMEN\n\n#calss header\nclass _STAMENS(_STAMEN, ):\n\tdef __init__(self,): \n\t\t_STAMEN.__init__(self)\n\t\tself.name = \"STAMENS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"stamen\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_stamens.py","file_name":"_stamens.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"124276838","text":"import math\nimport matplotlib\nimport numpy as np\nimport pandas as pd\n\nimport time\nfrom stockstats import StockDataFrame as Sdf\n\nfrom datetime import date\nfrom matplotlib import pyplot as plt\nfrom numpy.random import seed\n\nfrom sklearn.metrics import mean_squared_error\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.externals import joblib\nfrom keras.models import load_model\n\n\nimport tensorflow.python.keras.backend as K\nimport tensorflow\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.layers import Dense, LSTM\nfrom tensorflow.keras.layers import Dropout\nimport yfinance as yfsss\n\n#### Input params ##################\n#start_date = '2017-01-01'\n#end_date = '2020-04-23'\n#stk_path = yf.download(\"INFY\", start_date,end_date, prepost=True, rounding=True)\nstk_path = pd.read_csv(r\"E:\\Project\\Stock_Prediction\\ui\\public\\WIPRO.csv\")\n\ntest_size = 0.2 # proportion of dataset to be used as test set\ncv_size = 0.2 # proportion of dataset to be used as cross-validation set\nN = 9\nlstm_units = 50 # lstm param. initial value before tuning.\ndropout_prob = 0.5 # lstm param. initial value before tuning.\noptimizer = 'adam' # lstm param. initial value before tuning.\nepochs = 10 # lstm param. 
initial value before tuning.\nbatch_size = 1\nmodel_seed = 100\nfontsize = 14\nticklabelsize = 14\n# Set seeds to ensure same output results\nseed(101)\ntensorflow.random.set_seed(model_seed)\n\n\ndef get_mape(y_true, y_pred):\n \"\"\"\n Compute mean absolute percentage error (MAPE)\n \"\"\"\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n\ndef get_x_y(data, N, offset):\n \"\"\"\n\n Split data into x (features) and y (target)\n \"\"\"\n x, y = [], []\n for i in range(offset, len(data)):\n x.append(data[i-N:i])\n y.append(data[i])\n x = np.array(x)\n y = np.array(y)\n\n return x, y\n\n\ndef get_x_scaled_y(data, N, offset):\n \"\"\"\n Split data into x (features) and y (target)\n We scale x to have mean 0 and std dev 1, and return this.\n We do not scale y here.\n Inputs\n data : pandas series to extract x and y\n N\n offset\n Outputs\n x_scaled : features used to predict y. Scaled such that each element has mean 0 and std dev 1\n y : target values. Not scaled\n mu_list : list of the means. Same length as x_scaled and y\n std_list : list of the std devs. Same length as x_scaled and y\n \"\"\"\n x_scaled, y, mu_list, std_list = [], [], [], []\n for i in range(offset, len(data)):\n mu_list.append(np.mean(data[i-N:i]))\n std_list.append(np.std(data[i-N:i]))\n x_scaled.append((data[i-N:i]-mu_list[i-offset])/std_list[i-offset])\n y.append(data[i])\n x_scaled = np.array(x_scaled)\n y = np.array(y)\n return x_scaled, y, mu_list, std_list\n\n\ndf = stk_path\nDate = df.index\ndf.reset_index(drop=False, inplace=True)\ndf.loc[:, 'Date'] = pd.to_datetime(df['Date'], format='%Y-%m-%d')\ndf.columns = [str(x).lower().replace(' ', '_') for x in df.columns]\ndf['month'] = df['date'].dt.month # Get month of each sample\n# Sort by datetime\ndf.sort_values(by='date', inplace=True, ascending=True)\n\nnum_cv = int(cv_size*len(df))\nnum_test = int(test_size*len(df))\nnum_train = len(df) - num_cv - num_test\ntrain = df[:num_train][['date', 'adj_close']]\ncv = df[num_train:num_train+num_cv][['date', 'adj_close']]\ntrain_cv = df[:num_train+num_cv][['date', 'adj_close']]\ntest = df[num_train+num_cv:][['date', 'adj_close']]\n\n\n# Converting dataset into x_train and y_train\n# Here we only scale the train dataset, and not the entire dataset to prevent information leak\nscaler = StandardScaler()\ntrain_scaled = scaler.fit_transform(\n np.array(train['adj_close']).reshape(-1, 1))\n# Split into x and y\nx_train_scaled, y_train_scaled = get_x_y(train_scaled, N, N)\n# Scale the cv dataset\n# Split into x and y\nx_cv_scaled, y_cv, mu_cv_list, std_cv_list = get_x_scaled_y(\n np.array(train_cv['adj_close']).reshape(-1, 1), N, num_train)\n# Here we scale the train_cv set, for the final model\nscaler_final = StandardScaler()\ntrain_cv_scaled_final = scaler_final.fit_transform(\n np.array(train_cv['adj_close']).reshape(-1, 1))\n\n\n# Optimized parameters\nN_opt = 60\nlstm_units_opt = 128\ndropout_prob_opt = 0.5\nepochs_opt = 10\nbatch_size_opt = 8\noptimizer_opt = 'adam'\nx_train_cv_scaled, y_train_cv_scaled = get_x_y(\n train_cv_scaled_final, N_opt, N_opt)\n\n# Split test into x and y\nx_test_scaled, y_test, mu_test_list, std_test_list = get_x_scaled_y(\n np.array(df['adj_close']).reshape(-1, 1), N_opt, num_train+num_cv)\n\nmodel = Sequential()\nmodel.add(LSTM(units=lstm_units_opt, return_sequences=True,\n input_shape=(x_train_cv_scaled.shape[1], 1)))\nmodel.add(Dropout(dropout_prob_opt)) # Add dropout with a probability of 
0.5\nmodel.add(LSTM(units=lstm_units_opt))\nmodel.add(Dropout(dropout_prob_opt))  # Add dropout with a probability of 0.5\nmodel.add(Dense(1))\n\n# Compile and fit the LSTM network\nmodel.compile(loss='mean_squared_error', optimizer=optimizer_opt)\nmodel.fit(x_train_cv_scaled, y_train_cv_scaled,\n          epochs=epochs_opt, batch_size=batch_size_opt, verbose=0)\n# joblib.dump(model,'lstm_1.joblib')\n\n# model1=joblib.load('lstm_1.joblib')\nmodel.save('Wipro_model.h5')\nmodel1 = load_model('Wipro_model.h5')\n\nest_scaled = model1.predict(x_test_scaled)\n# Undo the per-window scaling to get the predictions back into price units\nest = (est_scaled * np.array(std_test_list).reshape(-1, 1)) + \\\n    np.array(mu_test_list).reshape(-1, 1)\n\n\n#rmse = math.sqrt(mean_squared_error(y_test, est))\n#mape = get_mape(y_test, est)\n\n# USED FOR RECOMMENDATION\n\ndf_recommend = pd.DataFrame({'close': est.reshape(-1),\n                             'date': df[num_train+num_cv:]['date']})\nstock_rec = Sdf.retype(df_recommend)\nsignal = stock_rec['macds']  # signal line\nmacd = stock_rec['macd']  # macd line\n# MACD histogram\nmacdhist = stock_rec['macdh']\nrsi_sig = stock_rec['rsi_6']\n\n# Since you need at least two days in the for loop\nlistLongShort = [\"No data\"]\n\nfor i in range(1, len(signal)):\n\n    # macd crosses upward over the signal line\n    if macd[i] > signal[i] and macd[i - 1] <= signal[i - 1]:\n        listLongShort.append(\"BUY\")\n    # macd crosses downward over the signal line\n    elif macd[i] < signal[i] and macd[i - 1] >= signal[i - 1]:\n        listLongShort.append(\"SELL\")\n    # Do nothing if not crossed\n    else:\n        listLongShort.append(\"HOLD\")\n\nstock_rec['Advice_macd'] = listLongShort\n\n# The advice column means \"Buy/Sell/Hold\" at the end of this day or\n# at the beginning of the next day, since the market will be closed\n\nlistLongShort = [\"No data\"]\n\nfor i in range(1, len(rsi_sig)):\n    # RSI below 30 is treated as oversold\n    if rsi_sig[i] < 30:\n        listLongShort.append(\"BUY\")\n    # RSI above 70 is treated as overbought\n    elif rsi_sig[i] > 70:\n        listLongShort.append(\"SELL\")\n    # Do nothing otherwise\n    else:\n        listLongShort.append(\"HOLD\")\n\nstock_rec['Advice_rsi'] = listLongShort\nprint(stock_rec)\n","sub_path":"service/Wipromodel_generator.py","file_name":"Wipromodel_generator.py","file_ext":"py","file_size_in_byte":7191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"548105511","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport Tkinter as tk\n\ntag_num = {}\nattr_dict = {}\ninstance_list = []\nID_dict = {}\nentry_dict = {}\n\n# Place the objects that hold the tag name and attributes\n# (coordinates are rough)\ndef place(instance):\n    instance.tag_rb.place(x=900,y=instance.mY)\n\n\ndef read_attribute_and_value():\n    attr_value = open(\"resource/attribute_value\", \"r\")\n    for line in attr_value:\n        if len(line.split(\",\")) > 1:\n            string = line.replace(\"\\n\", \"\").split(\",\")\n            string.pop(0)\n            attr_dict[line.split(\",\")[0]] = string\n\n\ndef get_id():\n    return ID_dict\n\n\ndef get_entry():\n    return entry_dict\n\n\n# When a tag is selected, place the widgets for entering its attributes and values\ndef tag_selected(tag):\n    global instance_list, ID_dict, entry_dict\n    # Tkinter offers no method to remove a placed widget instance from here, so the\n    # instances are parked far off-screen and treated as gone\n    # (memory usage will suffer)\n    for elem in instance_list:\n        elem.place(x=1000000,y=1000000)\n    instance_list = []\n    ID_dict = {}\n    entry_dict = {}\n\n    i = 0\n    for elem in tag.mAttribute:\n        label = tk.Label(text=elem,width=13)\n        label.place(x=10+i*120,y=450)\n        instance_list.append(label)\n\n        if elem in attr_dict.keys():\n            ID = tk.StringVar()\n            ID.set(\"null\")\n            ID_dict[elem] = ID\n            j = 0\n            for v in attr_dict[elem]:\n                rb = tk.Radiobutton(text=v,variable=ID,value=v)\n                
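# All the radio buttons created in this loop share one StringVar (ID), which is what\n                # groups them into a single mutually exclusive set in Tkinter\n                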
rb.place(x=10+i*120,y=475+j*25)\n                instance_list.append(rb)\n                j += 1\n        else:\n            entry = tk.Entry(width=12)\n            entry.place(x=10+i*120,y=475)\n            entry_dict[elem] = entry\n            instance_list.append(entry)\n        i += 1\n\n\nclass TagRB:\n    # Class for a tag's radio button\n    def __init__(self, tag_list, tagID, ID):\n        # Initialization\n        self.mTagName = tag_list[0]\n        self.mTag = tag_list[1]\n        self.mY = 10 + ID * 25\n\n        self.mAttribute = []\n        i = 2\n        while i < len(tag_list):\n            self.mAttribute.append(tag_list[i])\n            i += 1\n\n        global tag_num\n        tag_num[self.mTag] = 1\n\n        global attr_dict\n        read_attribute_and_value()\n\n        # Create the widget instance\n        self.tag_rb = tk.Radiobutton(text=self.mTagName,variable=tagID,value=ID)\n\n","sub_path":"Tag.py","file_name":"Tag.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"291309231","text":"\"\"\"\r\nSimple command line utility to stream the parsed UBX output of a u-blox GNSS device.\r\n\r\nUsage (all args are optional):\r\nubxdump.py port=\"COM13\" baud=9600 timeout=5 ubxonly=0 raw=0\r\n\r\nIf ubxonly=True (1), streaming will terminate on any non-UBX data (e.g. NMEA).\r\n\"\"\"\r\n\r\nimport sys\r\nfrom serial import Serial\r\nfrom pyubx2 import UBXReader\r\n\r\nPORT = \"COM13\"\r\nBAUD = 9600\r\nTIMEOUT = 5\r\n\r\n\r\ndef stream_ubx(**kwargs):\r\n    \"\"\"\r\n    Stream output\r\n    \"\"\"\r\n\r\n    try:\r\n        port = kwargs.get(\"port\", PORT).strip('\"')\r\n        baud = int(kwargs.get(\"baud\", BAUD))\r\n        timeout = int(kwargs.get(\"timeout\", TIMEOUT))\r\n        ubxonly = int(kwargs.get(\"ubxonly\", 0))\r\n        rawformat = int(kwargs.get(\"raw\", 0))\r\n        print(\r\n            f\"\\nStreaming from {port} at {baud} baud in\",\r\n            f\"{'raw' if rawformat else 'parsed'} format...\\n\",\r\n        )\r\n        stream = Serial(port, baud, timeout=timeout)\r\n        ubr = UBXReader(stream, ubxonly=ubxonly)\r\n        for (raw, parsed) in ubr:\r\n            if rawformat:\r\n                print(raw)\r\n            else:\r\n                print(parsed)\r\n    except KeyboardInterrupt:\r\n        print(\"\\nStreaming terminated by user\\n\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    if len(sys.argv) > 1:\r\n        if sys.argv[1] in {\"-h\", \"--h\", \"help\", \"-help\", \"--help\", \"-H\"}:\r\n            print(\r\n                \" ubxdump.py is a simple command line utility to stream\",\r\n                \"the parsed UBX output of a u-blox GNSS device.\\n\\n\",\r\n                \"Usage (all args are optional): ubxdump.py\",\r\n                'port=\"COM13\" baud=9600 timeout=5',\r\n                \"ubxonly=0 raw=0\\n\\n Type Ctrl-C to terminate.\",\r\n            )\r\n            sys.exit()\r\n\r\n    stream_ubx(**dict(arg.split(\"=\") for arg in sys.argv[1:]))\r\n","sub_path":"examples/ubxdump.py","file_name":"ubxdump.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"294052525","text":"import pygame\nimport helpers\nfrom abstractscreen import AbstractScreen\n\n# Behaves like the actual LED screen, but shows the screen content on a computer screen\nclass VirtualScreen(AbstractScreen):\n\tdef __init__(self, width = 16, height = 16, led_pin = 18, led_freq_hz = 800000, led_dma = 5, led_invert = False, led_brightness = 200):\n\t\tsuper(VirtualScreen, self).__init__(width, height)\n\t\tself.pixel_size = 30\n\n\t\tpygame.display.init()\n\t\tself.screen = pygame.display.set_mode((width * self.pixel_size,\n\t\t\t\t\t\t\t\t\t\t\t height * self.pixel_size),\n\t\t\t\t\t\t\t\t\t\t\t pygame.NOFRAME)\n\n\t\tself.surface = pygame.Surface(self.screen.get_size())\n\n\tdef update(self):\n\t\tfor y in range(self.height):\n\t\t\tfor x in 
range(self.width):\n\t\t\t\t#colors are in GRB format on the LED strip, to display properly we need to convert to a RGB tuple\n\t\t\t\tadjusted_color = helpers.int_to_rgb_color(self.pixel[x][y])\n\t\t\t\tpygame.draw.rect(self.surface, adjusted_color, ((x * self.pixel_size, y * self.pixel_size), (((x+1) * self.pixel_size), (y+1) * self.pixel_size)))\n\n\t\tself.screen.blit(self.surface, (0, 0))\n\t\tpygame.display.flip()\n\t\tpygame.display.update()","sub_path":"screen/virtualscreen.py","file_name":"virtualscreen.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"252773259","text":"import argparse\nimport vcf\nimport pandas as pd\n\ndef read_ivar(filename):\n ivar_calls = pd.read_csv(filename, sep='\\t')\n ivar_calls[\"Variant\"] = ivar_calls.apply(lambda row: \n str(row[\"POS\"]) + \\\n str(row[\"REF\"]) + \">\" + \\\n str(row[\"ALT\"]), axis=1)\n ivar_calls = ivar_calls.drop_duplicates(subset=[\"Variant\"])\n\n return ivar_calls\n\n\ndef read_lofreq(filename):\n lofreq_calls = pd.DataFrame(columns=[\"CHROM\", \"POS\", \"REF\", \"ALT\", \"QUAL\", \n \"REF_DP\", \"REF_RV\", \"ALT_DP\", \"ALT_RV\",\n \"ALT_FREQ\", \"TOTAL_DP\"])\n vcf_reader = vcf.Reader(filename=filename)\n for row in vcf_reader:\n lofreq_calls = lofreq_calls.append({\"CHROM\": row.CHROM, \n \"POS\": int(row.POS),\n \"REF\": row.REF,\n \"ALT\": row.ALT[0],\n \"QUAL\": row.QUAL, \n \"REF_DP\": row.INFO[\"DP4\"][0] + row.INFO[\"DP4\"][1], \n \"REF_RV\": row.INFO[\"DP4\"][1],\n \"ALT_DP\": row.INFO[\"DP4\"][2] + row.INFO[\"DP4\"][3],\n \"ALT_RV\": row.INFO[\"DP4\"][3],\n \"ALT_FREQ\": row.INFO[\"AF\"],\n \"TOTAL_DP\": row.INFO[\"DP\"],\n }, \n ignore_index=True)\n\n lofreq_calls[\"Variant\"] = lofreq_calls.apply(lambda row: \n str(row[\"POS\"]) + \\\n str(row[\"REF\"]) + \">\" + \\\n str(row[\"ALT\"]), axis=1)\n return lofreq_calls\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Variant call merging for LoFreq and iVar\")\n parser.add_argument(\"-m\", \"--min-af\", type=float, default=0.02, \n help=\"Minimum allele frequncy of the variants (between 0 and 1)\",\n required=True)\n parser.add_argument(\"-p\", \"--pass-only\", action=\"store_true\",\n help=\"Only retain vairants with p-value <= 0.05 from iVar\",\n required=False)\n parser.add_argument(\"ivar_input\", type=str, action=\"store\",\n help=\"Path to the iVar output .tsv\")\n parser.add_argument(\"lofreq_input\", type=str, action=\"store\",\n help=\"Path to the LoFreq output .vcf\")\n parser.add_argument(\"-o\", \"--output\", type=str, default=\"output.tsv\",\n help=\"Name of the output file for merged calls\")\n args = parser.parse_args()\n\n ivar_calls = read_ivar(args.ivar_input)\n lofreq_calls = read_lofreq(args.lofreq_input)\n merged_calls = lofreq_calls.merge(ivar_calls, on=[\"Variant\"], suffixes=(\"_LoFreq\", \"_iVar\"))\n filtered_merged_calls = merged_calls[~((merged_calls[\"ALT_FREQ_iVar\"] < args.min_af) & \\\n (merged_calls[\"ALT_FREQ_LoFreq\"] < args.min_af))]\n if args.pass_only:\n filtered_merged_calls = filtered_merged_calls[filtered_merged_calls[\"PASS\"] == True]\n\n filtered_merged_calls.to_csv(args.output, sep=\"\\t\", index=False)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"module3/MergeCalls.py","file_name":"MergeCalls.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"339104755","text":"import asyncio, aiohttp, config, 
re\nfrom bs4 import BeautifulSoup\nfrom requests.compat import urljoin\nimport json\n\nasync def fetch_url(session, url):\n async with session.get(url) as response:\n return await response.text()\n\n\nasync def fetch_many_urls(session, urls):\n return await asyncio.gather(*[fetch_url(session, url) for url in urls],\n return_exceptions=True)\n\n\nasync def main():\n async with aiohttp.ClientSession() as session:\n response_text = await fetch_url(\n session=session,\n url=config.base_site\n )\n await session.close()\n soup = BeautifulSoup(response_text, 'html.parser')\n data = {}\n\n # get roster links\n async with aiohttp.ClientSession() as session:\n rosters = await fetch_many_urls(\n session=session,\n urls=(urljoin(config.base_site, roster.get('href')) for roster in soup.find_all('a', text=re.compile('Roster')))\n )\n await session.close()\n\n # get player webpage links\n async with aiohttp.ClientSession() as session:\n for roster in rosters:\n soup = BeautifulSoup(roster, 'html.parser')\n team = soup.select('#sub-branding > h2 > a > b')[0].text.split()[-1]\n\n data[team] = {}\n\n async for tr in soup.find_all('a').get('href'): \n data[team][tr.find_all('td')[1].a.text] = asyncio.ensure_future(\n fetch_url( \n session=session,\n url=tr.find_all('td')[1].a.get('href')\n )\n )\n print(json.dumps(data))\n\nif __name__=='__main__':\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n loop.close()\n\n","sub_path":"scraper/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"269437878","text":"'''\nReverse Number is a number which is the same when reversed.\n\nFor Example;\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99, 101 => First 20 Reverse Numbers\n \nTASK:\nYou need to return the nth reverse number. 
(Assume that reverse numbers start from 0 as shown in the example.)\n\nNOTES:\n0 < n <= 1000000\n'''\n\n\ndef find_reverse_number(n):\n    # 10 numbers from 0-9\n    if n <= 10:\n        return n-1\n    # 9 numbers from 11-99\n    if n <= 19:\n        return int(str(n-10)*2)\n    # other cases\n    else:\n        # -1 to exclude 0 and start from 1\n        # then -1 more to set order start from 1, not 0\n        n -= 2\n        # set number of digits = 1\n        k = 1\n        # count of reverse numbers with k digits = 9*10**((k-1)//2)\n        # loop to find the number of digits (k) and position (n) of the n-th number;\n        # >= so that the last k-digit palindrome rolls over into k+1 digits correctly\n        while n >= 9*10**((k-1)//2):\n            n -= 9*10**((k-1)//2)\n            k += 1\n        # length of the symmetric half of the palindrome\n        length = (k+1)//2\n        # split position n into a list of digits\n        ls = [int(x) for x in str(n)]\n        num = \"\"\n        # pad the list of digits to avoid an index error in the loop\n        while len(ls) != length:\n            ls.insert(0, 0)\n        # loop to build the final number\n        for i in range(length-1, -1, -1):\n            # middle digit(s) of the number\n            if i == length-1:\n                # if the number of digits is odd\n                if k % 2 != 0:\n                    num = str(ls[i])\n                # if the number of digits is even\n                else:\n                    num = str(ls[i])*2\n            # first and last digits of the number\n            elif i == 0:\n                num = str(ls[i]+1)+num+str(ls[i]+1)\n            # other digits\n            else:\n                num = str(ls[i])+num+str(ls[i])\n        # convert to int\n        return int(num)\n\n# The algorithm finds the digit count k of the n-th number, and its position counting\n# from the first palindrome with k digits.\n# The n-th palindrome counting from the first k-digit palindrome can be computed from that position.\n# E.g. an initial n of 1001 -> k=5 and n=801 counting from the first 5-digit palindrome (10001).\n# A 5-digit palindrome has the form \"xyzyx\" with x=8+1, y=0, z=1 according to that position\n# => the number is 90109\n","sub_path":"6kyu/Find_the_nth_Reverse_Number.py","file_name":"Find_the_nth_Reverse_Number.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"437110608","text":"#!/usr/bin/env python\r\n\"\"\" PIPE STEP LOAD AUXILIARY FILE - Version 1.0.0\r\n\r\n    This module provides a pipe step with a simple mechanism to search\r\n    auxiliary files and match them to input data. An initial list of\r\n    potential aux files is determined from a path/filename string\r\n    (a glob); the final file(s) is determined by matching header\r\n    keywords between the auxiliary files and the step input data.\r\n\r\n    @author: berthoud\r\n\r\n    To check:\r\n    - check from MI step and SI step\r\n    - make sure it works with any pipedata type\r\n    To Change:\r\n    OK Test function with environment: 1 input file, 3 bias and 3 dark files (fudge hdr keys)\r\n    OK make a test object to play with, feed it input file and look what it finds to fit it\r\n    OK Loadauxsetup: run it multiple times and loadauxfile have auxfilepar option\r\n      - Also update fitkeys to %s(fit)keys %sbkup %sfile\r\n        OPEN QUESTION: change all or just fitkeys? 
ANSWER: %sfile, bkup%s, %sfitkeys (with fitskeys as fallback)\r\n OK Loadauxfile: set up with auxfilepar option\r\n OK loadauxfile() and loadauxname() for returning filenames only\r\n - copy all code and update\r\n - update loadauxfile\r\n - test both\r\n - Loadauxfile: set up with multiple option\r\n - Changes to loadauxname: namelist/single option\r\n - idea: load all hdus then loop through keys and shorten list with each run\r\n from back to front and don't delete last file.\r\n - first load all headers\r\n - loop through all keywords (oldlist and newlist)\r\n end if all keys checked or break if len(newlist) == 0\r\n - loadauxfile: for filelist/single option\r\n - loadauxname() with option to give range for auxdaterange (fraction of day)\r\n \r\n Future Development:\r\n - Function to load aux files w/o parameters (if needed)\r\n\"\"\"\r\n\r\nimport os # os library\r\nimport glob # glob library\r\nimport string # for string.join\r\nimport time # time library\r\nfrom drp.dataparent import DataParent # Pipeline Data object\r\nfrom drp.stepparent import StepParent\r\nfrom drp.stepmiparent import StepMIParent # To check if we have datain or [datain, datain, ...]\r\n#from docutils.parsers import null\r\n\r\nclass StepLoadAux(StepParent):\r\n \"\"\" HAWC Pipeline Step Parent Object\r\n The object is callable. It requires a valid configuration input\r\n (file or object) when it runs.\r\n \"\"\"\r\n\r\n def loadauxsetup(self, auxpar = 'aux'):\r\n \"\"\" This object does not have a conventional setup function.\r\n Since it is intended to be inherited by other steps. This\r\n code should be used to set up the child step with the\r\n parameters auxfile and auxfitkeys.\r\n \r\n auxpar: Parameter name for the parameter containing\r\n the filepathname glob to search for auxiliary\r\n files.\r\n \r\n This function should be called in the setup function of\r\n the child step after self.paramlist has been initiated.\r\n \"\"\"\r\n # Set name of the auxfile parameter\r\n self.auxpar = auxpar\r\n # Append parameters\r\n self.paramlist.append([auxpar + 'file', '%sfolder/*.fits' % auxpar,\r\n 'Filename for auxiliary file(s). Can contain * and ? ' +\r\n 'wildcards to match multiple files to be selected using fitkeys ' +\r\n '(default = %sfolder/*.fits)' % auxpar])\r\n self.paramlist.append(['bkup'+auxpar, 'bkup%sfolder/*.fits' % auxpar,\r\n 'Back up filename for auxiliary file(s). Can contain * and ? ' +\r\n 'wildcards to match multiple files to be selected using fitkeys ' +\r\n '(default = bkup%sfolder/*.fits)' % auxpar])\r\n self.paramlist.append([auxpar + 'fitkeys', [],\r\n 'List of header keys that need to match auxiliary data file ' +\r\n '(default = []) - only used if multiple files ' +\r\n 'match %sfile' % auxpar])\r\n if 'daterange' not in [ par[0] for par in self.paramlist] :\r\n self.paramlist.append(['daterange',1.0,\r\n 'If DATE-OBS is in fitkeys, files are matched within this many days.'])\r\n\r\n def loadauxname(self, auxpar = '', data = None, multi = False):\r\n \"\"\" Searches for files matching auxfile. If only one match is\r\n found, that file is returned. Else the header\r\n keywords listed in auxfitkeys are matched between the\r\n data and the auxfiles which were found. The first auxfile\r\n for which these keywords values best match the ones\r\n from data is selected. The filename of the best match\r\n is returned.\r\n \r\n auxpar: A name for the aux file parameter to use. 
This\r\n allows the loadaux functions to be used multiple times\r\n in a given pipe step (for example for darks and\r\n flats). Default value is self.auxpar which is set\r\n by loadauxsetup().\r\n data: A pipedata object to match the auxiliary file to.\r\n If no data is specified self.datain is used (for\r\n Multi Input steps self.datain[0]).\r\n \"\"\"\r\n ### Setup\r\n # Set auxpar\r\n if len(auxpar) == 0:\r\n auxpar = self.auxpar\r\n # Get parameters\r\n auxfile = os.path.expandvars(self.getarg(auxpar + 'file'))\r\n fitkeys = self.getarg(auxpar + 'fitkeys')\r\n if len(fitkeys) == 1 and len(fitkeys[0]) == 0:\r\n fitkeys = []\r\n ### Look for files - return in special cases\r\n # Glob the list of files\r\n auxlist = glob.glob(auxfile)\r\n # Fall back to backup location if no file found, raise if still nothing\r\n if len(auxlist) < 1:\r\n self.log.warn('No files found under %s - looking in backup' % auxfile)\r\n auxfile = os.path.expandvars(self.getarg('bkup'+auxpar))\r\n auxlist = glob.glob(auxfile)\r\n if len(auxlist) < 1:\r\n msg = 'No %s files found under %s' % (auxpar, auxfile)\r\n self.log.error(msg)\r\n raise ValueError(msg)\r\n # Get datain object (depends on step being SingleInput or MultiInput)\r\n if data is None:\r\n if issubclass(self.__class__, StepMIParent):\r\n data = self.datain[0]\r\n else:\r\n data = self.datain \r\n # Return unique file, or all files if fitkeys is empty\r\n if len(auxlist) == 1 or len(fitkeys) == 0:\r\n if len(auxlist) == 1:\r\n self.log.info('LoadAuxName: Found unique file = %s' % auxlist[0])\r\n else:\r\n self.log.info('LoadAuxName: No fitkeys: Return first %sfile match = %s' %\r\n (self.auxpar, auxlist[0]) )\r\n data.setheadval('HISTORY','%s: Best %sfile = %s' % \r\n (self.name, self.auxpar, os.path.split(auxlist[0])[1],))\r\n if multi:\r\n return auxlist\r\n else:\r\n return auxlist[0]\r\n ### Select files with Fitkeys\r\n # check format (make first element uppercase)\r\n try:\r\n _ = fitkeys[0].upper()\r\n except AttributeError:\r\n # AttributeError if it's not a string\r\n self.log.error('LoadAuxFile: fitkeys config parameter is ' +\r\n 'incorrect format - need list of strings')\r\n raise TypeError('fitkeys config parameter is incorrect format' +\r\n ' - need list of strings')\r\n # Load all headers from auxlist into auxheadlist (pipedata objects)\r\n auxheadlist = []\r\n for auxnam in auxlist:\r\n auxheadlist.append(DataParent(config = self.config).loadhead(auxnam))\r\n # Look through keywords, only keep auxfiles which fit keys\r\n for key in fitkeys:\r\n newheadlist = []\r\n # Look through auxfiles, transfer good ones\r\n if key == 'DATE-OBS': # SPECIAL CASE DATE-OBS: \r\n # get time for data\r\n datime = time.mktime(time.strptime(data.getheadval('DATE-OBS'), \r\n '%Y-%m-%dT%H:%M:%S'))\r\n # get time offset (from data) for each auxfile\r\n auxtimes = []\r\n for auxhead in auxheadlist:\r\n auxtime = time.mktime(time.strptime(auxhead.getheadval('DATE-OBS'), \r\n '%Y-%m-%dT%H:%M:%S'))\r\n auxtimes.append(abs(auxtime-datime))\r\n # only keep auxfiles which are within daterange of closest auxfile\r\n mindiff = min(auxtimes)\r\n timerange = self.getarg('daterange') * 86400\r\n for auxi in range(len(auxheadlist)):\r\n if auxtimes[auxi] - mindiff < timerange:\r\n newheadlist.append(auxheadlist[auxi])\r\n else: # Normal Keyword compare\r\n for auxhead in auxheadlist:\r\n # Check if the auxfile fits (compare with data)\r\n if auxhead.getheadval(key) == data.getheadval(key) :\r\n # it fits -> add to newheadlist\r\n newheadlist.append(auxhead)\r\n # break key loop if no files left\r\n if 
len(newheadlist) == 0:\r\n break\r\n else:\r\n auxheadlist = newheadlist\r\n \r\n ### Select file to return\r\n if multi:\r\n # Return all filenames\r\n auxname = [aux.filename for aux in auxheadlist]\r\n # Return message\r\n if len(auxname) > 3:\r\n listnames = \"%d files: %s to %s\" % (len(auxname),auxname[0],auxname[-1])\r\n else:\r\n listnames = ' '.join(auxname)\r\n if len(newheadlist) > 0:\r\n self.log.info('LoadAuxName: Matching %s found are <%s>' % \r\n (auxpar, listnames) )\r\n else:\r\n self.log.warn('LoadAuxName: NO MATCH finding aux files')\r\n self.log.warn('Returning files <%s>' % listnames )\r\n else:\r\n # Return first filename\r\n auxname = auxheadlist[0].filename\r\n # Select best file\r\n if len(newheadlist) > 0:\r\n self.log.info('LoadAuxName: Matching %s found is <%s>' % \r\n (auxpar, auxname) )\r\n else:\r\n self.log.warn('LoadAuxName: NO MATCH finding aux file')\r\n self.log.warn('Returning first file <%s>' % auxname )\r\n listnames = auxname # just so we can use it below\r\n data.setheadval('HISTORY','%s: Best %s = %s' % \r\n (self.name, auxpar, listnames))\r\n # Return selected file\r\n return auxname\r\n\r\n def loadauxfile(self, auxpar = '', data = None, multi = False):\r\n \"\"\" Uses loadauxname to search for files matching auxfile.\r\n See loadauxname for parameter description.\r\n \r\n A pipedata object with the best match is returned.\r\n \"\"\"\r\n # Get auxname\r\n auxname = self.loadauxname(auxpar,data, multi)\r\n # Load auxdata\r\n if multi:\r\n auxdata = [ DataParent(config=self.config).load(auxnam) for auxnam in auxname ]\r\n else:\r\n auxdata = DataParent(config=self.config).load(auxname)\r\n # Return selected file\r\n return auxdata\r\n \r\n def test(self):\r\n \"\"\" Test Pipe Step Parent Object:\r\n Runs a set of basic tests on the object\r\n \"\"\"\r\n # log message\r\n self.log.info('Testing pipe step %s' %self.name)\r\n # Set up the step\r\n self.name = 'loadaux' # this is not used normally as loadaux is normally used as parent\r\n self.loadauxsetup('test1')\r\n self.loadauxsetup('test2')\r\n for par in self.paramlist:\r\n print(par)\r\n # Load input data\r\n self.datain = DataParent(config=self.config).load('IN_a0_1.fits')\r\n # Get test1 auxfile\r\n auxf = self.loadauxname('test1',multi=True)\r\n print('********** ' + repr(auxf))\r\n # Get test2 auxfile\r\n auxf = self.loadauxname('test2')\r\n print('********** ' + repr(auxf))\r\n # log message\r\n self.log.info('Testing pipe step %s - Done' %self.name)\r\n \r\nif __name__ == '__main__':\r\n \"\"\" Main function to run the pipe step from command line on a file.\r\n Command:\r\n python steploadaux.py input.fits -arg1 -arg2 . . .\r\n Standard arguments:\r\n --config=ConfigFilePathName.txt : name of the configuration file\r\n -t, --test : runs the functionality test i.e. 
pipestep.test()\r\n --loglevel=LEVEL : configures the logging output for a particular level\r\n -h, --help : Returns a list of available command line parameters\r\n \"\"\"\r\n StepLoadAux().execute()\r\n\r\n\"\"\" === History ===\r\n\"\"\"\r\n","sub_path":"source/drp/steploadaux.py","file_name":"steploadaux.py","file_ext":"py","file_size_in_byte":12941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"414481441","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom tests_selenium_iFrame.pages.frame_page import FramePage\nfrom tests_selenium_iFrame.utils.settings_parser import SettingsParser\nfrom tests_selenium_iFrame.utils.get_random_text import get_random_text\n\n\nclass TestIframe:\n test_settings = SettingsParser().get_test_settings()\n logging.basicConfig(filename=test_settings['log_filename'], level=logging.INFO,\n format=test_settings['log_format'])\n logger = logging.getLogger(__name__)\n\n def test_iframe(self):\n\n self.logger.info('Step 1. Opening FramePage')\n\n frame_page = FramePage(self.test_settings)\n assert frame_page.is_opened(), self.logger.error(\"Frame page did not open\")\n\n self.logger.info('Step 2. Sending random text and verifying it')\n\n text = get_random_text()\n frame_page.type_text(text)\n assert frame_page.get_text() == text\n\n self.logger.info('Step 3. Making the text bold')\n\n frame_page.do_text_bold()\n assert frame_page.is_text_bold(), self.logger.error(\"Text is not bold\")\n","sub_path":"autotest_selenium_iFrame/tests_selenium_iFrame/tests/test_iframe.py","file_name":"test_iframe.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"429617142","text":"from dataclasses import dataclass, field\nimport logging\nfrom typing import Union, Optional, List\n\nfrom brain_brew.build_tasks.crowd_anki.headers_from_crowdanki import HeadersFromCrowdAnki\nfrom brain_brew.build_tasks.crowd_anki.headers_to_crowd_anki import HeadersToCrowdAnki\nfrom brain_brew.build_tasks.crowd_anki.media_to_from_crowd_anki import MediaToFromCrowdAnki\nfrom brain_brew.build_tasks.crowd_anki.note_models_from_crowd_anki import NoteModelsFromCrowdAnki\nfrom brain_brew.build_tasks.crowd_anki.note_models_to_crowd_anki import NoteModelsToCrowdAnki\nfrom brain_brew.build_tasks.crowd_anki.notes_from_crowd_anki import NotesFromCrowdAnki\nfrom brain_brew.build_tasks.crowd_anki.notes_to_crowd_anki import NotesToCrowdAnki\nfrom brain_brew.representation.json.crowd_anki_export import CrowdAnkiExport\nfrom brain_brew.representation.json.wrappers_for_crowd_anki import CrowdAnkiJsonWrapper\nfrom brain_brew.representation.yaml.note_model_repr import NoteModel\n\nfrom brain_brew.representation.build_config.build_task import TopLevelBuildTask\nfrom brain_brew.representation.build_config.representation_base import RepresentationBase\n\n\n@dataclass\nclass CrowdAnkiGenerate(TopLevelBuildTask):\n task_regex = r'generate_crowd_anki'\n\n @dataclass\n class Representation(RepresentationBase):\n folder: str\n notes: dict\n note_models: dict\n headers: dict\n media: Union[dict, bool] = field(default_factory=lambda: False)\n\n @classmethod\n def from_repr(cls, data: Union[Representation, dict]):\n rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data)\n return cls(\n crowd_anki_export=CrowdAnkiExport.create_or_get(rep.folder),\n notes_transform=NotesToCrowdAnki.from_repr(rep.notes),\n 
note_model_transform=NoteModelsToCrowdAnki.from_repr(rep.note_models),\n headers_transform=HeadersToCrowdAnki.from_repr(rep.headers),\n media_transform=MediaToFromCrowdAnki.from_repr(rep.media)\n )\n\n crowd_anki_export: CrowdAnkiExport\n notes_transform: NotesToCrowdAnki\n note_model_transform: NoteModelsToCrowdAnki\n headers_transform: HeadersToCrowdAnki\n media_transform: MediaToFromCrowdAnki\n\n def execute(self):\n headers = self.headers_transform.execute()\n ca_wrapper = CrowdAnkiJsonWrapper(headers)\n\n note_models: List[dict] = self.note_model_transform.execute()\n\n nm_name_to_id: dict = {model.name: model.id for model in self.note_model_transform.note_models}\n notes = self.notes_transform.execute(nm_name_to_id)\n\n media_files = self.media_transform.move_to_crowd_anki(\n self.notes_transform.notes, self.note_model_transform.note_models, self.crowd_anki_export)\n\n ca_wrapper.media_files = sorted([m.filename for m in media_files])\n ca_wrapper.name = self.headers_transform.headers.name\n ca_wrapper.note_models = note_models\n ca_wrapper.notes = notes\n\n # Set to CrowdAnkiExport\n self.crowd_anki_export.write_to_files(ca_wrapper.data)\n for media in media_files:\n media.copy_source_to_target()\n","sub_path":"brain_brew/build_tasks/crowd_anki/crowd_anki_generate.py","file_name":"crowd_anki_generate.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"275534700","text":"import pickle\r\nfrom nltk.tokenize import TweetTokenizer\r\nfrom nltk.corpus import stopwords\r\nimport string\r\nimport datetime\r\n\r\nuserTweets = {}\r\nuserTerms = {}\r\n\r\ndef tokenizeTweets():\r\n global userTerms\r\n tknzr = TweetTokenizer(preserve_case = False)\r\n for user in userTweets:\r\n tweetInfo = userTweets[user]\r\n for tweet in tweetInfo:\r\n timeStamp = tweetInfo[tweet][1]\r\n convTimeStamp = datetime.datetime.strptime(timeStamp,'%a, %d %b %Y %H:%M:%S +0000')\r\n tweetCount = tweetInfo[tweet][0]\r\n words = tknzr.tokenize(tweet)\r\n for word in words:\r\n count = tweetCount\r\n terms = userTerms.get(user,{})\r\n if not isStopWord(word):\r\n if (word in terms):\r\n count = terms[word][0] + count\r\n timeTerms = terms[word][1]\r\n convTime = datetime.datetime.strptime(timeTerms,'%a, %d %b %Y %H:%M:%S +0000')\r\n if (convTime>convTimeStamp):\r\n timeStamp = timeStamp\r\n else:\r\n timeStamp = timeTerms\r\n terms[word] = [count,timeStamp]\r\n userTerms[user] = terms\r\n \r\ndef isStopWord(word):\r\n punctuation = list(string.punctuation)\r\n stop = stopwords.words('english') + punctuation + ['rt', 'via']#,'\"','.'\r\n if word in stop:\r\n return True\r\n else:\r\n return False\r\n \r\ndef getUserTweets():\r\n global userTweets\r\n userTweets = pickle.load(open(\"userTweets.pkl\",\"rb\"))\r\n\r\ndef main():\r\n getUserTweets() \r\n tokenizeTweets()\r\n pickle.dump(userTerms,open(\"userTerms.pkl\",\"wb\"))\r\n \r\n #term = nltk.word_tokenize(tweet.encode('ascii', 'ignore'))\r\n \r\nif __name__ == \"__main__\":\r\n main()","sub_path":"getUserTerms.py","file_name":"getUserTerms.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"602748835","text":"# -*- coding: utf-8 -*-\nimport cv2\nimport numpy as np\n# import matplotlib.pyplot as plt\n\nfrom PIL import Image\nfrom difflib import SequenceMatcher\nfrom PIL import *\nfrom PIL import ImageEnhance\nimport time\nfrom pytesseract import image_to_string, 
image_to_boxes\nimport os\nimport sys\n\n# define some variables\nENTER = 13\nINDEX = 0\n\n# initialize the list of reference points and boolean indicating\n# whether cropping is being performed or not\nrefPt = []\nrefPts = []\ncropping = False\n\n\ndef click_and_crop(event, x, y, flags, param):\n # grab references to the global variables\n global refPt, cropping\n\n # if the left mouse button was clicked, record the starting\n # (x, y) coordinates and indicate that cropping is being\n # performed\n if event == cv2.EVENT_LBUTTONDOWN:\n refPt = [(x, y)]\n cropping = True\n\n # check to see if the left mouse button was released\n elif event == cv2.EVENT_LBUTTONUP:\n # record the ending (x, y) coordinates and indicate that\n # the cropping operation is finished\n refPt.append((x, y))\n cropping = False\n\n # draw a rectangle around the region of interest\n cv2.rectangle(image, refPt[0], refPt[1], (0, 255, 0), 2)\n cv2.imshow(\"image\", image)\n\n\ndef get_coord(image):\n # load the image, clone it, and setup the mouse callback function\n clone = image.copy()\n global refPts\n refPts.clear()\n\n\n while True:\n cv2.namedWindow(\"image\")\n cv2.setMouseCallback(\"image\", click_and_crop)\n # keep looping until the 'enter' or 'esc' key is pressed\n while True:\n # display the image and wait for a keypress\n cv2.imshow(\"image\", image)\n key = cv2.waitKey(0) & 0xFF\n\n # if the 'r' key is pressed, reset the cropping region\n if key == ord(\"r\"):\n image = clone.copy()\n\n # if the 'enter' or 'esc' key is pressed, break from the loop\n elif key == 13 or key == 27:\n break\n if key == 27:\n break\n\n # if there are two reference points, then crop the region of interest\n # from the image and display it\n if len(refPt) == 2:\n roi = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]\n refPts += refPt\n\n # cv2.imshow(\"ROI\", roi)\n # key = cv2.waitKey(0)\n\n # close all open windows\n # cv2.destroyAllWindows()\n # print(refPts)\n cv2.destroyAllWindows()\n return refPts\n\n\ndef write_coord(img_address, coord):\n f = open(\"description\", \"a\")\n f.write(\"{}: {}\\n\".format(img_address, coord))\n f.close()\n\nif __name__ == '__main__':\n if len(sys.argv) != 4:\n print(\"Usage: data_collection.py original_folder pos_folder neg_folder \")\n print(\"original_folder is the folder that contains the original images\")\n print(\"pos_folder is the folder that contains the positive images after processing\")\n print(\"neg_folder is the folder that contains the negative images after processing\")\n sys.exit(1)\n\n address = sys.argv[1]\n pos_folder = sys.argv[2]\n neg_folder = sys.argv[3]\n if not os.path.exists(pos_folder):\n os.makedirs(pos_folder)\n if not os.path.exists(neg_folder):\n os.makedirs(neg_folder)\n\n images = os.listdir(address)\n\n for file in images:\n img_address = \"{}/{}\".format(address, file)\n image = cv2.imread(img_address)\n coord = get_coord(image)\n write_coord(img_address,coord)\n\n","sub_path":"learning/Ruijie/handwriting_recognization_SVM/data_collection.py","file_name":"data_collection.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"520370073","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('components', '0004_auto_20150714_1405'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='customerissue',\n old_name='short_description',\n new_name='issue_description',\n 
),\n ]\n","sub_path":"OpenX1/components/migrations/0005_auto_20150714_1700.py","file_name":"0005_auto_20150714_1700.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"446416490","text":"import pygame, sys, math\r\nfrom pygame.locals import *\r\n\r\nfrom object_init import *\r\nfrom physics_engine import *\r\nfrom debug import *\r\nfrom pause_menu import *\r\nfrom game_loop_updates import *\r\nfrom game_loop_functions import *\r\nfrom settings import *\r\n\r\npygame.init()\r\n\r\n#Instantiate car/ball/walls\r\ncars = []\r\nball = {}\r\noriginalcarImg = pygame.image.load('car.jpg')\r\nrotation = 0\r\nangularVelocity = 0\r\ncarw, carh = originalcarImg.get_rect().size\r\ngoalW, goalH = 200, 400\r\nwallW = 10\r\nw, h = pygame.display.get_surface().get_size()\r\nwalls, wallDirections = createWalls(wallW, goalW, goalH, w, h)\r\nmass = 1500\r\ncarx = 400 #carx = 10\r\ncary = 200 #cary = h - carh - 10\r\ncomx, comy = carx + 25, cary + 12 #edit COM relative to center position of car (completely arbitrary right now)\r\nteam = ORANGE\r\n#createCarControls()\r\ncreateCar(cars, team, carx, cary, originalcarImg, carw, carh, mass, K_UP, K_DOWN, K_LEFT, K_RIGHT, K_SPACE, comx, comy, rotation, angularVelocity)\r\ncreateBall(ball)\r\n\r\nclicked = False\r\nphysics = False\r\nphysicsClicked = False\r\n\r\nrotating = False\r\n\r\ngameLoop = True\r\nwhile gameLoop: # the main game loop\r\n DISPLAYSURF.fill(GREY)\r\n pygame.draw.lines(DISPLAYSURF, BLACK, False, walls, 2)\r\n pygame.draw.rect(DISPLAYSURF, BLUE, (wallW, h - (wallW + goalH), goalW, goalH))\r\n pygame.draw.rect(DISPLAYSURF, WHITE, (walls[0][0], walls[0][1], w - 2 * (wallW + goalW), h - 2*wallW))\r\n pygame.draw.rect(DISPLAYSURF, ORANGE, (walls[6][0], walls[6][1], goalW, goalH))\r\n pygame.draw.lines(DISPLAYSURF, BLACK, False, walls, 2)\r\n\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n gameLoop = False\r\n \r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n print(\"OK\")\r\n pauseMenu(car)\r\n if event.key == car[\"ku\"]:\r\n car[\"angV\"] -= 200\r\n if event.key == car[\"kd\"]:\r\n car[\"angV\"] += 200\r\n if physics:\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == car[\"kr\"] and isOnGround(car):\r\n forces[\"driving\"][0] += 1000 #will be based on car rotation later\r\n forces[\"driving\"][1] += 0\r\n if event.key == car[\"kl\"] and isOnGround(car):\r\n forces[\"driving\"][0] += -1000 \r\n forces[\"driving\"][1] += 0\r\n if event.key == car[\"kb\"]:\r\n forces[\"boost\"][0] = -800 * math.cos(math.radians(car[\"r\"]))\r\n forces[\"boost\"][1] = 800 * math.sin(math.radians(car[\"r\"]))\r\n boostAnimation(car, True)\r\n if event.type == pygame.KEYUP:\r\n if event.key == car[\"ku\"]:\r\n car[\"angV\"] += 200\r\n if event.key == car[\"kd\"]:\r\n car[\"angV\"] += -200\r\n if event.key == car[\"kr\"] and isOnGround(car):\r\n forces[\"driving\"][0] += -1000\r\n forces[\"driving\"][1] += 0\r\n if event.key == car[\"kl\"] and isOnGround(car):\r\n forces[\"driving\"][0] += 1000\r\n forces[\"driving\"][1] += 0\r\n if event.key == car[\"kb\"]:\r\n forces[\"boost\"] = [0,0]\r\n boostAnimation(car, False)\r\n \r\n\r\n ####################\r\n # TEMPORARY SOLUTION\r\n # FOR DEBUGGING ONLY\r\n ####################\r\n if not physics:\r\n if pygame.key.get_focused():\r\n press = pygame.key.get_pressed()\r\n #release = pygame.key.get_released()\r\n #change velocity of cars based on directions pushed \r\n for car 
in cars:\r\n if press[car[\"kd\"]]:\r\n car[\"pos\"] = (car[\"pos\"][0], car[\"pos\"][1] + 2)\r\n if press[car[\"ku\"]]:\r\n car[\"pos\"] = (car[\"pos\"][0], car[\"pos\"][1] - 2)\r\n if press[car[\"kr\"]]:\r\n car[\"pos\"] = (car[\"pos\"][0] + 2, car[\"pos\"][1])\r\n if press[car[\"kl\"]]:\r\n car[\"pos\"] = (car[\"pos\"][0] - 2, car[\"pos\"][1])\r\n\r\n \r\n \r\n #draw every car on the frame\r\n for car in cars:\r\n carImg = pygame.transform.rotate(car[\"img\"], car[\"r\"])\r\n DISPLAYSURF.blit(carImg, car[\"dc\"])\r\n debugCornerLocations(car, BLUE)\r\n #DISPLAYSURF.blit(ball[\"img\"], ball[\"pos\"])\r\n \r\n #change position and rotation of car based on velocity and angular velocity\r\n for car in cars:\r\n #ideally should just have updateCar() in this for loop and that's it\r\n vx, vy = car[\"vel\"]\r\n #px = car[\"pos\"][0] + vx / FPS\r\n #py = car[\"pos\"][1] + vy / FPS\r\n #car[\"pos\"] = (px, py)\r\n car[\"r\"] += car[\"angV\"] / FPS\r\n updateCarRotation(car, FPS)\r\n updateCorners(car)\r\n updateCOM(car)\r\n #print(netAccel)\r\n if physics:\r\n netAccel = calculateCarNetAccel(car, forces)\r\n car[\"vel\"] = updateCarVelocity(car, netAccel)\r\n car[\"pos\"] = updateCarPosition(car, car[\"vel\"], 1 / FPS)\r\n debugDrawPointLocation((int(car[\"pos\"][0]), int(car[\"pos\"][1])), 3, ORANGE)\r\n #^position debug\r\n for car in cars:\r\n coords = getCollisionCoords(car, ball[\"pos\"], ball[\"r\"])\r\n if(coords != (-1, -1)):\r\n debugCollisionLocations(coords, 3, GREEN)\r\n\r\n testButton = workingButton(\"test button\", 25, BLACK, 100, 50, 100, 80, CYAN, BLUE)\r\n if testButton:\r\n if not clicked:\r\n if rotating:\r\n car[\"angV\"] = 0\r\n rotating = False\r\n else:\r\n car[\"angV\"] = 60\r\n rotating = True\r\n clicked = True\r\n else:\r\n clicked = False\r\n physicsButton = workingButton(\"physics button\", 25, BLACK, 100, 135, 100, 80, CYAN, BLUE)\r\n if physicsButton:\r\n if not physicsClicked:\r\n physics = not physics\r\n physicsClicked = True\r\n else:\r\n physicsClicked = False\r\n wallCarCollisions(cars, walls, wallDirections)\r\n## messageToScreen(\"Watch what the shell prints. If there's an error it'll print \\\"noooo\\\" followed by a number. 
Let me know what happens.\"\r\n## ,[500,660]\r\n## ,25)\r\n## messageToScreen(\"Please try to break it in any way you can think of (without editing the code cuz I know you'll claim that that still breaks it).\"\r\n## ,[500,680]\r\n## ,25)\r\n pygame.display.update()\r\n #waits until the frame is done (limits the loop to FPS frames per second)\r\n fpsClock.tick(FPS)\r\npygame.quit()\r\nsys.exit()\r\n","sub_path":"rl.py","file_name":"rl.py","file_ext":"py","file_size_in_byte":6609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"261286198","text":"from ortools.constraint_solver import routing_enums_pb2\nfrom ortools.constraint_solver import pywrapcp\n\nfrom tspapp.consumer.tsp.tsp import TSP\nfrom tspapp import logger\nfrom tspapp.consumer.tsp.vehical import Vehical\n\nclass FirstSolutionStrategy(TSP):\n def __init__(self, vehical: Vehical):\n self.data = {}\n self.vehical = vehical\n self.data['locations'] = vehical.locations\n self.data['num_vehicles'] = 1\n self.data['depot'] = 0\n\n def _print_solution(self, manager, routing, solution):\n logger.info('Objective: {}'.format(solution.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(manager.IndexToNode(index))\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n plan_output += 'Objective: {}m\\n'.format(route_distance)\n logger.info(plan_output)\n\n def get_shortest_path_index(self,manager, routing, solution):\n index = routing.Start(0)\n route_distance = 0\n plan_output = 'Route: '\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(manager.IndexToNode(index))\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}'.format(manager.IndexToNode(index))\n return plan_output\n\n def find_shortest_path(self):\n manager = pywrapcp.RoutingIndexManager(len(self.data['locations']),\n self.data['num_vehicles'], self.data['depot'])\n routing = pywrapcp.RoutingModel(manager)\n distance_matrix = self.vehical.eular_distance_locations()\n\n def distance_callback(from_index, to_index):\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return distance_matrix[from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n solution = routing.SolveWithParameters(search_parameters)\n return self.get_shortest_path_index(manager, routing, solution)","sub_path":"tspapp/consumer/tsp/first_solution_strategy.py","file_name":"first_solution_strategy.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"290311648","text":"#-------------------------------------------------------------------------------\r\n# Name: KaminskyExport\r\n# Purpose: Export UltraMed info to CSV\r\n#\r\n# Author: Marc\r\n#\r\n# Created: 17 Jan 2018\r\n# Copyright: (c) Marc 2018\r\n# Licence: \r\n#-------------------------------------------------------------------------------\r\nimport os, zipfile, csv\r\nfrom datetime import datetime\r\nfrom decimal import *\r\nfrom 
fsrStuff import fixDate\nfrom fsrStuff.umFuncs import RecordGenerator, parseDirectDat\nfrom fsrStuff.NPIFile import NPIFile\n\nclass Global(object):\n umDrive = 'E:'\n umDir = 'Ultramed'\n offNum = '03'\n docsToDo = ['41']\n startDate = '20170101'\n endDate = '20171231'\n\nclass Bunch(object):\n def __init__(self, **kwds):\n self.__dict__.update(kwds)\n\n\nclass mmExport(object):\n csvFile=os.path.join(os.path.expanduser(\"~\"),\"Desktop\",\"Export.csv\")\n fieldnames = ['example']\n def __init__(self):\n self.items = []\n self.Ins = {}\n self.Doc = {}\n self.Proc = {}\n self.offices = parseDirectDat(os.path.join(Global.umDrive, os.sep, Global.umDir, \"direct.dat\"))\n self.offices = { num: off for num, off in self.offices.items() if num == Global.offNum }\n\n def cvtToCSV(self):\n with open(self.csvFile, 'w') as outFile:\n writer = csv.DictWriter(outFile, self.fieldnames, quoting = csv.QUOTE_ALL, lineterminator='\\n')\n writer.writeheader()\n writer.writerows(self.items)\n\n def insPlan(self, FC, insCo):\n insPlan = \"\"\n if FC == \"01\": return \"Medicare\"\n elif FC == \"02\": return \"Medi-Cal\"\n elif FC == \"03\": return \"Medi-Medi\"\n else:\n try:\n return self.Ins[insCo]\n except KeyError:\n return insCo\n\n\nclass Insurance(mmExport):\n csvFile=os.path.join(os.path.expanduser(\"~\"),\"Desktop\",\"kam\", \"Insurance.csv\")\n fieldnames = [\"ID\", \"Name\", \"Street\", \"Address1\", \"City\", \"State\", \"Zip\", \"Tel\", \"Contact\"]\n def __init__(self):\n mmExport.__init__(self)\n for num, off in self.offices.items():\n count = 0\n for obj in RecordGenerator(off, \"Insurance\"):\n if (obj.Valid):\n count += 1\n line = {\"ID\":obj.ID,\n \"Name\":obj.Name,\n \"Street\":obj.Street,\n \"Address1\":obj.Address1,\n \"City\":obj.City,\n \"State\":obj.State,\n \"Zip\":obj.Zip,\n \"Tel\":obj.Tel,\n \"Contact\":obj.Contact}\n self.items.append(line)\n if not count % 1000: print(count)\n\nclass Referral(mmExport):\n csvFile=os.path.join(os.path.expanduser(\"~\"),\"Desktop\",\"kam\", \"Referral.csv\")\n fieldnames = [\"ID\",\"Company\", \"LastName\", \"FirstName\", \"MI\"]\n def __init__(self):\n mmExport.__init__(self)\n for num, off in self.offices.items():\n count = 0\n for obj in RecordGenerator(off, \"ReferralSource\"):\n if (obj.Valid):\n count += 1\n line = {\"ID\":obj.ID,\n \"Company\":obj.Company,\n \"LastName\":obj.LastName,\n \"FirstName\":obj.FirstName,\n \"MI\":obj.MI}\n self.items.append(line)\n if not count % 1000: print(count)\n\nclass Demographics(mmExport):\n csvFile=os.path.join(os.path.expanduser(\"~\"),\"Desktop\",\"kam\", \"Demographics.csv\")\n fieldnames = [\"LastName\", \"FirstName\", \"MI\", \"Address1\", \"Street\", \"City\", \"State\", \"Zip\", \"Birthdate\", \"Sex\", \"RefID\",\"InsCompany\", \"InsuredID\"]\n def __init__(self):\n mmExport.__init__(self)\n for num, off in self.offices.items():\n countIn = countOut = 0\n for obj in RecordGenerator(off, \"Insurance\"):\n self.Ins[obj.ID] = obj.Name\n for obj in RecordGenerator(off, \"Patient\"):\n countIn += 1\n if (obj.Valid):\n countOut += 1\n line = {\"LastName\": obj.LastName,\n \"FirstName\": obj.FirstName,\n \"MI\": obj.MI,\n \"Address1\": obj.Address1,\n \"Street\": obj.Street,\n \"City\": obj.City,\n \"State\": obj.State,\n \"Zip\": obj.Zip,\n \"Birthdate\": obj.Birthdate,\n \"Sex\": obj.Sex,\n \"RefID\": obj.ReferredBy, \n \"InsCompany\": self.insPlan(obj.FinancialCategory, obj.PriInsCompany),\n \"InsuredID\": obj.PriInsuredID}\n self.items.append(line)\n if not countIn % 1000: print(countIn, 
countOut)\n\nclass Vizel(mmExport):\n csvFile=os.path.join(os.path.expanduser(\"~\"),\"Desktop\",\"kam\", \"Vizel.csv\")\n fieldnames = [\"LastName\", \"FirstName\", \"MI\", \"DOB\", \"Sex\"]\n def __init__(self):\n sex = {'M':'Male', 'F':'Female'}\n mmExport.__init__(self)\n for num, off in self.offices.items():\n pats = {}\n for obj in RecordGenerator(off, \"TransactionD\"):\n if obj.Valid and not obj.ExtChg and \\\n (obj.Doctor in Global.docsToDo) and (Global.startDate < obj.Date < Global.endDate):\n pats[obj.ChartNumber] = obj.ChartNumber\n for obj in RecordGenerator(off, \"Patient\"):\n if (obj.Valid) and (obj.ChartNumber in pats) and (obj.FinancialCategory in ('01', '03')):\n line = {\"LastName\": obj.LastName,\n \"FirstName\": obj.FirstName,\n \"MI\": obj.MI,\n \"DOB\": fixDate(obj.Birthdate, spaces=False, slashes=True),\n \"Sex\": sex[obj.Sex]}\n self.items.append(line)\n\n\nif __name__ == '__main__':\n\n for obj in (Vizel,):\n print(\"Starting \" + obj.__name__ + \"... \")\n thing = obj()\n thing.cvtToCSV()\n print(\"... finished\")\n\n","sub_path":"kam-expCSV.py","file_name":"kam-expCSV.py","file_ext":"py","file_size_in_byte":6319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"384995084","text":"from lux.utils import test\n\n\nclass TestWrappers(test.TestCase):\n config_file = 'tests.core'\n\n async def test_is_secure(self):\n app = self.application()\n client = self.app_client(app)\n request = await client.get('/')\n self.assertFalse(request.is_secure)\n\n async def test_logger(self):\n app = self.application()\n client = self.app_client(app)\n request = await client.get('/')\n self.assertNotEqual(app.logger, request.logger)\n","sub_path":"tests/core/test_wrappers.py","file_name":"test_wrappers.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"168295229","text":"import sympy\nimport concurrent.futures\n\n\ndef tCalculaPrimo(data):\n primos = 0\n for i in range(len(data)):\n if sympy.isprime(data[i]):\n primos += 1\n return primos\n\ndef resolve_trhread(data):\n ThreadsQtdd = 5\n tamanholista = len(data)\n index = range(0, tamanholista+(tamanholista//ThreadsQtdd), tamanholista//ThreadsQtdd)\n primos = 0\n with concurrent.futures.ThreadPoolExecutor() as executor:\n futures = []\n for i in range(ThreadsQtdd):\n futures.append(executor.submit(tCalculaPrimo, data=data[index[i]:index[i+1]]))\n for future in concurrent.futures.as_completed(futures):\n #futures.append(future.result())\n primos += future.result()\n return primos\n","sub_path":"mtrhead.py","file_name":"mtrhead.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"319036856","text":"import pygame\nfrom pygame.surface import Surface\nfrom src.UI.image import Image\n\n\nclass ImageBackground(Image):\n def __init__(self, center, size, color=(0, 0, 0), smooth=False):\n super().__init__(center, Surface(size))\n if len(color) == 4 or smooth:\n self.image = self.image.convert_alpha()\n if not smooth:\n self.image.fill(color)\n else:\n self.image.fill((0, 0, 0, 0))\n r = min(int(1 / 10 * size[0]), int(1 / 10 * size[1]))\n w = int(size[0])\n h = int(size[1])\n pygame.draw.circle(self.image, color, (r, r), r)\n pygame.draw.circle(self.image, color, (r, h - r), r)\n pygame.draw.circle(self.image, color, (w - r, r), r)\n pygame.draw.circle(self.image, color, (w - r, h - r), r)\n 
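# the two overlapping rectangles below fill the area between the four corner circles, completing the rounded-rectangle fill\n 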
pygame.draw.rect(self.image, color, (0, r, w, h - 2 * r))\n pygame.draw.rect(self.image, color, (r, 0, w - 2 * r, h))\n","sub_path":"src/UI/image_background.py","file_name":"image_background.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"154662276","text":"import unittest\nfrom main import *\n\nclass TestDatabase(unittest.TestCase):\n\n def test_regions_table(self):\n conn = sqlite3.connect(\"hikes.db\")\n cur = conn.cursor()\n\n sql = 'SELECT name FROM regions'\n results = cur.execute(sql)\n result_list = results.fetchall()\n self.assertIn(('Central Cascades',), result_list)\n self.assertEqual(len(result_list), 11)\n conn.close()\n\n def test_hikes_table(self):\n conn = sqlite3.connect(\"hikes.db\")\n cur = conn.cursor()\n\n sql = '''\n SELECT name\n FROM hikes\n WHERE regionId=\"b4845d8a21ad6a202944425c86b6e85f\"\n '''\n results = cur.execute(sql)\n result_list = results.fetchall()\n self.assertIn(('Scorpion Mountain',), result_list)\n self.assertEqual(len(result_list), 9)\n\n sql = '''\n SELECT COUNT(*)\n FROM hikes\n '''\n results = cur.execute(sql)\n count = results.fetchone()[0]\n self.assertTrue(count == 107)\n\n conn.close()\n\n def test_hike_reviews_table(self):\n \tconn = sqlite3.connect(\"hikes.db\")\n \tcur = conn.cursor()\n\n \tsql = '''\n \t SELECT COUNT(*)\n \t FROM hikeReviews\n \t WHERE hikeId = 1\n \t'''\n \tresults = cur.execute(sql)\n \tcount = results.fetchone()[0]\n \tself.assertEqual(count, 5)\n \tconn.close()\n\n def test_restaurants_table(self):\n conn = sqlite3.connect(\"hikes.db\")\n cur = conn.cursor()\n\n sql = '''\n \t SELECT name\n \t FROM restaurants\n \t WHERE hikeId = 1\n \t'''\n results = cur.execute(sql)\n result_list = results.fetchall()\n self.assertIn((\"Georgia's Bakery\",), result_list)\n self.assertEqual(len(result_list), 5)\n\n def test_joins(self):\n conn = sqlite3.connect(\"hikes.db\")\n cur = conn.cursor()\n\n sql = '''\n SELECT hikes.name\n FROM hikes\n JOIN regions\n ON hikes.regionId=regions.Id\n WHERE regions.name=\"Central Cascades\"\n '''\n results = cur.execute(sql)\n result_list = results.fetchall()\n self.assertIn(('Scorpion Mountain',), result_list)\n self.assertEqual(len(result_list), 9)\n conn.close()\n\nunittest.main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"277875807","text":"import urllib3\nfrom bs4 import BeautifulSoup\n\nhttp = urllib3.PoolManager()\npagina = http.request('GET','https://pt.wikipedia.org/wiki/Linguagem_de_programa%C3%A7%C3%A3o1')\n\nsopa = BeautifulSoup(pagina.data,\"lxml\")\nfor tags in sopa(['script','style']):\n tags.decompose()\nconteudo = ' '.join(sopa.stripped_strings)","sub_path":"exemplo_extração_conteúdo.py","file_name":"exemplo_extração_conteúdo.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"597726517","text":"import uuid\nimport time\nimport threading\nfrom redis import Redis\n\n\nclass DistributedLock(object):\n def __init__(self, key, client=Redis(), initial_ttl=5, renew_ttl=5, polling_interval=2,\n renew_interval=2):\n \"\"\"\n A Redis-based distributed lock\n :param key: key that identifies the lock; different locks must use different keys\n :param client: Redis client\n :param initial_ttl: time-to-live set when the lock is first acquired\n :param renew_ttl: time-to-live value set on each renewal\n :param polling_interval: if the lock was not acquired, retry after polling_interval\n :param 
renew_interval: interval between consecutive TTL renewals\n \"\"\"\n self._key = key\n self._redis_client = client\n self._initial_ttl = initial_ttl\n self._renew_ttl = renew_ttl\n self._polling_interval = polling_interval\n self._renew_interval = renew_interval\n\n def __enter__(self):\n self._lock()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self._unlock()\n\n def _lock(self):\n while True:\n self._value = str(uuid.uuid4())\n if self._redis_client.set(self._key, self._value, ex=self._initial_ttl, nx=True):\n self._is_renew_ttl = True\n self._update_lock_ttl()\n break\n time.sleep(self._polling_interval)\n\n def _unlock(self):\n unlock_script = \"\"\"\n if redis.call('get', KEYS[1]) == ARGV[1] then\n return redis.call('del', KEYS[1]) \n else\n return 0\n end\n \"\"\"\n self._redis_client.eval(unlock_script, 1, self._key, self._value)\n self._is_renew_ttl = False\n\n def _update_lock_ttl(self):\n\n def _thread_target():\n while self._is_renew_ttl is True:\n renew_script = \"\"\"\n if redis.call('get', KEYS[1]) == ARGV[1] then\n return redis.call('expire',KEYS[1],ARGV[2])\n else\n return 0 \n end \n \"\"\"\n self._redis_client.eval(renew_script, 1, self._key, self._value, self._renew_ttl)\n time.sleep(self._renew_interval)\n\n threading.Thread(target=_thread_target, args=()).start()\n\n\nif __name__ == '__main__':\n with DistributedLock(\"some key\"):\n print(\"with lock block\")\n time.sleep(3)\n print(\"without lock block\")\n time.sleep(3)","sub_path":"lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"574757834","text":"# From Python\n# It requires OpenCV installed for Python\nimport sys\nimport cv2\nimport os\nfrom sys import platform\n\nfrom core.lib import HKIPcamera1\nfrom core.lib import HKIPcamera2\nfrom core.lib import HKIPcamera3\nimport numpy as np\nimport time\n\nfrom core.util.angle_calculator import AngleCalculator\n\nfrom core.config.app_config import logger\n\nname = 'admin' # administrator username\npw = 'shihang123' # administrator password\nHKIPcamera1.init('10.90.90.91', name, pw) # front\nHKIPcamera2.init('10.90.90.92', name, pw) # left\nHKIPcamera3.init('10.90.90.93', name, pw) # right\n\n# Import Openpose (Windows/Ubuntu/OSX)\ndir_path = os.path.dirname(os.path.realpath(__file__))\ntry:\n # Windows Import\n if platform == \"win32\":\n # Change these variables to point to the correct folder (Release/x64 etc.)\n sys.path.append(dir_path + '/../../python/openpose/Release');\n os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'\n import pyopenpose as op\n else:\n # Change these variables to point to the correct folder (Release/x64 etc.)\n sys.path.append('/home/zhangjiang/code/openpose/openpose/build/python');\n # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.\n # sys.path.append('/usr/local/python')\n from openpose import pyopenpose as op\nexcept ImportError as e:\n print('Error: OpenPose library could not be found. 
Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')\n raise e\n\n# Flags\n\n# Custom Params (refer to include/openpose/flags.hpp for more parameters)\nparams = dict()\nparams[\"model_folder\"] = \"/home/zhangjiang/code/openpose/openpose/models/\"\n\n# Construct it from system arguments\n# op.init_argv(args[1])\n# oppython = op.OpenposePython()\n\n# Starting OpenPose\nopWrapper = op.WrapperPython()\nopWrapper.configure(params)\nopWrapper.start()\n\nangle_calculator = AngleCalculator()\n\n\nwhile True:\n datum = op.Datum()\n\n # frame1 front\n t0 = time.time()\n frame = HKIPcamera1.getframe()\n t1 = time.time()\n process_frame1 = cv2.pyrDown(np.rot90(np.array(frame)))\n t2 = time.time()\n\n datum.cvInputData = process_frame1\n t3 = time.time()\n\n opWrapper.emplaceAndPop([datum])\n t4 = time.time()\n if datum.poseKeypoints.shape:\n key_points1 = datum.poseKeypoints[0].tolist()\n key_points1 = [(None, None, None) if point[:2] == [0.0, 0.0] else tuple(point) for point in key_points1]\n else:\n key_points1 = [(None, None, None)] * 25\n logger.warn(\"can not detect person in camera 1\")\n t5 = time.time()\n\n # frame2 left\n frame2 = HKIPcamera2.getframe()\n process_frame2 = cv2.pyrDown(np.rot90(np.array(frame2)))\n datum.cvInputData = process_frame2\n opWrapper.emplaceAndPop([datum])\n if datum.poseKeypoints.shape:\n key_points2 = datum.poseKeypoints[0].tolist()\n key_points2 = [(None, None, None) if point[:2] == [0.0, 0.0] else tuple(point) for point in key_points2]\n else:\n key_points2 = [(None, None, None)] * 25\n logger.warn(\"can not detect person in camera 2\")\n\n # frame3 right\n frame3 = HKIPcamera3.getframe()\n process_frame3 = cv2.pyrDown(np.rot90(np.array(frame3)))\n datum.cvInputData = process_frame3\n opWrapper.emplaceAndPop([datum])\n if datum.poseKeypoints.shape:\n key_points3 = datum.poseKeypoints[0].tolist()\n key_points3 = [(None, None, None) if point[:2] == [0.0, 0.0] else tuple(point) for point in key_points3]\n else:\n key_points3 = [(None, None, None)] * 25\n logger.warn(\"can not detect person in camera 3\")\n\n key_points_all = {}\n process_frame4 = np.concatenate([process_frame1, process_frame2, process_frame3], axis=1)\n datum.cvInputData = process_frame4\n opWrapper.emplaceAndPop([datum])\n\n width_single_img = process_frame1.shape[1]\n for idx, poseKeypoints in enumerate(datum.poseKeypoints):\n key_points = poseKeypoints.tolist()\n\n x_max = max([point[0] for point in key_points])\n\n if 0 <= x_max and x_max < width_single_img:\n key_points_all.update({'{}_1'.format(idx, ): [(None, None, None) if point[:2] == [0.0, 0.0] else tuple(point) for point in key_points]})\n elif width_single_img <= x_max and x_max < width_single_img * 2:\n key_points_all.update({'{}_2'.format(idx, ): [(None, None, None) if point[:2] == [0.0, 0.0] else (point[0] - width_single_img, point[1], point[2]) for point in key_points]})\n else:\n key_points_all.update({'{}_3'.format(idx, ): [(None, None, None) if point[:2] == [0.0, 0.0] else (point[0] - 2 * width_single_img, point[1], point[2]) for point in key_points]})\n\n # calculate angles\n angle_calculator.update_every_frame(key_points1, key_points3, key_points2)\n\n print('t2-t1 {}, t3-t2 {}, t4-t3 {}, t5-t4 {}'.format(t2-t1, t3-t2, t4-t3, t5-t4))\n\n","sub_path":"openpose_python.py","file_name":"openpose_python.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"182518066","text":"from simple_net 
import *\nimport numpy as np\nimport random\nimport math\nimport pickle\n\nBATCH_SIZE = 64\nEPOCH = 2\n\nnet = SimpleNet(cross_entropy_loss, Adam(),\\\n layers=[\n FullConnectedLayer(784, 160),\n LeakyReLULayer(),\n \n FullConnectedLayer(160, 64),\n BatchNormLayer(64),\n DropoutLayer(0.4),\n LeakyReLULayer(),\n\n FullConnectedLayer(64, 64),\n BatchNormLayer(64),\n DropoutLayer(0.4),\n LeakyReLULayer(),\n\n FullConnectedLayer(64, 10),\n SoftmaxLayer()\n ])\n\n\nwith open('mnist_dataset.pkl', 'rb') as f:\n train_dataset, eval_dataset = pickle.load(f)\n\ndataloader = DataLoader(*train_dataset, BATCH_SIZE, EPOCH)\n\ndef eval(eval_data, eval_labels):\n a = np.argmax(eval_labels, axis=1)\n b = np.argmax(net.predict(eval_data), axis=1)\n return np.mean(a==b)\n\nfor pack in dataloader:\n loss = net.train(*pack)\n \n if dataloader.iter_cnt % 50 == 0:\n net.optimizer.lr = 0.0015 + 0.001 * np.sin(dataloader.iter_cnt / ( 60000 / BATCH_SIZE ) * 2 * np.pi)\n acc = eval(*eval_dataset)\n print(\"Epoch %s | Iteration %s | Loss %.4f | Acc %s\" %(dataloader.epoch_cnt, dataloader.iter_cnt, loss, acc))\n\nsave_network(net, 'net.pkl')","sub_path":"my_code/net/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"636016154","text":"\"Disclaimer : This must be run only once on the CSV files folder -- if you run it twice we lose 3 lines of data each time.\" \r\n\r\n\"This file is regarding : eliminating the unwanted stuff and collecting all the attributes in an outfile.txt\"\r\n\r\nimport sys,glob, shutil\r\nimport os\r\nimport linecache, csv\r\nimport fileinput\r\nimport ast\r\n\r\n# copy the original samsung_data folder to a second samsung_2 folder, on which we perform the preprocessing\r\n\r\nfrom distutils.dir_util import copy_tree\r\n\r\nfromDirectory = \"/home/user/Documents/Data\"\r\ntoDirectory = \"/home/user/Documents/Data_2\"\r\n\r\ncopy_tree(fromDirectory, toDirectory)\r\n\r\n# Function to reverse a string\r\ndef reverse(string):\r\n string = \"\".join(reversed(string))\r\n return string\r\n\r\n#cmd_star_str = 'echo \"*************************************\" >> /home/user/Desktop/outfile1'\r\n#Preprocessing the samsung_2 *.csv files\r\nos.chdir(\"/home/user/Documents/Data_2\")\r\nfor file in glob.glob(\"*.csv\"):\r\n\tprint (\"******************\") \r\n\tprint (file)\r\n\tf_name_temp=file\r\n\tf_name_temp1=file[14:]\r\n\tf_name_temp2=reverse(f_name_temp1)\r\n\tf_name_temp2=f_name_temp2[4:]\r\n\tfolder_name=reverse(f_name_temp2)\r\n\tprint (folder_name)\r\n\ttry:\r\n\t\tos.makedirs(folder_name)\r\n\texcept OSError:\r\n\t\tif os.path.exists(folder_name):\r\n\t\t\t# We are nearly safe\r\n\t\t\tpass\r\n\t\t#else:\r\n\t\t# There was an error on creation, so make sure we know about it\r\n\t\t#raise\r\n\t\r\n\t#To remove three lines\r\n\tfor line_number, line1 in enumerate(fileinput.input(file, inplace=1)):\r\n\t\tif line_number == 0 :\r\n\t\t\tcontinue\r\n\t\telif line_number == 1 :\r\n\t\t\tcontinue\r\n\t\telif line_number == 2 :\r\n\t\t\tcontinue\r\n\t\telse:\r\n\t\t\tsys.stdout.write(line1)\r\n\tline = linecache.getline(file, 1)\r\n\ttemp=line[0:66]\r\n\tattributes= line[66:]\r\n\t#to remove unwanted things from Attributes\r\n\tnew_attributes1=attributes.replace(\"(count)\", \"\")\r\n\tnew_attributes2=new_attributes1.replace(\"(ms)\", \"\")\r\n\tnew_attributes3=new_attributes2.replace(\"(kbps)\", \"\")\r\n\tnew_attributes4=new_attributes3.replace(\"(mbps)\", 
\"\")\r\n\tnew_attributes5=new_attributes4.replace(\"(Kbytes)\", \"\")\r\n\tnew_attributes6=new_attributes5.replace(\"(mbytes)\", \"\")\r\n\tnew_attributes7=new_attributes6.replace(\"(Kbps)\", \"\")\r\n\tnew_attributes8=new_attributes7.replace(\"(%)\",\"\")\r\n\tnew_attributes9=new_attributes8.replace(\"(A)\", \"\")\r\n\tnew_attributes10=new_attributes9.replace(\"(dBm)\", \"\")\r\n\tnew_attributes11=new_attributes10.replace(\"(s)\", \"\")\r\n\tnew_attributes12=new_attributes11.replace(\"(V)\", \"\")\r\n\tnew_attributes13=new_attributes12.replace(\"(Mbytes)\", \"\")\r\n\tnew_attributes14=new_attributes13.replace(\"(Mbps)\", \"\")\r\n\tnew_attributes15=new_attributes14.replace(\"(RI)\", \"\")\r\n\tnew_attributes16=new_attributes15.replace(\"(TTI)\", \"\")\r\n\tnew_attributes17=new_attributes16.replace(\"(CQI)\", \"\")\r\n\tnew_attributes18=new_attributes17.replace(\"(ppm)\", \"\")\r\n\tnew_attributes18=new_attributes17.replace(\"(dB)\", \"\")\r\n\tnew_attributes19=new_attributes18.replace(\"(bytes)\", \"\")\r\n\tnew_attributes20=new_attributes19.replace(\"(tcID)\", \"\")\r\n\tnew_attributes21=new_attributes20.replace(\"(scID)\", \"\")\t\t\r\n\ttemp_new=temp.replace(\"#attribute:\",\"\")\r\n\r\n\tStringAttributes=temp_new+new_attributes21\r\n\t\r\n\tlist_attributes=new_attributes21.split(\",\");\r\n\tprint (StringAttributes)\r\n\tprint (list_attributes)\r\n\t#to replace the original attributes with our newly processes attributes\r\n\tf=open(file,'r+')\r\n\tf.seek(0)\r\n\tf.write(StringAttributes)\r\n\tf.close()\r\n\t#after replacing we are getting the extra string remained in the 1st line to 2nd line---- so we are deleting the 2nd line also.\r\n\tfor line_number, line1 in enumerate(fileinput.input(file, inplace=1)):\r\n\t\tif line_number == 1 :\r\n\t\t\tcontinue\r\n\t\telse:\r\n\t\t\tsys.stdout.write(line1)\r\n\tsource_filepath ='%s' %file \r\n\tfolder_name = '%s/%s' %(folder_name, file)\r\n\tprint (source_filepath)\r\n\tprint (folder_name)\r\n\tshutil.copyfile(source_filepath, folder_name)\r\n\r\n\t\t\t\t\r\n\r\n\r\n","sub_path":"PMS FINAL SCRIPTS/PMS_Mapping_script.py","file_name":"PMS_Mapping_script.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}